Columns (name, type, and value statistics as given; for string and numeric
columns the two numbers read as min to max, for class columns as the count
of distinct values):

    column               type            stats
    file_name            stringlengths   4 to 45
    method_name          stringlengths   3 to 58
    code_before          stringlengths   980 to 1.05M
    code_after           stringlengths   1.13k to 1.05M
    func_before          stringlengths   55 to 114k
    func_after           stringlengths   71 to 114k
    diff                 stringlengths   75 to 133k
    num_lines_added      float64         1 to 1.49k
    num_lines_deleted    float64         1 to 1.13k
    num_lines_in_file    float64         27 to 23.2k
    num_tokens_in_file   float64         143 to 192k
    repo                 stringclasses   259 values
    cve_id               stringlengths   13 to 16
    cwe_id               stringclasses   73 values
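For a concrete picture of what one record in this schema holds, a minimal C sketch follows. The field names are taken from the column list above; the struct itself, its name, and the choice of char pointers for string columns and double for float64 columns are purely illustrative, not part of the dataset.

    /* Illustrative only: a plain C view of one record in the schema above.
     * String columns are held as pointers into whatever buffer the record
     * was parsed from; float64 columns map to double.
     */
    struct vuln_fix_record {
            const char *file_name;          /* e.g. "ip_sockglue.c" */
            const char *method_name;        /* e.g. "ip_cmsg_recv_checksum" */
            const char *code_before;        /* full file before the fix */
            const char *code_after;         /* full file after the fix */
            const char *func_before;        /* changed function, before */
            const char *func_after;         /* changed function, after */
            const char *diff;               /* diff between the two versions */
            double      num_lines_added;
            double      num_lines_deleted;
            double      num_lines_in_file;
            double      num_tokens_in_file;
            const char *repo;               /* one of 259 repositories */
            const char *cve_id;             /* CVE identifier, 13 to 16 chars */
            const char *cwe_id;             /* one of 73 CWE classes */
    };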
file_name: ip_sockglue.c
method_name: ip_cmsg_recv_checksum
code_before:
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The IP to API glue. * * Authors: see ip.c * * Fixes: * Many : Split from ip.c , see ip.c for history. * Martin Mares : TOS setting fixed. * Alan Cox : Fixed a couple of oopses in Martin's * TOS tweaks. * Mike McLagan : Routing by source */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp_states.h> #include <linux/udp.h> #include <linux/igmp.h> #include <linux/netfilter.h> #include <linux/route.h> #include <linux/mroute.h> #include <net/inet_ecn.h> #include <net/route.h> #include <net/xfrm.h> #include <net/compat.h> #include <net/checksum.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/transp_v6.h> #endif #include <net/ip_fib.h> #include <linux/errqueue.h> #include <linux/uaccess.h> /* * SOL_IP control messages. */ static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) { struct in_pktinfo info = *PKTINFO_SKB_CB(skb); info.ipi_addr.s_addr = ip_hdr(skb)->daddr; put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); } static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb) { int ttl = ip_hdr(skb)->ttl; put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl); } static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb) { put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos); } static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) { if (IPCB(skb)->opt.optlen == 0) return; put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen, ip_hdr(skb) + 1); } static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) { unsigned char optbuf[sizeof(struct ip_options) + 40]; struct ip_options *opt = (struct ip_options *)optbuf; if (IPCB(skb)->opt.optlen == 0) return; if (ip_options_echo(opt, skb)) { msg->msg_flags |= MSG_CTRUNC; return; } ip_options_undo(opt); put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data); } static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb) { int val; if (IPCB(skb)->frag_max_size == 0) return; val = IPCB(skb)->frag_max_size; put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val); } static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset) { __wsum csum = skb->csum; if (skb->ip_summed != CHECKSUM_COMPLETE) return; if (offset != 0) csum = csum_sub(csum, csum_partial(skb_transport_header(skb) + tlen, offset, 0)); put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); } static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) { char *secdata; u32 seclen, secid; int err; err = security_socket_getpeersec_dgram(NULL, skb, &secid); if (err) return; err = security_secid_to_secctx(secid, &secdata, &seclen); if (err) return; put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata); security_release_secctx(secdata, seclen); } static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) { struct sockaddr_in sin; const struct iphdr *iph = ip_hdr(skb); __be16 *ports = (__be16 *)skb_transport_header(skb); if (skb_transport_offset(skb) + 4 > (int)skb->len) return; /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this 
function is * written with this assumption in mind. */ sin.sin_family = AF_INET; sin.sin_addr.s_addr = iph->daddr; sin.sin_port = ports[1]; memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin); } void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, int tlen, int offset) { struct inet_sock *inet = inet_sk(sk); unsigned int flags = inet->cmsg_flags; /* Ordered by supposed usage frequency */ if (flags & IP_CMSG_PKTINFO) { ip_cmsg_recv_pktinfo(msg, skb); flags &= ~IP_CMSG_PKTINFO; if (!flags) return; } if (flags & IP_CMSG_TTL) { ip_cmsg_recv_ttl(msg, skb); flags &= ~IP_CMSG_TTL; if (!flags) return; } if (flags & IP_CMSG_TOS) { ip_cmsg_recv_tos(msg, skb); flags &= ~IP_CMSG_TOS; if (!flags) return; } if (flags & IP_CMSG_RECVOPTS) { ip_cmsg_recv_opts(msg, skb); flags &= ~IP_CMSG_RECVOPTS; if (!flags) return; } if (flags & IP_CMSG_RETOPTS) { ip_cmsg_recv_retopts(msg, skb); flags &= ~IP_CMSG_RETOPTS; if (!flags) return; } if (flags & IP_CMSG_PASSSEC) { ip_cmsg_recv_security(msg, skb); flags &= ~IP_CMSG_PASSSEC; if (!flags) return; } if (flags & IP_CMSG_ORIGDSTADDR) { ip_cmsg_recv_dstaddr(msg, skb); flags &= ~IP_CMSG_ORIGDSTADDR; if (!flags) return; } if (flags & IP_CMSG_CHECKSUM) ip_cmsg_recv_checksum(msg, skb, tlen, offset); if (flags & IP_CMSG_RECVFRAGSIZE) ip_cmsg_recv_fragsize(msg, skb); } EXPORT_SYMBOL(ip_cmsg_recv_offset); int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6) { int err, val; struct cmsghdr *cmsg; struct net *net = sock_net(sk); for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; #if IS_ENABLED(CONFIG_IPV6) if (allow_ipv6 && cmsg->cmsg_level == SOL_IPV6 && cmsg->cmsg_type == IPV6_PKTINFO) { struct in6_pktinfo *src_info; if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info))) return -EINVAL; src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); if (!ipv6_addr_v4mapped(&src_info->ipi6_addr)) return -EINVAL; ipc->oif = src_info->ipi6_ifindex; ipc->addr = src_info->ipi6_addr.s6_addr32[3]; continue; } #endif if (cmsg->cmsg_level == SOL_SOCKET) { err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc); if (err) return err; continue; } if (cmsg->cmsg_level != SOL_IP) continue; switch (cmsg->cmsg_type) { case IP_RETOPTS: err = cmsg->cmsg_len - sizeof(struct cmsghdr); /* Our caller is responsible for freeing ipc->opt */ err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40); if (err) return err; break; case IP_PKTINFO: { struct in_pktinfo *info; if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) return -EINVAL; info = (struct in_pktinfo *)CMSG_DATA(cmsg); ipc->oif = info->ipi_ifindex; ipc->addr = info->ipi_spec_dst.s_addr; break; } case IP_TTL: if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) return -EINVAL; val = *(int *)CMSG_DATA(cmsg); if (val < 1 || val > 255) return -EINVAL; ipc->ttl = val; break; case IP_TOS: if (cmsg->cmsg_len == CMSG_LEN(sizeof(int))) val = *(int *)CMSG_DATA(cmsg); else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8))) val = *(u8 *)CMSG_DATA(cmsg); else return -EINVAL; if (val < 0 || val > 255) return -EINVAL; ipc->tos = val; ipc->priority = rt_tos2priority(ipc->tos); break; default: return -EINVAL; } } return 0; } /* Special input handler for packets caught by router alert option. They are selected only by protocol field, and then processed likely local ones; but only if someone wants them! Otherwise, router not running rsvpd will kill RSVP. It is user level problem, what it will make with them. 
I have no idea, how it will masquearde or NAT them (it is joke, joke :-)), but receiver should be enough clever f.e. to forward mtrace requests, sent to multicast group to reach destination designated router. */ struct ip_ra_chain __rcu *ip_ra_chain; static DEFINE_SPINLOCK(ip_ra_lock); static void ip_ra_destroy_rcu(struct rcu_head *head) { struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu); sock_put(ra->saved_sk); kfree(ra); } int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)) { struct ip_ra_chain *ra, *new_ra; struct ip_ra_chain __rcu **rap; if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW) return -EINVAL; new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; spin_lock_bh(&ip_ra_lock); for (rap = &ip_ra_chain; (ra = rcu_dereference_protected(*rap, lockdep_is_held(&ip_ra_lock))) != NULL; rap = &ra->next) { if (ra->sk == sk) { if (on) { spin_unlock_bh(&ip_ra_lock); kfree(new_ra); return -EADDRINUSE; } /* dont let ip_call_ra_chain() use sk again */ ra->sk = NULL; RCU_INIT_POINTER(*rap, ra->next); spin_unlock_bh(&ip_ra_lock); if (ra->destructor) ra->destructor(sk); /* * Delay sock_put(sk) and kfree(ra) after one rcu grace * period. This guarantee ip_call_ra_chain() dont need * to mess with socket refcounts. */ ra->saved_sk = sk; call_rcu(&ra->rcu, ip_ra_destroy_rcu); return 0; } } if (!new_ra) { spin_unlock_bh(&ip_ra_lock); return -ENOBUFS; } new_ra->sk = sk; new_ra->destructor = destructor; RCU_INIT_POINTER(new_ra->next, ra); rcu_assign_pointer(*rap, new_ra); sock_hold(sk); spin_unlock_bh(&ip_ra_lock); return 0; } void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { struct sock_exterr_skb *serr; skb = skb_clone(skb, GFP_ATOMIC); if (!skb) return; serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; serr->ee.ee_origin = SO_EE_ORIGIN_ICMP; serr->ee.ee_type = icmp_hdr(skb)->type; serr->ee.ee_code = icmp_hdr(skb)->code; serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) - skb_network_header(skb); serr->port = port; if (skb_pull(skb, payload - skb->data)) { skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb) == 0) return; } kfree_skb(skb); } void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info) { struct inet_sock *inet = inet_sk(sk); struct sock_exterr_skb *serr; struct iphdr *iph; struct sk_buff *skb; if (!inet->recverr) return; skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC); if (!skb) return; skb_put(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->daddr = daddr; serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL; serr->ee.ee_type = 0; serr->ee.ee_code = 0; serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); serr->port = port; __skb_pull(skb, skb_tail_pointer(skb) - skb->data); skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb)) kfree_skb(skb); } /* For some errors we have valid addr_offset even with zero payload and * zero port. Also, addr_offset should be supported if port is set. 
*/ static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr) { return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port; } /* IPv4 supports cmsg on all imcp errors and some timestamps * * Timestamp code paths do not initialize the fields expected by cmsg: * the PKTINFO fields in skb->cb[]. Fill those in here. */ static bool ipv4_datagram_support_cmsg(const struct sock *sk, struct sk_buff *skb, int ee_origin) { struct in_pktinfo *info; if (ee_origin == SO_EE_ORIGIN_ICMP) return true; if (ee_origin == SO_EE_ORIGIN_LOCAL) return false; /* Support IP_PKTINFO on tstamp packets if requested, to correlate * timestamp with egress dev. Not possible for packets without dev * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). */ if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || (!skb->dev)) return false; info = PKTINFO_SKB_CB(skb); info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; info->ipi_ifindex = skb->dev->ifindex; return true; } /* * Handle MSG_ERRQUEUE */ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { struct sock_exterr_skb *serr; struct sk_buff *skb; DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct { struct sock_extended_err ee; struct sockaddr_in offender; } errhdr; int err; int copied; WARN_ON_ONCE(sk->sk_family == AF_INET6); err = -EAGAIN; skb = sock_dequeue_err_skb(sk); if (!skb) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (unlikely(err)) { kfree_skb(skb); return err; } sock_recv_timestamp(msg, sk, skb); serr = SKB_EXT_ERR(skb); if (sin && ipv4_datagram_support_addr(serr)) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); sin->sin_port = serr->port; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); sin = &errhdr.offender; memset(sin, 0, sizeof(*sin)); if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; if (inet_sk(sk)->cmsg_flags) ip_cmsg_recv(msg, skb); } put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr); /* Now we could try to dump offended packet options */ msg->msg_flags |= MSG_ERRQUEUE; err = copied; consume_skb(skb); out: return err; } /* * Socket option code for IP. This is the end of the line after any * TCP,UDP etc options on an IP socket. 
*/ static bool setsockopt_needs_rtnl(int optname) { switch (optname) { case IP_ADD_MEMBERSHIP: case IP_ADD_SOURCE_MEMBERSHIP: case IP_BLOCK_SOURCE: case IP_DROP_MEMBERSHIP: case IP_DROP_SOURCE_MEMBERSHIP: case IP_MSFILTER: case IP_UNBLOCK_SOURCE: case MCAST_BLOCK_SOURCE: case MCAST_MSFILTER: case MCAST_JOIN_GROUP: case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_UNBLOCK_SOURCE: return true; } return false; } static int do_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); int val = 0, err; bool needs_rtnl = setsockopt_needs_rtnl(optname); switch (optname) { case IP_PKTINFO: case IP_RECVTTL: case IP_RECVOPTS: case IP_RECVTOS: case IP_RETOPTS: case IP_TOS: case IP_TTL: case IP_HDRINCL: case IP_MTU_DISCOVER: case IP_RECVERR: case IP_ROUTER_ALERT: case IP_FREEBIND: case IP_PASSSEC: case IP_TRANSPARENT: case IP_MINTTL: case IP_NODEFRAG: case IP_BIND_ADDRESS_NO_PORT: case IP_UNICAST_IF: case IP_MULTICAST_TTL: case IP_MULTICAST_ALL: case IP_MULTICAST_LOOP: case IP_RECVORIGDSTADDR: case IP_CHECKSUM: case IP_RECVFRAGSIZE: if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; } else if (optlen >= sizeof(char)) { unsigned char ucval; if (get_user(ucval, (unsigned char __user *) optval)) return -EFAULT; val = (int) ucval; } } /* If optlen==0, it is equivalent to val == 0 */ if (ip_mroute_opt(optname)) return ip_mroute_setsockopt(sk, optname, optval, optlen); err = 0; if (needs_rtnl) rtnl_lock(); lock_sock(sk); switch (optname) { case IP_OPTIONS: { struct ip_options_rcu *old, *opt = NULL; if (optlen > 40) goto e_inval; err = ip_options_get_from_user(sock_net(sk), &opt, optval, optlen); if (err) break; old = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); if (inet->is_icsk) { struct inet_connection_sock *icsk = inet_csk(sk); #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == PF_INET || (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && inet->inet_daddr != LOOPBACK4_IPV6)) { #endif if (old) icsk->icsk_ext_hdr_len -= old->opt.optlen; if (opt) icsk->icsk_ext_hdr_len += opt->opt.optlen; icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); #if IS_ENABLED(CONFIG_IPV6) } #endif } rcu_assign_pointer(inet->inet_opt, opt); if (old) kfree_rcu(old, rcu); break; } case IP_PKTINFO: if (val) inet->cmsg_flags |= IP_CMSG_PKTINFO; else inet->cmsg_flags &= ~IP_CMSG_PKTINFO; break; case IP_RECVTTL: if (val) inet->cmsg_flags |= IP_CMSG_TTL; else inet->cmsg_flags &= ~IP_CMSG_TTL; break; case IP_RECVTOS: if (val) inet->cmsg_flags |= IP_CMSG_TOS; else inet->cmsg_flags &= ~IP_CMSG_TOS; break; case IP_RECVOPTS: if (val) inet->cmsg_flags |= IP_CMSG_RECVOPTS; else inet->cmsg_flags &= ~IP_CMSG_RECVOPTS; break; case IP_RETOPTS: if (val) inet->cmsg_flags |= IP_CMSG_RETOPTS; else inet->cmsg_flags &= ~IP_CMSG_RETOPTS; break; case IP_PASSSEC: if (val) inet->cmsg_flags |= IP_CMSG_PASSSEC; else inet->cmsg_flags &= ~IP_CMSG_PASSSEC; break; case IP_RECVORIGDSTADDR: if (val) inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR; else inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR; break; case IP_CHECKSUM: if (val) { if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) { inet_inc_convert_csum(sk); inet->cmsg_flags |= IP_CMSG_CHECKSUM; } } else { if (inet->cmsg_flags & IP_CMSG_CHECKSUM) { inet_dec_convert_csum(sk); inet->cmsg_flags &= ~IP_CMSG_CHECKSUM; } } break; case IP_RECVFRAGSIZE: if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM) goto e_inval; 
if (val) inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE; else inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE; break; case IP_TOS: /* This sets both TOS and Precedence */ if (sk->sk_type == SOCK_STREAM) { val &= ~INET_ECN_MASK; val |= inet->tos & INET_ECN_MASK; } if (inet->tos != val) { inet->tos = val; sk->sk_priority = rt_tos2priority(val); sk_dst_reset(sk); } break; case IP_TTL: if (optlen < 1) goto e_inval; if (val != -1 && (val < 1 || val > 255)) goto e_inval; inet->uc_ttl = val; break; case IP_HDRINCL: if (sk->sk_type != SOCK_RAW) { err = -ENOPROTOOPT; break; } inet->hdrincl = val ? 1 : 0; break; case IP_NODEFRAG: if (sk->sk_type != SOCK_RAW) { err = -ENOPROTOOPT; break; } inet->nodefrag = val ? 1 : 0; break; case IP_BIND_ADDRESS_NO_PORT: inet->bind_address_no_port = val ? 1 : 0; break; case IP_MTU_DISCOVER: if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT) goto e_inval; inet->pmtudisc = val; break; case IP_RECVERR: inet->recverr = !!val; if (!val) skb_queue_purge(&sk->sk_error_queue); break; case IP_MULTICAST_TTL: if (sk->sk_type == SOCK_STREAM) goto e_inval; if (optlen < 1) goto e_inval; if (val == -1) val = 1; if (val < 0 || val > 255) goto e_inval; inet->mc_ttl = val; break; case IP_MULTICAST_LOOP: if (optlen < 1) goto e_inval; inet->mc_loop = !!val; break; case IP_UNICAST_IF: { struct net_device *dev = NULL; int ifindex; if (optlen != sizeof(int)) goto e_inval; ifindex = (__force int)ntohl((__force __be32)val); if (ifindex == 0) { inet->uc_index = 0; err = 0; break; } dev = dev_get_by_index(sock_net(sk), ifindex); err = -EADDRNOTAVAIL; if (!dev) break; dev_put(dev); err = -EINVAL; if (sk->sk_bound_dev_if) break; inet->uc_index = ifindex; err = 0; break; } case IP_MULTICAST_IF: { struct ip_mreqn mreq; struct net_device *dev = NULL; int midx; if (sk->sk_type == SOCK_STREAM) goto e_inval; /* * Check the arguments are allowable */ if (optlen < sizeof(struct in_addr)) goto e_inval; err = -EFAULT; if (optlen >= sizeof(struct ip_mreqn)) { if (copy_from_user(&mreq, optval, sizeof(mreq))) break; } else { memset(&mreq, 0, sizeof(mreq)); if (optlen >= sizeof(struct ip_mreq)) { if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq))) break; } else if (optlen >= sizeof(struct in_addr)) { if (copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr))) break; } } if (!mreq.imr_ifindex) { if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) { inet->mc_index = 0; inet->mc_addr = 0; err = 0; break; } dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr); if (dev) mreq.imr_ifindex = dev->ifindex; } else dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex); err = -EADDRNOTAVAIL; if (!dev) break; midx = l3mdev_master_ifindex(dev); dev_put(dev); err = -EINVAL; if (sk->sk_bound_dev_if && mreq.imr_ifindex != sk->sk_bound_dev_if && (!midx || midx != sk->sk_bound_dev_if)) break; inet->mc_index = mreq.imr_ifindex; inet->mc_addr = mreq.imr_address.s_addr; err = 0; break; } case IP_ADD_MEMBERSHIP: case IP_DROP_MEMBERSHIP: { struct ip_mreqn mreq; err = -EPROTO; if (inet_sk(sk)->is_icsk) break; if (optlen < sizeof(struct ip_mreq)) goto e_inval; err = -EFAULT; if (optlen >= sizeof(struct ip_mreqn)) { if (copy_from_user(&mreq, optval, sizeof(mreq))) break; } else { memset(&mreq, 0, sizeof(mreq)); if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq))) break; } if (optname == IP_ADD_MEMBERSHIP) err = ip_mc_join_group(sk, &mreq); else err = ip_mc_leave_group(sk, &mreq); break; } case IP_MSFILTER: { struct ip_msfilter *msf; if (optlen < IP_MSFILTER_SIZE(0)) goto e_inval; if (optlen > sysctl_optmem_max) { err 
= -ENOBUFS; break; } msf = kmalloc(optlen, GFP_KERNEL); if (!msf) { err = -ENOBUFS; break; } err = -EFAULT; if (copy_from_user(msf, optval, optlen)) { kfree(msf); break; } /* numsrc >= (1G-4) overflow in 32 bits */ if (msf->imsf_numsrc >= 0x3ffffffcU || msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) { kfree(msf); err = -ENOBUFS; break; } if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) { kfree(msf); err = -EINVAL; break; } err = ip_mc_msfilter(sk, msf, 0); kfree(msf); break; } case IP_BLOCK_SOURCE: case IP_UNBLOCK_SOURCE: case IP_ADD_SOURCE_MEMBERSHIP: case IP_DROP_SOURCE_MEMBERSHIP: { struct ip_mreq_source mreqs; int omode, add; if (optlen != sizeof(struct ip_mreq_source)) goto e_inval; if (copy_from_user(&mreqs, optval, sizeof(mreqs))) { err = -EFAULT; break; } if (optname == IP_BLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 1; } else if (optname == IP_UNBLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 0; } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) { struct ip_mreqn mreq; mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr; mreq.imr_address.s_addr = mreqs.imr_interface; mreq.imr_ifindex = 0; err = ip_mc_join_group(sk, &mreq); if (err && err != -EADDRINUSE) break; omode = MCAST_INCLUDE; add = 1; } else /* IP_DROP_SOURCE_MEMBERSHIP */ { omode = MCAST_INCLUDE; add = 0; } err = ip_mc_source(add, omode, sk, &mreqs, 0); break; } case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: { struct group_req greq; struct sockaddr_in *psin; struct ip_mreqn mreq; if (optlen < sizeof(struct group_req)) goto e_inval; err = -EFAULT; if (copy_from_user(&greq, optval, sizeof(greq))) break; psin = (struct sockaddr_in *)&greq.gr_group; if (psin->sin_family != AF_INET) goto e_inval; memset(&mreq, 0, sizeof(mreq)); mreq.imr_multiaddr = psin->sin_addr; mreq.imr_ifindex = greq.gr_interface; if (optname == MCAST_JOIN_GROUP) err = ip_mc_join_group(sk, &mreq); else err = ip_mc_leave_group(sk, &mreq); break; } case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: { struct group_source_req greqs; struct ip_mreq_source mreqs; struct sockaddr_in *psin; int omode, add; if (optlen != sizeof(struct group_source_req)) goto e_inval; if (copy_from_user(&greqs, optval, sizeof(greqs))) { err = -EFAULT; break; } if (greqs.gsr_group.ss_family != AF_INET || greqs.gsr_source.ss_family != AF_INET) { err = -EADDRNOTAVAIL; break; } psin = (struct sockaddr_in *)&greqs.gsr_group; mreqs.imr_multiaddr = psin->sin_addr.s_addr; psin = (struct sockaddr_in *)&greqs.gsr_source; mreqs.imr_sourceaddr = psin->sin_addr.s_addr; mreqs.imr_interface = 0; /* use index for mc_source */ if (optname == MCAST_BLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 1; } else if (optname == MCAST_UNBLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 0; } else if (optname == MCAST_JOIN_SOURCE_GROUP) { struct ip_mreqn mreq; psin = (struct sockaddr_in *)&greqs.gsr_group; mreq.imr_multiaddr = psin->sin_addr; mreq.imr_address.s_addr = 0; mreq.imr_ifindex = greqs.gsr_interface; err = ip_mc_join_group(sk, &mreq); if (err && err != -EADDRINUSE) break; greqs.gsr_interface = mreq.imr_ifindex; omode = MCAST_INCLUDE; add = 1; } else /* MCAST_LEAVE_SOURCE_GROUP */ { omode = MCAST_INCLUDE; add = 0; } err = ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface); break; } case MCAST_MSFILTER: { struct sockaddr_in *psin; struct ip_msfilter *msf = NULL; struct group_filter *gsf = NULL; int msize, i, ifindex; if (optlen < GROUP_FILTER_SIZE(0)) goto e_inval; if (optlen > sysctl_optmem_max) { err = -ENOBUFS; break; } gsf = kmalloc(optlen, 
GFP_KERNEL); if (!gsf) { err = -ENOBUFS; break; } err = -EFAULT; if (copy_from_user(gsf, optval, optlen)) goto mc_msf_out; /* numsrc >= (4G-140)/128 overflow in 32 bits */ if (gsf->gf_numsrc >= 0x1ffffff || gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) { err = -ENOBUFS; goto mc_msf_out; } if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) { err = -EINVAL; goto mc_msf_out; } msize = IP_MSFILTER_SIZE(gsf->gf_numsrc); msf = kmalloc(msize, GFP_KERNEL); if (!msf) { err = -ENOBUFS; goto mc_msf_out; } ifindex = gsf->gf_interface; psin = (struct sockaddr_in *)&gsf->gf_group; if (psin->sin_family != AF_INET) { err = -EADDRNOTAVAIL; goto mc_msf_out; } msf->imsf_multiaddr = psin->sin_addr.s_addr; msf->imsf_interface = 0; msf->imsf_fmode = gsf->gf_fmode; msf->imsf_numsrc = gsf->gf_numsrc; err = -EADDRNOTAVAIL; for (i = 0; i < gsf->gf_numsrc; ++i) { psin = (struct sockaddr_in *)&gsf->gf_slist[i]; if (psin->sin_family != AF_INET) goto mc_msf_out; msf->imsf_slist[i] = psin->sin_addr.s_addr; } kfree(gsf); gsf = NULL; err = ip_mc_msfilter(sk, msf, ifindex); mc_msf_out: kfree(msf); kfree(gsf); break; } case IP_MULTICAST_ALL: if (optlen < 1) goto e_inval; if (val != 0 && val != 1) goto e_inval; inet->mc_all = val; break; case IP_ROUTER_ALERT: err = ip_ra_control(sk, val ? 1 : 0, NULL); break; case IP_FREEBIND: if (optlen < 1) goto e_inval; inet->freebind = !!val; break; case IP_IPSEC_POLICY: case IP_XFRM_POLICY: err = -EPERM; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) break; err = xfrm_user_policy(sk, optname, optval, optlen); break; case IP_TRANSPARENT: if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { err = -EPERM; break; } if (optlen < 1) goto e_inval; inet->transparent = !!val; break; case IP_MINTTL: if (optlen < 1) goto e_inval; if (val < 0 || val > 255) goto e_inval; inet->min_ttl = val; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); if (needs_rtnl) rtnl_unlock(); return err; e_inval: release_sock(sk); if (needs_rtnl) rtnl_unlock(); return -EINVAL; } /** * ipv4_pktinfo_prepare - transfer some info from rtable to skb * @sk: socket * @skb: buffer * * To support IP_CMSG_PKTINFO option, we store rt_iif and specific * destination in skb->cb[] before dst drop. * This way, receiver doesn't make cache line misses to read rtable. */ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) { struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb); bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) || ipv6_sk_rxinfo(sk); if (prepare && skb_rtable(skb)) { /* skb->cb is overloaded: prior to this point it is IP{6}CB * which has interface index (iif) as the first member of the * underlying inet{6}_skb_parm struct. This code then overlays * PKTINFO_SKB_CB and in_pktinfo also has iif as the first * element so the iif is picked up from the prior IPCB. If iif * is the loopback interface, then return the sending interface * (e.g., process binds socket to eth0 for Tx which is * redirected to loopback in the rtable/dst). */ if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX) pktinfo->ipi_ifindex = inet_iif(skb); pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb); } else { pktinfo->ipi_ifindex = 0; pktinfo->ipi_spec_dst.s_addr = 0; } /* We need to keep the dst for __ip_options_echo() * We could restrict the test to opt.ts_needtime || opt.srr, * but the following is good enough as IP options are not often used. 
*/ if (unlikely(IPCB(skb)->opt.optlen)) skb_dst_force(skb); else skb_dst_drop(skb); } int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int err; if (level != SOL_IP) return -ENOPROTOOPT; err = do_ip_setsockopt(sk, level, optname, optval, optlen); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_HDRINCL && optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) { lock_sock(sk); err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); release_sock(sk); } #endif return err; } EXPORT_SYMBOL(ip_setsockopt); #ifdef CONFIG_COMPAT int compat_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int err; if (level != SOL_IP) return -ENOPROTOOPT; if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER) return compat_mc_setsockopt(sk, level, optname, optval, optlen, ip_setsockopt); err = do_ip_setsockopt(sk, level, optname, optval, optlen); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_HDRINCL && optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) { lock_sock(sk); err = compat_nf_setsockopt(sk, PF_INET, optname, optval, optlen); release_sock(sk); } #endif return err; } EXPORT_SYMBOL(compat_ip_setsockopt); #endif /* * Get the options. Note for future reference. The GET of IP options gets * the _received_ ones. The set sets the _sent_ ones. */ static bool getsockopt_needs_rtnl(int optname) { switch (optname) { case IP_MSFILTER: case MCAST_MSFILTER: return true; } return false; } static int do_ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); bool needs_rtnl = getsockopt_needs_rtnl(optname); int val, err = 0; int len; if (level != SOL_IP) return -EOPNOTSUPP; if (ip_mroute_opt(optname)) return ip_mroute_getsockopt(sk, optname, optval, optlen); if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; if (needs_rtnl) rtnl_lock(); lock_sock(sk); switch (optname) { case IP_OPTIONS: { unsigned char optbuf[sizeof(struct ip_options)+40]; struct ip_options *opt = (struct ip_options *)optbuf; struct ip_options_rcu *inet_opt; inet_opt = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); opt->optlen = 0; if (inet_opt) memcpy(optbuf, &inet_opt->opt, sizeof(struct ip_options) + inet_opt->opt.optlen); release_sock(sk); if (opt->optlen == 0) return put_user(0, optlen); ip_options_undo(opt); len = min_t(unsigned int, len, opt->optlen); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, opt->__data, len)) return -EFAULT; return 0; } case IP_PKTINFO: val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0; break; case IP_RECVTTL: val = (inet->cmsg_flags & IP_CMSG_TTL) != 0; break; case IP_RECVTOS: val = (inet->cmsg_flags & IP_CMSG_TOS) != 0; break; case IP_RECVOPTS: val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0; break; case IP_RETOPTS: val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0; break; case IP_PASSSEC: val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0; break; case IP_RECVORIGDSTADDR: val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0; break; case IP_CHECKSUM: val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0; break; case IP_RECVFRAGSIZE: val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0; break; case IP_TOS: val = inet->tos; break; 
case IP_TTL: { struct net *net = sock_net(sk); val = (inet->uc_ttl == -1 ? net->ipv4.sysctl_ip_default_ttl : inet->uc_ttl); break; } case IP_HDRINCL: val = inet->hdrincl; break; case IP_NODEFRAG: val = inet->nodefrag; break; case IP_BIND_ADDRESS_NO_PORT: val = inet->bind_address_no_port; break; case IP_MTU_DISCOVER: val = inet->pmtudisc; break; case IP_MTU: { struct dst_entry *dst; val = 0; dst = sk_dst_get(sk); if (dst) { val = dst_mtu(dst); dst_release(dst); } if (!val) { release_sock(sk); return -ENOTCONN; } break; } case IP_RECVERR: val = inet->recverr; break; case IP_MULTICAST_TTL: val = inet->mc_ttl; break; case IP_MULTICAST_LOOP: val = inet->mc_loop; break; case IP_UNICAST_IF: val = (__force int)htonl((__u32) inet->uc_index); break; case IP_MULTICAST_IF: { struct in_addr addr; len = min_t(unsigned int, len, sizeof(struct in_addr)); addr.s_addr = inet->mc_addr; release_sock(sk); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &addr, len)) return -EFAULT; return 0; } case IP_MSFILTER: { struct ip_msfilter msf; if (len < IP_MSFILTER_SIZE(0)) { err = -EINVAL; goto out; } if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) { err = -EFAULT; goto out; } err = ip_mc_msfget(sk, &msf, (struct ip_msfilter __user *)optval, optlen); goto out; } case MCAST_MSFILTER: { struct group_filter gsf; if (len < GROUP_FILTER_SIZE(0)) { err = -EINVAL; goto out; } if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) { err = -EFAULT; goto out; } err = ip_mc_gsfget(sk, &gsf, (struct group_filter __user *)optval, optlen); goto out; } case IP_MULTICAST_ALL: val = inet->mc_all; break; case IP_PKTOPTIONS: { struct msghdr msg; release_sock(sk); if (sk->sk_type != SOCK_STREAM) return -ENOPROTOOPT; msg.msg_control = (__force void *) optval; msg.msg_controllen = len; msg.msg_flags = flags; if (inet->cmsg_flags & IP_CMSG_PKTINFO) { struct in_pktinfo info; info.ipi_addr.s_addr = inet->inet_rcv_saddr; info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr; info.ipi_ifindex = inet->mc_index; put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); } if (inet->cmsg_flags & IP_CMSG_TTL) { int hlim = inet->mc_ttl; put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim); } if (inet->cmsg_flags & IP_CMSG_TOS) { int tos = inet->rcv_tos; put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos); } len -= msg.msg_controllen; return put_user(len, optlen); } case IP_FREEBIND: val = inet->freebind; break; case IP_TRANSPARENT: val = inet->transparent; break; case IP_MINTTL: val = inet->min_ttl; break; default: release_sock(sk); return -ENOPROTOOPT; } release_sock(sk); if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { unsigned char ucval = (unsigned char)val; len = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &ucval, 1)) return -EFAULT; } else { len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; } return 0; out: release_sock(sk); if (needs_rtnl) rtnl_unlock(); return err; } int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int err; err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && !ip_mroute_opt(optname)) { int len; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); err = nf_getsockopt(sk, PF_INET, optname, optval, &len); release_sock(sk); if (err >= 0) err = put_user(len, optlen); 
return err; } #endif return err; } EXPORT_SYMBOL(ip_getsockopt); #ifdef CONFIG_COMPAT int compat_ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int err; if (optname == MCAST_MSFILTER) return compat_mc_getsockopt(sk, level, optname, optval, optlen, ip_getsockopt); err = do_ip_getsockopt(sk, level, optname, optval, optlen, MSG_CMSG_COMPAT); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && !ip_mroute_opt(optname)) { int len; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); release_sock(sk); if (err >= 0) err = put_user(len, optlen); return err; } #endif return err; } EXPORT_SYMBOL(compat_ip_getsockopt); #endif
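The text above is the record's code_before field: the full net/ipv4/ip_sockglue.c with its line breaks collapsed. Since the record's method_name is ip_cmsg_recv_checksum, here is that function extracted from the code_before text and reindented for readability; the body is unchanged.

    /* ip_cmsg_recv_checksum() as it appears in code_before: the checksum of
     * the trailing bytes is computed with csum_partial() directly over the
     * linear transport header area.
     */
    static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
                                      int tlen, int offset)
    {
            __wsum csum = skb->csum;

            if (skb->ip_summed != CHECKSUM_COMPLETE)
                    return;

            if (offset != 0)
                    csum = csum_sub(csum,
                                    csum_partial(skb_transport_header(skb) + tlen,
                                                 offset, 0));

            put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
    }

The text that follows is the record's code_after field.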
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The IP to API glue. * * Authors: see ip.c * * Fixes: * Many : Split from ip.c , see ip.c for history. * Martin Mares : TOS setting fixed. * Alan Cox : Fixed a couple of oopses in Martin's * TOS tweaks. * Mike McLagan : Routing by source */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp_states.h> #include <linux/udp.h> #include <linux/igmp.h> #include <linux/netfilter.h> #include <linux/route.h> #include <linux/mroute.h> #include <net/inet_ecn.h> #include <net/route.h> #include <net/xfrm.h> #include <net/compat.h> #include <net/checksum.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/transp_v6.h> #endif #include <net/ip_fib.h> #include <linux/errqueue.h> #include <linux/uaccess.h> /* * SOL_IP control messages. */ static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) { struct in_pktinfo info = *PKTINFO_SKB_CB(skb); info.ipi_addr.s_addr = ip_hdr(skb)->daddr; put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); } static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb) { int ttl = ip_hdr(skb)->ttl; put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl); } static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb) { put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos); } static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) { if (IPCB(skb)->opt.optlen == 0) return; put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen, ip_hdr(skb) + 1); } static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) { unsigned char optbuf[sizeof(struct ip_options) + 40]; struct ip_options *opt = (struct ip_options *)optbuf; if (IPCB(skb)->opt.optlen == 0) return; if (ip_options_echo(opt, skb)) { msg->msg_flags |= MSG_CTRUNC; return; } ip_options_undo(opt); put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data); } static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb) { int val; if (IPCB(skb)->frag_max_size == 0) return; val = IPCB(skb)->frag_max_size; put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val); } static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset) { __wsum csum = skb->csum; if (skb->ip_summed != CHECKSUM_COMPLETE) return; if (offset != 0) { int tend_off = skb_transport_offset(skb) + tlen; csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0)); } put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); } static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) { char *secdata; u32 seclen, secid; int err; err = security_socket_getpeersec_dgram(NULL, skb, &secid); if (err) return; err = security_secid_to_secctx(secid, &secdata, &seclen); if (err) return; put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata); security_release_secctx(secdata, seclen); } static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) { struct sockaddr_in sin; const struct iphdr *iph = ip_hdr(skb); __be16 *ports = (__be16 *)skb_transport_header(skb); if (skb_transport_offset(skb) + 4 > (int)skb->len) return; /* All current transport protocols have the port numbers in the * first four bytes of 
the transport header and this function is * written with this assumption in mind. */ sin.sin_family = AF_INET; sin.sin_addr.s_addr = iph->daddr; sin.sin_port = ports[1]; memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin); } void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, int tlen, int offset) { struct inet_sock *inet = inet_sk(sk); unsigned int flags = inet->cmsg_flags; /* Ordered by supposed usage frequency */ if (flags & IP_CMSG_PKTINFO) { ip_cmsg_recv_pktinfo(msg, skb); flags &= ~IP_CMSG_PKTINFO; if (!flags) return; } if (flags & IP_CMSG_TTL) { ip_cmsg_recv_ttl(msg, skb); flags &= ~IP_CMSG_TTL; if (!flags) return; } if (flags & IP_CMSG_TOS) { ip_cmsg_recv_tos(msg, skb); flags &= ~IP_CMSG_TOS; if (!flags) return; } if (flags & IP_CMSG_RECVOPTS) { ip_cmsg_recv_opts(msg, skb); flags &= ~IP_CMSG_RECVOPTS; if (!flags) return; } if (flags & IP_CMSG_RETOPTS) { ip_cmsg_recv_retopts(msg, skb); flags &= ~IP_CMSG_RETOPTS; if (!flags) return; } if (flags & IP_CMSG_PASSSEC) { ip_cmsg_recv_security(msg, skb); flags &= ~IP_CMSG_PASSSEC; if (!flags) return; } if (flags & IP_CMSG_ORIGDSTADDR) { ip_cmsg_recv_dstaddr(msg, skb); flags &= ~IP_CMSG_ORIGDSTADDR; if (!flags) return; } if (flags & IP_CMSG_CHECKSUM) ip_cmsg_recv_checksum(msg, skb, tlen, offset); if (flags & IP_CMSG_RECVFRAGSIZE) ip_cmsg_recv_fragsize(msg, skb); } EXPORT_SYMBOL(ip_cmsg_recv_offset); int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6) { int err, val; struct cmsghdr *cmsg; struct net *net = sock_net(sk); for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; #if IS_ENABLED(CONFIG_IPV6) if (allow_ipv6 && cmsg->cmsg_level == SOL_IPV6 && cmsg->cmsg_type == IPV6_PKTINFO) { struct in6_pktinfo *src_info; if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info))) return -EINVAL; src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); if (!ipv6_addr_v4mapped(&src_info->ipi6_addr)) return -EINVAL; ipc->oif = src_info->ipi6_ifindex; ipc->addr = src_info->ipi6_addr.s6_addr32[3]; continue; } #endif if (cmsg->cmsg_level == SOL_SOCKET) { err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc); if (err) return err; continue; } if (cmsg->cmsg_level != SOL_IP) continue; switch (cmsg->cmsg_type) { case IP_RETOPTS: err = cmsg->cmsg_len - sizeof(struct cmsghdr); /* Our caller is responsible for freeing ipc->opt */ err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40); if (err) return err; break; case IP_PKTINFO: { struct in_pktinfo *info; if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) return -EINVAL; info = (struct in_pktinfo *)CMSG_DATA(cmsg); ipc->oif = info->ipi_ifindex; ipc->addr = info->ipi_spec_dst.s_addr; break; } case IP_TTL: if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) return -EINVAL; val = *(int *)CMSG_DATA(cmsg); if (val < 1 || val > 255) return -EINVAL; ipc->ttl = val; break; case IP_TOS: if (cmsg->cmsg_len == CMSG_LEN(sizeof(int))) val = *(int *)CMSG_DATA(cmsg); else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8))) val = *(u8 *)CMSG_DATA(cmsg); else return -EINVAL; if (val < 0 || val > 255) return -EINVAL; ipc->tos = val; ipc->priority = rt_tos2priority(ipc->tos); break; default: return -EINVAL; } } return 0; } /* Special input handler for packets caught by router alert option. They are selected only by protocol field, and then processed likely local ones; but only if someone wants them! Otherwise, router not running rsvpd will kill RSVP. 
It is user level problem, what it will make with them. I have no idea, how it will masquearde or NAT them (it is joke, joke :-)), but receiver should be enough clever f.e. to forward mtrace requests, sent to multicast group to reach destination designated router. */ struct ip_ra_chain __rcu *ip_ra_chain; static DEFINE_SPINLOCK(ip_ra_lock); static void ip_ra_destroy_rcu(struct rcu_head *head) { struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu); sock_put(ra->saved_sk); kfree(ra); } int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)) { struct ip_ra_chain *ra, *new_ra; struct ip_ra_chain __rcu **rap; if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW) return -EINVAL; new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; spin_lock_bh(&ip_ra_lock); for (rap = &ip_ra_chain; (ra = rcu_dereference_protected(*rap, lockdep_is_held(&ip_ra_lock))) != NULL; rap = &ra->next) { if (ra->sk == sk) { if (on) { spin_unlock_bh(&ip_ra_lock); kfree(new_ra); return -EADDRINUSE; } /* dont let ip_call_ra_chain() use sk again */ ra->sk = NULL; RCU_INIT_POINTER(*rap, ra->next); spin_unlock_bh(&ip_ra_lock); if (ra->destructor) ra->destructor(sk); /* * Delay sock_put(sk) and kfree(ra) after one rcu grace * period. This guarantee ip_call_ra_chain() dont need * to mess with socket refcounts. */ ra->saved_sk = sk; call_rcu(&ra->rcu, ip_ra_destroy_rcu); return 0; } } if (!new_ra) { spin_unlock_bh(&ip_ra_lock); return -ENOBUFS; } new_ra->sk = sk; new_ra->destructor = destructor; RCU_INIT_POINTER(new_ra->next, ra); rcu_assign_pointer(*rap, new_ra); sock_hold(sk); spin_unlock_bh(&ip_ra_lock); return 0; } void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { struct sock_exterr_skb *serr; skb = skb_clone(skb, GFP_ATOMIC); if (!skb) return; serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; serr->ee.ee_origin = SO_EE_ORIGIN_ICMP; serr->ee.ee_type = icmp_hdr(skb)->type; serr->ee.ee_code = icmp_hdr(skb)->code; serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) - skb_network_header(skb); serr->port = port; if (skb_pull(skb, payload - skb->data)) { skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb) == 0) return; } kfree_skb(skb); } void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info) { struct inet_sock *inet = inet_sk(sk); struct sock_exterr_skb *serr; struct iphdr *iph; struct sk_buff *skb; if (!inet->recverr) return; skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC); if (!skb) return; skb_put(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->daddr = daddr; serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL; serr->ee.ee_type = 0; serr->ee.ee_code = 0; serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); serr->port = port; __skb_pull(skb, skb_tail_pointer(skb) - skb->data); skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb)) kfree_skb(skb); } /* For some errors we have valid addr_offset even with zero payload and * zero port. Also, addr_offset should be supported if port is set. 
*/ static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr) { return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port; } /* IPv4 supports cmsg on all imcp errors and some timestamps * * Timestamp code paths do not initialize the fields expected by cmsg: * the PKTINFO fields in skb->cb[]. Fill those in here. */ static bool ipv4_datagram_support_cmsg(const struct sock *sk, struct sk_buff *skb, int ee_origin) { struct in_pktinfo *info; if (ee_origin == SO_EE_ORIGIN_ICMP) return true; if (ee_origin == SO_EE_ORIGIN_LOCAL) return false; /* Support IP_PKTINFO on tstamp packets if requested, to correlate * timestamp with egress dev. Not possible for packets without dev * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). */ if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || (!skb->dev)) return false; info = PKTINFO_SKB_CB(skb); info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; info->ipi_ifindex = skb->dev->ifindex; return true; } /* * Handle MSG_ERRQUEUE */ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { struct sock_exterr_skb *serr; struct sk_buff *skb; DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct { struct sock_extended_err ee; struct sockaddr_in offender; } errhdr; int err; int copied; WARN_ON_ONCE(sk->sk_family == AF_INET6); err = -EAGAIN; skb = sock_dequeue_err_skb(sk); if (!skb) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (unlikely(err)) { kfree_skb(skb); return err; } sock_recv_timestamp(msg, sk, skb); serr = SKB_EXT_ERR(skb); if (sin && ipv4_datagram_support_addr(serr)) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); sin->sin_port = serr->port; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); sin = &errhdr.offender; memset(sin, 0, sizeof(*sin)); if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; if (inet_sk(sk)->cmsg_flags) ip_cmsg_recv(msg, skb); } put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr); /* Now we could try to dump offended packet options */ msg->msg_flags |= MSG_ERRQUEUE; err = copied; consume_skb(skb); out: return err; } /* * Socket option code for IP. This is the end of the line after any * TCP,UDP etc options on an IP socket. 
*/ static bool setsockopt_needs_rtnl(int optname) { switch (optname) { case IP_ADD_MEMBERSHIP: case IP_ADD_SOURCE_MEMBERSHIP: case IP_BLOCK_SOURCE: case IP_DROP_MEMBERSHIP: case IP_DROP_SOURCE_MEMBERSHIP: case IP_MSFILTER: case IP_UNBLOCK_SOURCE: case MCAST_BLOCK_SOURCE: case MCAST_MSFILTER: case MCAST_JOIN_GROUP: case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_UNBLOCK_SOURCE: return true; } return false; } static int do_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); int val = 0, err; bool needs_rtnl = setsockopt_needs_rtnl(optname); switch (optname) { case IP_PKTINFO: case IP_RECVTTL: case IP_RECVOPTS: case IP_RECVTOS: case IP_RETOPTS: case IP_TOS: case IP_TTL: case IP_HDRINCL: case IP_MTU_DISCOVER: case IP_RECVERR: case IP_ROUTER_ALERT: case IP_FREEBIND: case IP_PASSSEC: case IP_TRANSPARENT: case IP_MINTTL: case IP_NODEFRAG: case IP_BIND_ADDRESS_NO_PORT: case IP_UNICAST_IF: case IP_MULTICAST_TTL: case IP_MULTICAST_ALL: case IP_MULTICAST_LOOP: case IP_RECVORIGDSTADDR: case IP_CHECKSUM: case IP_RECVFRAGSIZE: if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; } else if (optlen >= sizeof(char)) { unsigned char ucval; if (get_user(ucval, (unsigned char __user *) optval)) return -EFAULT; val = (int) ucval; } } /* If optlen==0, it is equivalent to val == 0 */ if (ip_mroute_opt(optname)) return ip_mroute_setsockopt(sk, optname, optval, optlen); err = 0; if (needs_rtnl) rtnl_lock(); lock_sock(sk); switch (optname) { case IP_OPTIONS: { struct ip_options_rcu *old, *opt = NULL; if (optlen > 40) goto e_inval; err = ip_options_get_from_user(sock_net(sk), &opt, optval, optlen); if (err) break; old = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); if (inet->is_icsk) { struct inet_connection_sock *icsk = inet_csk(sk); #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == PF_INET || (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && inet->inet_daddr != LOOPBACK4_IPV6)) { #endif if (old) icsk->icsk_ext_hdr_len -= old->opt.optlen; if (opt) icsk->icsk_ext_hdr_len += opt->opt.optlen; icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); #if IS_ENABLED(CONFIG_IPV6) } #endif } rcu_assign_pointer(inet->inet_opt, opt); if (old) kfree_rcu(old, rcu); break; } case IP_PKTINFO: if (val) inet->cmsg_flags |= IP_CMSG_PKTINFO; else inet->cmsg_flags &= ~IP_CMSG_PKTINFO; break; case IP_RECVTTL: if (val) inet->cmsg_flags |= IP_CMSG_TTL; else inet->cmsg_flags &= ~IP_CMSG_TTL; break; case IP_RECVTOS: if (val) inet->cmsg_flags |= IP_CMSG_TOS; else inet->cmsg_flags &= ~IP_CMSG_TOS; break; case IP_RECVOPTS: if (val) inet->cmsg_flags |= IP_CMSG_RECVOPTS; else inet->cmsg_flags &= ~IP_CMSG_RECVOPTS; break; case IP_RETOPTS: if (val) inet->cmsg_flags |= IP_CMSG_RETOPTS; else inet->cmsg_flags &= ~IP_CMSG_RETOPTS; break; case IP_PASSSEC: if (val) inet->cmsg_flags |= IP_CMSG_PASSSEC; else inet->cmsg_flags &= ~IP_CMSG_PASSSEC; break; case IP_RECVORIGDSTADDR: if (val) inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR; else inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR; break; case IP_CHECKSUM: if (val) { if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) { inet_inc_convert_csum(sk); inet->cmsg_flags |= IP_CMSG_CHECKSUM; } } else { if (inet->cmsg_flags & IP_CMSG_CHECKSUM) { inet_dec_convert_csum(sk); inet->cmsg_flags &= ~IP_CMSG_CHECKSUM; } } break; case IP_RECVFRAGSIZE: if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM) goto e_inval; 
if (val) inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE; else inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE; break; case IP_TOS: /* This sets both TOS and Precedence */ if (sk->sk_type == SOCK_STREAM) { val &= ~INET_ECN_MASK; val |= inet->tos & INET_ECN_MASK; } if (inet->tos != val) { inet->tos = val; sk->sk_priority = rt_tos2priority(val); sk_dst_reset(sk); } break; case IP_TTL: if (optlen < 1) goto e_inval; if (val != -1 && (val < 1 || val > 255)) goto e_inval; inet->uc_ttl = val; break; case IP_HDRINCL: if (sk->sk_type != SOCK_RAW) { err = -ENOPROTOOPT; break; } inet->hdrincl = val ? 1 : 0; break; case IP_NODEFRAG: if (sk->sk_type != SOCK_RAW) { err = -ENOPROTOOPT; break; } inet->nodefrag = val ? 1 : 0; break; case IP_BIND_ADDRESS_NO_PORT: inet->bind_address_no_port = val ? 1 : 0; break; case IP_MTU_DISCOVER: if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT) goto e_inval; inet->pmtudisc = val; break; case IP_RECVERR: inet->recverr = !!val; if (!val) skb_queue_purge(&sk->sk_error_queue); break; case IP_MULTICAST_TTL: if (sk->sk_type == SOCK_STREAM) goto e_inval; if (optlen < 1) goto e_inval; if (val == -1) val = 1; if (val < 0 || val > 255) goto e_inval; inet->mc_ttl = val; break; case IP_MULTICAST_LOOP: if (optlen < 1) goto e_inval; inet->mc_loop = !!val; break; case IP_UNICAST_IF: { struct net_device *dev = NULL; int ifindex; if (optlen != sizeof(int)) goto e_inval; ifindex = (__force int)ntohl((__force __be32)val); if (ifindex == 0) { inet->uc_index = 0; err = 0; break; } dev = dev_get_by_index(sock_net(sk), ifindex); err = -EADDRNOTAVAIL; if (!dev) break; dev_put(dev); err = -EINVAL; if (sk->sk_bound_dev_if) break; inet->uc_index = ifindex; err = 0; break; } case IP_MULTICAST_IF: { struct ip_mreqn mreq; struct net_device *dev = NULL; int midx; if (sk->sk_type == SOCK_STREAM) goto e_inval; /* * Check the arguments are allowable */ if (optlen < sizeof(struct in_addr)) goto e_inval; err = -EFAULT; if (optlen >= sizeof(struct ip_mreqn)) { if (copy_from_user(&mreq, optval, sizeof(mreq))) break; } else { memset(&mreq, 0, sizeof(mreq)); if (optlen >= sizeof(struct ip_mreq)) { if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq))) break; } else if (optlen >= sizeof(struct in_addr)) { if (copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr))) break; } } if (!mreq.imr_ifindex) { if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) { inet->mc_index = 0; inet->mc_addr = 0; err = 0; break; } dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr); if (dev) mreq.imr_ifindex = dev->ifindex; } else dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex); err = -EADDRNOTAVAIL; if (!dev) break; midx = l3mdev_master_ifindex(dev); dev_put(dev); err = -EINVAL; if (sk->sk_bound_dev_if && mreq.imr_ifindex != sk->sk_bound_dev_if && (!midx || midx != sk->sk_bound_dev_if)) break; inet->mc_index = mreq.imr_ifindex; inet->mc_addr = mreq.imr_address.s_addr; err = 0; break; } case IP_ADD_MEMBERSHIP: case IP_DROP_MEMBERSHIP: { struct ip_mreqn mreq; err = -EPROTO; if (inet_sk(sk)->is_icsk) break; if (optlen < sizeof(struct ip_mreq)) goto e_inval; err = -EFAULT; if (optlen >= sizeof(struct ip_mreqn)) { if (copy_from_user(&mreq, optval, sizeof(mreq))) break; } else { memset(&mreq, 0, sizeof(mreq)); if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq))) break; } if (optname == IP_ADD_MEMBERSHIP) err = ip_mc_join_group(sk, &mreq); else err = ip_mc_leave_group(sk, &mreq); break; } case IP_MSFILTER: { struct ip_msfilter *msf; if (optlen < IP_MSFILTER_SIZE(0)) goto e_inval; if (optlen > sysctl_optmem_max) { err 
= -ENOBUFS; break; } msf = kmalloc(optlen, GFP_KERNEL); if (!msf) { err = -ENOBUFS; break; } err = -EFAULT; if (copy_from_user(msf, optval, optlen)) { kfree(msf); break; } /* numsrc >= (1G-4) overflow in 32 bits */ if (msf->imsf_numsrc >= 0x3ffffffcU || msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) { kfree(msf); err = -ENOBUFS; break; } if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) { kfree(msf); err = -EINVAL; break; } err = ip_mc_msfilter(sk, msf, 0); kfree(msf); break; } case IP_BLOCK_SOURCE: case IP_UNBLOCK_SOURCE: case IP_ADD_SOURCE_MEMBERSHIP: case IP_DROP_SOURCE_MEMBERSHIP: { struct ip_mreq_source mreqs; int omode, add; if (optlen != sizeof(struct ip_mreq_source)) goto e_inval; if (copy_from_user(&mreqs, optval, sizeof(mreqs))) { err = -EFAULT; break; } if (optname == IP_BLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 1; } else if (optname == IP_UNBLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 0; } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) { struct ip_mreqn mreq; mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr; mreq.imr_address.s_addr = mreqs.imr_interface; mreq.imr_ifindex = 0; err = ip_mc_join_group(sk, &mreq); if (err && err != -EADDRINUSE) break; omode = MCAST_INCLUDE; add = 1; } else /* IP_DROP_SOURCE_MEMBERSHIP */ { omode = MCAST_INCLUDE; add = 0; } err = ip_mc_source(add, omode, sk, &mreqs, 0); break; } case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: { struct group_req greq; struct sockaddr_in *psin; struct ip_mreqn mreq; if (optlen < sizeof(struct group_req)) goto e_inval; err = -EFAULT; if (copy_from_user(&greq, optval, sizeof(greq))) break; psin = (struct sockaddr_in *)&greq.gr_group; if (psin->sin_family != AF_INET) goto e_inval; memset(&mreq, 0, sizeof(mreq)); mreq.imr_multiaddr = psin->sin_addr; mreq.imr_ifindex = greq.gr_interface; if (optname == MCAST_JOIN_GROUP) err = ip_mc_join_group(sk, &mreq); else err = ip_mc_leave_group(sk, &mreq); break; } case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: { struct group_source_req greqs; struct ip_mreq_source mreqs; struct sockaddr_in *psin; int omode, add; if (optlen != sizeof(struct group_source_req)) goto e_inval; if (copy_from_user(&greqs, optval, sizeof(greqs))) { err = -EFAULT; break; } if (greqs.gsr_group.ss_family != AF_INET || greqs.gsr_source.ss_family != AF_INET) { err = -EADDRNOTAVAIL; break; } psin = (struct sockaddr_in *)&greqs.gsr_group; mreqs.imr_multiaddr = psin->sin_addr.s_addr; psin = (struct sockaddr_in *)&greqs.gsr_source; mreqs.imr_sourceaddr = psin->sin_addr.s_addr; mreqs.imr_interface = 0; /* use index for mc_source */ if (optname == MCAST_BLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 1; } else if (optname == MCAST_UNBLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 0; } else if (optname == MCAST_JOIN_SOURCE_GROUP) { struct ip_mreqn mreq; psin = (struct sockaddr_in *)&greqs.gsr_group; mreq.imr_multiaddr = psin->sin_addr; mreq.imr_address.s_addr = 0; mreq.imr_ifindex = greqs.gsr_interface; err = ip_mc_join_group(sk, &mreq); if (err && err != -EADDRINUSE) break; greqs.gsr_interface = mreq.imr_ifindex; omode = MCAST_INCLUDE; add = 1; } else /* MCAST_LEAVE_SOURCE_GROUP */ { omode = MCAST_INCLUDE; add = 0; } err = ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface); break; } case MCAST_MSFILTER: { struct sockaddr_in *psin; struct ip_msfilter *msf = NULL; struct group_filter *gsf = NULL; int msize, i, ifindex; if (optlen < GROUP_FILTER_SIZE(0)) goto e_inval; if (optlen > sysctl_optmem_max) { err = -ENOBUFS; break; } gsf = kmalloc(optlen, 
GFP_KERNEL); if (!gsf) { err = -ENOBUFS; break; } err = -EFAULT; if (copy_from_user(gsf, optval, optlen)) goto mc_msf_out; /* numsrc >= (4G-140)/128 overflow in 32 bits */ if (gsf->gf_numsrc >= 0x1ffffff || gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) { err = -ENOBUFS; goto mc_msf_out; } if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) { err = -EINVAL; goto mc_msf_out; } msize = IP_MSFILTER_SIZE(gsf->gf_numsrc); msf = kmalloc(msize, GFP_KERNEL); if (!msf) { err = -ENOBUFS; goto mc_msf_out; } ifindex = gsf->gf_interface; psin = (struct sockaddr_in *)&gsf->gf_group; if (psin->sin_family != AF_INET) { err = -EADDRNOTAVAIL; goto mc_msf_out; } msf->imsf_multiaddr = psin->sin_addr.s_addr; msf->imsf_interface = 0; msf->imsf_fmode = gsf->gf_fmode; msf->imsf_numsrc = gsf->gf_numsrc; err = -EADDRNOTAVAIL; for (i = 0; i < gsf->gf_numsrc; ++i) { psin = (struct sockaddr_in *)&gsf->gf_slist[i]; if (psin->sin_family != AF_INET) goto mc_msf_out; msf->imsf_slist[i] = psin->sin_addr.s_addr; } kfree(gsf); gsf = NULL; err = ip_mc_msfilter(sk, msf, ifindex); mc_msf_out: kfree(msf); kfree(gsf); break; } case IP_MULTICAST_ALL: if (optlen < 1) goto e_inval; if (val != 0 && val != 1) goto e_inval; inet->mc_all = val; break; case IP_ROUTER_ALERT: err = ip_ra_control(sk, val ? 1 : 0, NULL); break; case IP_FREEBIND: if (optlen < 1) goto e_inval; inet->freebind = !!val; break; case IP_IPSEC_POLICY: case IP_XFRM_POLICY: err = -EPERM; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) break; err = xfrm_user_policy(sk, optname, optval, optlen); break; case IP_TRANSPARENT: if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { err = -EPERM; break; } if (optlen < 1) goto e_inval; inet->transparent = !!val; break; case IP_MINTTL: if (optlen < 1) goto e_inval; if (val < 0 || val > 255) goto e_inval; inet->min_ttl = val; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); if (needs_rtnl) rtnl_unlock(); return err; e_inval: release_sock(sk); if (needs_rtnl) rtnl_unlock(); return -EINVAL; } /** * ipv4_pktinfo_prepare - transfer some info from rtable to skb * @sk: socket * @skb: buffer * * To support IP_CMSG_PKTINFO option, we store rt_iif and specific * destination in skb->cb[] before dst drop. * This way, receiver doesn't make cache line misses to read rtable. */ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) { struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb); bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) || ipv6_sk_rxinfo(sk); if (prepare && skb_rtable(skb)) { /* skb->cb is overloaded: prior to this point it is IP{6}CB * which has interface index (iif) as the first member of the * underlying inet{6}_skb_parm struct. This code then overlays * PKTINFO_SKB_CB and in_pktinfo also has iif as the first * element so the iif is picked up from the prior IPCB. If iif * is the loopback interface, then return the sending interface * (e.g., process binds socket to eth0 for Tx which is * redirected to loopback in the rtable/dst). */ if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX) pktinfo->ipi_ifindex = inet_iif(skb); pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb); } else { pktinfo->ipi_ifindex = 0; pktinfo->ipi_spec_dst.s_addr = 0; } /* We need to keep the dst for __ip_options_echo() * We could restrict the test to opt.ts_needtime || opt.srr, * but the following is good enough as IP options are not often used. 
*/ if (unlikely(IPCB(skb)->opt.optlen)) skb_dst_force(skb); else skb_dst_drop(skb); } int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int err; if (level != SOL_IP) return -ENOPROTOOPT; err = do_ip_setsockopt(sk, level, optname, optval, optlen); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_HDRINCL && optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) { lock_sock(sk); err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); release_sock(sk); } #endif return err; } EXPORT_SYMBOL(ip_setsockopt); #ifdef CONFIG_COMPAT int compat_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int err; if (level != SOL_IP) return -ENOPROTOOPT; if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER) return compat_mc_setsockopt(sk, level, optname, optval, optlen, ip_setsockopt); err = do_ip_setsockopt(sk, level, optname, optval, optlen); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_HDRINCL && optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) { lock_sock(sk); err = compat_nf_setsockopt(sk, PF_INET, optname, optval, optlen); release_sock(sk); } #endif return err; } EXPORT_SYMBOL(compat_ip_setsockopt); #endif /* * Get the options. Note for future reference. The GET of IP options gets * the _received_ ones. The set sets the _sent_ ones. */ static bool getsockopt_needs_rtnl(int optname) { switch (optname) { case IP_MSFILTER: case MCAST_MSFILTER: return true; } return false; } static int do_ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); bool needs_rtnl = getsockopt_needs_rtnl(optname); int val, err = 0; int len; if (level != SOL_IP) return -EOPNOTSUPP; if (ip_mroute_opt(optname)) return ip_mroute_getsockopt(sk, optname, optval, optlen); if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; if (needs_rtnl) rtnl_lock(); lock_sock(sk); switch (optname) { case IP_OPTIONS: { unsigned char optbuf[sizeof(struct ip_options)+40]; struct ip_options *opt = (struct ip_options *)optbuf; struct ip_options_rcu *inet_opt; inet_opt = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); opt->optlen = 0; if (inet_opt) memcpy(optbuf, &inet_opt->opt, sizeof(struct ip_options) + inet_opt->opt.optlen); release_sock(sk); if (opt->optlen == 0) return put_user(0, optlen); ip_options_undo(opt); len = min_t(unsigned int, len, opt->optlen); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, opt->__data, len)) return -EFAULT; return 0; } case IP_PKTINFO: val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0; break; case IP_RECVTTL: val = (inet->cmsg_flags & IP_CMSG_TTL) != 0; break; case IP_RECVTOS: val = (inet->cmsg_flags & IP_CMSG_TOS) != 0; break; case IP_RECVOPTS: val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0; break; case IP_RETOPTS: val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0; break; case IP_PASSSEC: val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0; break; case IP_RECVORIGDSTADDR: val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0; break; case IP_CHECKSUM: val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0; break; case IP_RECVFRAGSIZE: val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0; break; case IP_TOS: val = inet->tos; break; 
case IP_TTL: { struct net *net = sock_net(sk); val = (inet->uc_ttl == -1 ? net->ipv4.sysctl_ip_default_ttl : inet->uc_ttl); break; } case IP_HDRINCL: val = inet->hdrincl; break; case IP_NODEFRAG: val = inet->nodefrag; break; case IP_BIND_ADDRESS_NO_PORT: val = inet->bind_address_no_port; break; case IP_MTU_DISCOVER: val = inet->pmtudisc; break; case IP_MTU: { struct dst_entry *dst; val = 0; dst = sk_dst_get(sk); if (dst) { val = dst_mtu(dst); dst_release(dst); } if (!val) { release_sock(sk); return -ENOTCONN; } break; } case IP_RECVERR: val = inet->recverr; break; case IP_MULTICAST_TTL: val = inet->mc_ttl; break; case IP_MULTICAST_LOOP: val = inet->mc_loop; break; case IP_UNICAST_IF: val = (__force int)htonl((__u32) inet->uc_index); break; case IP_MULTICAST_IF: { struct in_addr addr; len = min_t(unsigned int, len, sizeof(struct in_addr)); addr.s_addr = inet->mc_addr; release_sock(sk); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &addr, len)) return -EFAULT; return 0; } case IP_MSFILTER: { struct ip_msfilter msf; if (len < IP_MSFILTER_SIZE(0)) { err = -EINVAL; goto out; } if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) { err = -EFAULT; goto out; } err = ip_mc_msfget(sk, &msf, (struct ip_msfilter __user *)optval, optlen); goto out; } case MCAST_MSFILTER: { struct group_filter gsf; if (len < GROUP_FILTER_SIZE(0)) { err = -EINVAL; goto out; } if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) { err = -EFAULT; goto out; } err = ip_mc_gsfget(sk, &gsf, (struct group_filter __user *)optval, optlen); goto out; } case IP_MULTICAST_ALL: val = inet->mc_all; break; case IP_PKTOPTIONS: { struct msghdr msg; release_sock(sk); if (sk->sk_type != SOCK_STREAM) return -ENOPROTOOPT; msg.msg_control = (__force void *) optval; msg.msg_controllen = len; msg.msg_flags = flags; if (inet->cmsg_flags & IP_CMSG_PKTINFO) { struct in_pktinfo info; info.ipi_addr.s_addr = inet->inet_rcv_saddr; info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr; info.ipi_ifindex = inet->mc_index; put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); } if (inet->cmsg_flags & IP_CMSG_TTL) { int hlim = inet->mc_ttl; put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim); } if (inet->cmsg_flags & IP_CMSG_TOS) { int tos = inet->rcv_tos; put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos); } len -= msg.msg_controllen; return put_user(len, optlen); } case IP_FREEBIND: val = inet->freebind; break; case IP_TRANSPARENT: val = inet->transparent; break; case IP_MINTTL: val = inet->min_ttl; break; default: release_sock(sk); return -ENOPROTOOPT; } release_sock(sk); if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { unsigned char ucval = (unsigned char)val; len = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &ucval, 1)) return -EFAULT; } else { len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; } return 0; out: release_sock(sk); if (needs_rtnl) rtnl_unlock(); return err; } int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int err; err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && !ip_mroute_opt(optname)) { int len; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); err = nf_getsockopt(sk, PF_INET, optname, optval, &len); release_sock(sk); if (err >= 0) err = put_user(len, optlen); 
return err; } #endif return err; } EXPORT_SYMBOL(ip_getsockopt); #ifdef CONFIG_COMPAT int compat_ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int err; if (optname == MCAST_MSFILTER) return compat_mc_getsockopt(sk, level, optname, optval, optlen, ip_getsockopt); err = do_ip_getsockopt(sk, level, optname, optval, optlen, MSG_CMSG_COMPAT); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && !ip_mroute_opt(optname)) { int len; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); release_sock(sk); if (err >= 0) err = put_user(len, optlen); return err; } #endif return err; } EXPORT_SYMBOL(compat_ip_getsockopt); #endif
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
				  int tlen, int offset)
{
	__wsum csum = skb->csum;

	if (skb->ip_summed != CHECKSUM_COMPLETE)
		return;

	if (offset != 0)
		csum = csum_sub(csum,
				csum_partial(skb_transport_header(skb) + tlen,
					     offset, 0));

	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
				  int tlen, int offset)
{
	__wsum csum = skb->csum;

	if (skb->ip_summed != CHECKSUM_COMPLETE)
		return;

	if (offset != 0) {
		int tend_off = skb_transport_offset(skb) + tlen;
		csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
	}

	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
{'added': [(119, '\tif (offset != 0) {'), (120, '\t\tint tend_off = skb_transport_offset(skb) + tlen;'), (121, '\t\tcsum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));'), (122, '\t}')], 'deleted': [(119, '\tif (offset != 0)'), (120, '\t\tcsum = csum_sub(csum,'), (121, '\t\t\t\tcsum_partial(skb_transport_header(skb) + tlen,'), (122, '\t\t\t\t\t offset, 0));')]}
4
4
1,333
7,618
https://github.com/torvalds/linux
CVE-2017-6347
['CWE-125']
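Note on the record above: the CVE-2017-6347 change replaces csum_partial() over skb_transport_header(skb) + tlen with skb_checksum() starting at skb_transport_offset(skb) + tlen, so the checksum adjustment no longer assumes the trailing `offset` bytes sit in the skb's linear buffer (the CWE-125 out-of-bounds read). The user-space sketch below is only an illustrative analogue of that idea, under the assumption that a bounds-checked range-sum helper stands in for skb_checksum(); the names sum_range, pkt, tlen and offset are invented for the demo and are not kernel APIs.

/* Hypothetical analogue of the CVE-2017-6347 fix: instead of summing bytes
 * through raw pointer arithmetic (which can run past the data actually
 * present), use a helper that refuses any range outside the buffer. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-wise sum over [start, start+len) of a buffer of size buf_len;
 * returns -1 instead of reading out of bounds. */
static int sum_range(const uint8_t *buf, size_t buf_len,
                     size_t start, size_t len, uint32_t *out)
{
    uint32_t acc = 0;
    size_t i;

    if (start > buf_len || len > buf_len - start)
        return -1;              /* would read out of bounds */

    for (i = 0; i < len; i++)
        acc += buf[start + i];  /* byte-wise sum is enough for the demo */

    *out = acc;
    return 0;
}

int main(void)
{
    uint8_t pkt[32];
    uint32_t total, tail;
    size_t tlen = 8, offset = 16;

    memset(pkt, 0xab, sizeof(pkt));

    /* checksum of the whole "payload" ... */
    if (sum_range(pkt, sizeof(pkt), 0, sizeof(pkt), &total))
        return 1;

    /* ... minus the part past the transport data we already consumed,
     * mirroring csum_sub(csum, skb_checksum(skb, tend_off, offset, 0)) */
    if (sum_range(pkt, sizeof(pkt), tlen, offset, &tail)) {
        fprintf(stderr, "refusing out-of-bounds read\n");
        return 1;
    }

    printf("total=%u tail=%u adjusted=%u\n",
           (unsigned)total, (unsigned)tail, (unsigned)(total - tail));
    return 0;
}

Built with any C compiler, the program prints the full sum, the trailing sum and their difference, and rejects ranges that fall outside the buffer, which is the property the patched kernel code gains for non-linear skbs.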
url.c
Curl_close
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include "curl_setup.h" #ifdef HAVE_NETINET_IN_H #include <netinet/in.h> #endif #ifdef HAVE_NETDB_H #include <netdb.h> #endif #ifdef HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #ifdef HAVE_NET_IF_H #include <net/if.h> #endif #ifdef HAVE_SYS_IOCTL_H #include <sys/ioctl.h> #endif #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef __VMS #include <in.h> #include <inet.h> #endif #ifdef HAVE_SYS_UN_H #include <sys/un.h> #endif #ifndef HAVE_SOCKET #error "We can't compile without socket() support!" #endif #include <limits.h> #ifdef USE_LIBIDN2 #include <idn2.h> #elif defined(USE_WIN32_IDN) /* prototype for curl_win32_idn_to_ascii() */ bool curl_win32_idn_to_ascii(const char *in, char **out); #endif /* USE_LIBIDN2 */ #include "urldata.h" #include "netrc.h" #include "formdata.h" #include "mime.h" #include "vtls/vtls.h" #include "hostip.h" #include "transfer.h" #include "sendf.h" #include "progress.h" #include "cookie.h" #include "strcase.h" #include "strerror.h" #include "escape.h" #include "strtok.h" #include "share.h" #include "content_encoding.h" #include "http_digest.h" #include "http_negotiate.h" #include "select.h" #include "multiif.h" #include "easyif.h" #include "speedcheck.h" #include "warnless.h" #include "non-ascii.h" #include "inet_pton.h" #include "getinfo.h" #include "urlapi-int.h" /* And now for the protocols */ #include "ftp.h" #include "dict.h" #include "telnet.h" #include "tftp.h" #include "http.h" #include "http2.h" #include "file.h" #include "curl_ldap.h" #include "ssh.h" #include "imap.h" #include "url.h" #include "connect.h" #include "inet_ntop.h" #include "http_ntlm.h" #include "curl_ntlm_wb.h" #include "socks.h" #include "curl_rtmp.h" #include "gopher.h" #include "http_proxy.h" #include "conncache.h" #include "multihandle.h" #include "pipeline.h" #include "dotdot.h" #include "strdup.h" #include "setopt.h" /* The last 3 #include files should be in this order */ #include "curl_printf.h" #include "curl_memory.h" #include "memdebug.h" static void conn_free(struct connectdata *conn); static void free_fixed_hostname(struct hostname *host); static unsigned int get_protocol_family(unsigned int protocol); /* Some parts of the code (e.g. chunked encoding) assume this buffer has at * more than just a few bytes to play with. Don't let it become too small or * bad things will happen. */ #if READBUFFER_SIZE < READBUFFER_MIN # error READBUFFER_SIZE is too small #endif /* * Protocol table. 
*/ static const struct Curl_handler * const protocols[] = { #ifndef CURL_DISABLE_HTTP &Curl_handler_http, #endif #if defined(USE_SSL) && !defined(CURL_DISABLE_HTTP) &Curl_handler_https, #endif #ifndef CURL_DISABLE_FTP &Curl_handler_ftp, #endif #if defined(USE_SSL) && !defined(CURL_DISABLE_FTP) &Curl_handler_ftps, #endif #ifndef CURL_DISABLE_TELNET &Curl_handler_telnet, #endif #ifndef CURL_DISABLE_DICT &Curl_handler_dict, #endif #ifndef CURL_DISABLE_LDAP &Curl_handler_ldap, #if !defined(CURL_DISABLE_LDAPS) && \ ((defined(USE_OPENLDAP) && defined(USE_SSL)) || \ (!defined(USE_OPENLDAP) && defined(HAVE_LDAP_SSL))) &Curl_handler_ldaps, #endif #endif #ifndef CURL_DISABLE_FILE &Curl_handler_file, #endif #ifndef CURL_DISABLE_TFTP &Curl_handler_tftp, #endif #if defined(USE_LIBSSH2) || defined(USE_LIBSSH) &Curl_handler_scp, #endif #if defined(USE_LIBSSH2) || defined(USE_LIBSSH) &Curl_handler_sftp, #endif #ifndef CURL_DISABLE_IMAP &Curl_handler_imap, #ifdef USE_SSL &Curl_handler_imaps, #endif #endif #ifndef CURL_DISABLE_POP3 &Curl_handler_pop3, #ifdef USE_SSL &Curl_handler_pop3s, #endif #endif #if !defined(CURL_DISABLE_SMB) && defined(USE_NTLM) && \ (CURL_SIZEOF_CURL_OFF_T > 4) && \ (!defined(USE_WINDOWS_SSPI) || defined(USE_WIN32_CRYPTO)) &Curl_handler_smb, #ifdef USE_SSL &Curl_handler_smbs, #endif #endif #ifndef CURL_DISABLE_SMTP &Curl_handler_smtp, #ifdef USE_SSL &Curl_handler_smtps, #endif #endif #ifndef CURL_DISABLE_RTSP &Curl_handler_rtsp, #endif #ifndef CURL_DISABLE_GOPHER &Curl_handler_gopher, #endif #ifdef USE_LIBRTMP &Curl_handler_rtmp, &Curl_handler_rtmpt, &Curl_handler_rtmpe, &Curl_handler_rtmpte, &Curl_handler_rtmps, &Curl_handler_rtmpts, #endif (struct Curl_handler *) NULL }; /* * Dummy handler for undefined protocol schemes. */ static const struct Curl_handler Curl_handler_dummy = { "<no protocol>", /* scheme */ ZERO_NULL, /* setup_connection */ ZERO_NULL, /* do_it */ ZERO_NULL, /* done */ ZERO_NULL, /* do_more */ ZERO_NULL, /* connect_it */ ZERO_NULL, /* connecting */ ZERO_NULL, /* doing */ ZERO_NULL, /* proto_getsock */ ZERO_NULL, /* doing_getsock */ ZERO_NULL, /* domore_getsock */ ZERO_NULL, /* perform_getsock */ ZERO_NULL, /* disconnect */ ZERO_NULL, /* readwrite */ ZERO_NULL, /* connection_check */ 0, /* defport */ 0, /* protocol */ PROTOPT_NONE /* flags */ }; void Curl_freeset(struct Curl_easy *data) { /* Free all dynamic strings stored in the data->set substructure. */ enum dupstring i; for(i = (enum dupstring)0; i < STRING_LAST; i++) { Curl_safefree(data->set.str[i]); } if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; if(data->change.url_alloc) { Curl_safefree(data->change.url); data->change.url_alloc = FALSE; } data->change.url = NULL; Curl_mime_cleanpart(&data->set.mimepost); } /* free the URL pieces */ void Curl_up_free(struct Curl_easy *data) { struct urlpieces *up = &data->state.up; Curl_safefree(up->scheme); Curl_safefree(up->hostname); Curl_safefree(up->port); Curl_safefree(up->user); Curl_safefree(up->password); Curl_safefree(up->options); Curl_safefree(up->path); Curl_safefree(up->query); curl_url_cleanup(data->state.uh); data->state.uh = NULL; } /* * This is the internal function curl_easy_cleanup() calls. This should * cleanup and free all resources associated with this sessionhandle. * * NOTE: if we ever add something that attempts to write to a socket or * similar here, we must ignore SIGPIPE first. It is currently only done * when curl_easy_perform() is invoked. 
*/ CURLcode Curl_close(struct Curl_easy *data) { struct Curl_multi *m; if(!data) return CURLE_OK; Curl_expire_clear(data); /* shut off timers */ m = data->multi; if(m) /* This handle is still part of a multi handle, take care of this first and detach this handle from there. */ curl_multi_remove_handle(data->multi, data); if(data->multi_easy) /* when curl_easy_perform() is used, it creates its own multi handle to use and this is the one */ curl_multi_cleanup(data->multi_easy); /* Destroy the timeout list that is held in the easy handle. It is /normally/ done by curl_multi_remove_handle() but this is "just in case" */ Curl_llist_destroy(&data->state.timeoutlist, NULL); data->magic = 0; /* force a clear AFTER the possibly enforced removal from the multi handle, since that function uses the magic field! */ if(data->state.rangestringalloc) free(data->state.range); /* freed here just in case DONE wasn't called */ Curl_free_request_state(data); /* Close down all open SSL info and sessions */ Curl_ssl_close_all(data); Curl_safefree(data->state.first_host); Curl_safefree(data->state.scratch); Curl_ssl_free_certinfo(data); /* Cleanup possible redirect junk */ free(data->req.newurl); data->req.newurl = NULL; if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; Curl_up_free(data); Curl_safefree(data->state.buffer); Curl_safefree(data->state.headerbuff); Curl_safefree(data->state.ulbuf); Curl_flush_cookies(data, 1); Curl_digest_cleanup(data); Curl_safefree(data->info.contenttype); Curl_safefree(data->info.wouldredirect); /* this destroys the channel and we cannot use it anymore after this */ Curl_resolver_cleanup(data->state.resolver); Curl_http2_cleanup_dependencies(data); Curl_convert_close(data); /* No longer a dirty share, if it exists */ if(data->share) { Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE); data->share->dirty--; Curl_share_unlock(data, CURL_LOCK_DATA_SHARE); } /* destruct wildcard structures if it is needed */ Curl_wildcard_dtor(&data->wildcard); Curl_freeset(data); free(data); return CURLE_OK; } /* * Initialize the UserDefined fields within a Curl_easy. * This may be safely called on a new or existing Curl_easy. 
*/ CURLcode Curl_init_userdefined(struct Curl_easy *data) { struct UserDefined *set = &data->set; CURLcode result = CURLE_OK; set->out = stdout; /* default output to stdout */ set->in_set = stdin; /* default input from stdin */ set->err = stderr; /* default stderr to stderr */ /* use fwrite as default function to store output */ set->fwrite_func = (curl_write_callback)fwrite; /* use fread as default function to read input */ set->fread_func_set = (curl_read_callback)fread; set->is_fread_set = 0; set->is_fwrite_set = 0; set->seek_func = ZERO_NULL; set->seek_client = ZERO_NULL; /* conversion callbacks for non-ASCII hosts */ set->convfromnetwork = ZERO_NULL; set->convtonetwork = ZERO_NULL; set->convfromutf8 = ZERO_NULL; set->filesize = -1; /* we don't know the size */ set->postfieldsize = -1; /* unknown size */ set->maxredirs = -1; /* allow any amount by default */ set->httpreq = HTTPREQ_GET; /* Default HTTP request */ set->rtspreq = RTSPREQ_OPTIONS; /* Default RTSP request */ set->ftp_use_epsv = TRUE; /* FTP defaults to EPSV operations */ set->ftp_use_eprt = TRUE; /* FTP defaults to EPRT operations */ set->ftp_use_pret = FALSE; /* mainly useful for drftpd servers */ set->ftp_filemethod = FTPFILE_MULTICWD; set->dns_cache_timeout = 60; /* Timeout every 60 seconds by default */ /* Set the default size of the SSL session ID cache */ set->general_ssl.max_ssl_sessions = 5; set->proxyport = 0; set->proxytype = CURLPROXY_HTTP; /* defaults to HTTP proxy */ set->httpauth = CURLAUTH_BASIC; /* defaults to basic */ set->proxyauth = CURLAUTH_BASIC; /* defaults to basic */ /* SOCKS5 proxy auth defaults to username/password + GSS-API */ set->socks5auth = CURLAUTH_BASIC | CURLAUTH_GSSAPI; /* make libcurl quiet by default: */ set->hide_progress = TRUE; /* CURLOPT_NOPROGRESS changes these */ Curl_mime_initpart(&set->mimepost, data); /* * libcurl 7.10 introduced SSL verification *by default*! This needs to be * switched off unless wanted. */ set->ssl.primary.verifypeer = TRUE; set->ssl.primary.verifyhost = TRUE; #ifdef USE_TLS_SRP set->ssl.authtype = CURL_TLSAUTH_NONE; #endif set->ssh_auth_types = CURLSSH_AUTH_DEFAULT; /* defaults to any auth type */ set->ssl.primary.sessionid = TRUE; /* session ID caching enabled by default */ set->proxy_ssl = set->ssl; set->new_file_perms = 0644; /* Default permissions */ set->new_directory_perms = 0755; /* Default permissions */ /* for the *protocols fields we don't use the CURLPROTO_ALL convenience define since we internally only use the lower 16 bits for the passed in bitmask to not conflict with the private bits */ set->allowed_protocols = CURLPROTO_ALL; set->redir_protocols = CURLPROTO_ALL & /* All except FILE, SCP and SMB */ ~(CURLPROTO_FILE | CURLPROTO_SCP | CURLPROTO_SMB | CURLPROTO_SMBS); #if defined(HAVE_GSSAPI) || defined(USE_WINDOWS_SSPI) /* * disallow unprotected protection negotiation NEC reference implementation * seem not to follow rfc1961 section 4.3/4.4 */ set->socks5_gssapi_nec = FALSE; #endif /* Set the default CA cert bundle/path detected/specified at build time. * * If Schannel (WinSSL) is the selected SSL backend then these locations * are ignored. We allow setting CA location for schannel only when * explicitly specified by the user via CURLOPT_CAINFO / --cacert. 
*/ if(Curl_ssl_backend() != CURLSSLBACKEND_SCHANNEL) { #if defined(CURL_CA_BUNDLE) result = Curl_setstropt(&set->str[STRING_SSL_CAFILE_ORIG], CURL_CA_BUNDLE); if(result) return result; result = Curl_setstropt(&set->str[STRING_SSL_CAFILE_PROXY], CURL_CA_BUNDLE); if(result) return result; #endif #if defined(CURL_CA_PATH) result = Curl_setstropt(&set->str[STRING_SSL_CAPATH_ORIG], CURL_CA_PATH); if(result) return result; result = Curl_setstropt(&set->str[STRING_SSL_CAPATH_PROXY], CURL_CA_PATH); if(result) return result; #endif } set->wildcard_enabled = FALSE; set->chunk_bgn = ZERO_NULL; set->chunk_end = ZERO_NULL; set->tcp_keepalive = FALSE; set->tcp_keepintvl = 60; set->tcp_keepidle = 60; set->tcp_fastopen = FALSE; set->tcp_nodelay = TRUE; set->ssl_enable_npn = TRUE; set->ssl_enable_alpn = TRUE; set->expect_100_timeout = 1000L; /* Wait for a second by default. */ set->sep_headers = TRUE; /* separated header lists by default */ set->buffer_size = READBUFFER_SIZE; set->upload_buffer_size = UPLOADBUFFER_DEFAULT; set->happy_eyeballs_timeout = CURL_HET_DEFAULT; set->fnmatch = ZERO_NULL; set->upkeep_interval_ms = CURL_UPKEEP_INTERVAL_DEFAULT; set->maxconnects = DEFAULT_CONNCACHE_SIZE; /* for easy handles */ set->httpversion = #ifdef USE_NGHTTP2 CURL_HTTP_VERSION_2TLS #else CURL_HTTP_VERSION_1_1 #endif ; Curl_http2_init_userset(set); return result; } /** * Curl_open() * * @param curl is a pointer to a sessionhandle pointer that gets set by this * function. * @return CURLcode */ CURLcode Curl_open(struct Curl_easy **curl) { CURLcode result; struct Curl_easy *data; /* Very simple start-up: alloc the struct, init it with zeroes and return */ data = calloc(1, sizeof(struct Curl_easy)); if(!data) { /* this is a very serious error */ DEBUGF(fprintf(stderr, "Error: calloc of Curl_easy failed\n")); return CURLE_OUT_OF_MEMORY; } data->magic = CURLEASY_MAGIC_NUMBER; result = Curl_resolver_init(&data->state.resolver); if(result) { DEBUGF(fprintf(stderr, "Error: resolver_init failed\n")); free(data); return result; } /* We do some initial setup here, all those fields that can't be just 0 */ data->state.buffer = malloc(READBUFFER_SIZE + 1); if(!data->state.buffer) { DEBUGF(fprintf(stderr, "Error: malloc of buffer failed\n")); result = CURLE_OUT_OF_MEMORY; } else { data->state.headerbuff = malloc(HEADERSIZE); if(!data->state.headerbuff) { DEBUGF(fprintf(stderr, "Error: malloc of headerbuff failed\n")); result = CURLE_OUT_OF_MEMORY; } else { result = Curl_init_userdefined(data); data->state.headersize = HEADERSIZE; Curl_convert_init(data); Curl_initinfo(data); /* most recent connection is not yet defined */ data->state.lastconnect = NULL; data->progress.flags |= PGRS_HIDE; data->state.current_speed = -1; /* init to negative == impossible */ Curl_http2_init_state(&data->state); } } if(result) { Curl_resolver_cleanup(data->state.resolver); free(data->state.buffer); free(data->state.headerbuff); Curl_freeset(data); free(data); data = NULL; } else *curl = data; return result; } #ifdef USE_RECV_BEFORE_SEND_WORKAROUND static void conn_reset_postponed_data(struct connectdata *conn, int num) { struct postponed_data * const psnd = &(conn->postponed[num]); if(psnd->buffer) { DEBUGASSERT(psnd->allocated_size > 0); DEBUGASSERT(psnd->recv_size <= psnd->allocated_size); DEBUGASSERT(psnd->recv_size ? 
(psnd->recv_processed < psnd->recv_size) : (psnd->recv_processed == 0)); DEBUGASSERT(psnd->bindsock != CURL_SOCKET_BAD); free(psnd->buffer); psnd->buffer = NULL; psnd->allocated_size = 0; psnd->recv_size = 0; psnd->recv_processed = 0; #ifdef DEBUGBUILD psnd->bindsock = CURL_SOCKET_BAD; /* used only for DEBUGASSERT */ #endif /* DEBUGBUILD */ } else { DEBUGASSERT(psnd->allocated_size == 0); DEBUGASSERT(psnd->recv_size == 0); DEBUGASSERT(psnd->recv_processed == 0); DEBUGASSERT(psnd->bindsock == CURL_SOCKET_BAD); } } static void conn_reset_all_postponed_data(struct connectdata *conn) { conn_reset_postponed_data(conn, 0); conn_reset_postponed_data(conn, 1); } #else /* ! USE_RECV_BEFORE_SEND_WORKAROUND */ /* Use "do-nothing" macro instead of function when workaround not used */ #define conn_reset_all_postponed_data(c) do {} WHILE_FALSE #endif /* ! USE_RECV_BEFORE_SEND_WORKAROUND */ static void conn_free(struct connectdata *conn) { if(!conn) return; /* possible left-overs from the async name resolvers */ Curl_resolver_cancel(conn); /* close the SSL stuff before we close any sockets since they will/may write to the sockets */ Curl_ssl_close(conn, FIRSTSOCKET); Curl_ssl_close(conn, SECONDARYSOCKET); /* close possibly still open sockets */ if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) Curl_closesocket(conn, conn->sock[SECONDARYSOCKET]); if(CURL_SOCKET_BAD != conn->sock[FIRSTSOCKET]) Curl_closesocket(conn, conn->sock[FIRSTSOCKET]); if(CURL_SOCKET_BAD != conn->tempsock[0]) Curl_closesocket(conn, conn->tempsock[0]); if(CURL_SOCKET_BAD != conn->tempsock[1]) Curl_closesocket(conn, conn->tempsock[1]); #if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \ defined(NTLM_WB_ENABLED) Curl_ntlm_wb_cleanup(conn); #endif Curl_safefree(conn->user); Curl_safefree(conn->passwd); Curl_safefree(conn->oauth_bearer); Curl_safefree(conn->options); Curl_safefree(conn->http_proxy.user); Curl_safefree(conn->socks_proxy.user); Curl_safefree(conn->http_proxy.passwd); Curl_safefree(conn->socks_proxy.passwd); Curl_safefree(conn->allocptr.proxyuserpwd); Curl_safefree(conn->allocptr.uagent); Curl_safefree(conn->allocptr.userpwd); Curl_safefree(conn->allocptr.accept_encoding); Curl_safefree(conn->allocptr.te); Curl_safefree(conn->allocptr.rangeline); Curl_safefree(conn->allocptr.ref); Curl_safefree(conn->allocptr.host); Curl_safefree(conn->allocptr.cookiehost); Curl_safefree(conn->allocptr.rtsp_transport); Curl_safefree(conn->trailer); Curl_safefree(conn->host.rawalloc); /* host name buffer */ Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */ Curl_safefree(conn->secondaryhostname); Curl_safefree(conn->http_proxy.host.rawalloc); /* http proxy name buffer */ Curl_safefree(conn->socks_proxy.host.rawalloc); /* socks proxy name buffer */ Curl_safefree(conn->master_buffer); Curl_safefree(conn->connect_state); conn_reset_all_postponed_data(conn); Curl_llist_destroy(&conn->send_pipe, NULL); Curl_llist_destroy(&conn->recv_pipe, NULL); Curl_safefree(conn->localdev); Curl_free_primary_ssl_config(&conn->ssl_config); Curl_free_primary_ssl_config(&conn->proxy_ssl_config); #ifdef USE_UNIX_SOCKETS Curl_safefree(conn->unix_domain_socket); #endif #ifdef USE_SSL Curl_safefree(conn->ssl_extra); #endif free(conn); /* free all the connection oriented data */ } /* * Disconnects the given connection. Note the connection may not be the * primary connection, like when freeing room in the connection cache or * killing of a dead old connection. * * A connection needs an easy handle when closing down. 
We support this passed * in separately since the connection to get closed here is often already * disassociated from an easy handle. * * This function MUST NOT reset state in the Curl_easy struct if that * isn't strictly bound to the life-time of *this* particular connection. * */ CURLcode Curl_disconnect(struct Curl_easy *data, struct connectdata *conn, bool dead_connection) { if(!conn) return CURLE_OK; /* this is closed and fine already */ if(!data) { DEBUGF(infof(data, "DISCONNECT without easy handle, ignoring\n")); return CURLE_OK; } /* * If this connection isn't marked to force-close, leave it open if there * are other users of it */ if(CONN_INUSE(conn) && !dead_connection) { DEBUGF(infof(data, "Curl_disconnect when inuse: %zu\n", CONN_INUSE(conn))); return CURLE_OK; } conn->data = data; if(conn->dns_entry != NULL) { Curl_resolv_unlock(data, conn->dns_entry); conn->dns_entry = NULL; } Curl_hostcache_prune(data); /* kill old DNS cache entries */ #if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) /* Cleanup NTLM connection-related data */ Curl_http_ntlm_cleanup(conn); #endif if(conn->handler->disconnect) /* This is set if protocol-specific cleanups should be made */ conn->handler->disconnect(conn, dead_connection); /* unlink ourselves! */ infof(data, "Closing connection %ld\n", conn->connection_id); Curl_conncache_remove_conn(conn, TRUE); free_fixed_hostname(&conn->host); free_fixed_hostname(&conn->conn_to_host); free_fixed_hostname(&conn->http_proxy.host); free_fixed_hostname(&conn->socks_proxy.host); DEBUGASSERT(conn->data == data); /* this assumes that the pointer is still there after the connection was detected from the cache */ Curl_ssl_close(conn, FIRSTSOCKET); conn_free(conn); return CURLE_OK; } /* * This function should return TRUE if the socket is to be assumed to * be dead. Most commonly this happens when the server has closed the * connection due to inactivity. */ static bool SocketIsDead(curl_socket_t sock) { int sval; bool ret_val = TRUE; sval = SOCKET_READABLE(sock, 0); if(sval == 0) /* timeout */ ret_val = FALSE; return ret_val; } /* * IsPipeliningPossible() * * Return a bitmask with the available pipelining and multiplexing options for * the given requested connection. 
*/ static int IsPipeliningPossible(const struct Curl_easy *handle, const struct connectdata *conn) { int avail = 0; /* If a HTTP protocol and pipelining is enabled */ if((conn->handler->protocol & PROTO_FAMILY_HTTP) && (!conn->bits.protoconnstart || !conn->bits.close)) { if(Curl_pipeline_wanted(handle->multi, CURLPIPE_HTTP1) && (handle->set.httpversion != CURL_HTTP_VERSION_1_0) && (handle->set.httpreq == HTTPREQ_GET || handle->set.httpreq == HTTPREQ_HEAD)) /* didn't ask for HTTP/1.0 and a GET or HEAD */ avail |= CURLPIPE_HTTP1; if(Curl_pipeline_wanted(handle->multi, CURLPIPE_MULTIPLEX) && (handle->set.httpversion >= CURL_HTTP_VERSION_2)) /* allows HTTP/2 */ avail |= CURLPIPE_MULTIPLEX; } return avail; } /* Returns non-zero if a handle was removed */ int Curl_removeHandleFromPipeline(struct Curl_easy *handle, struct curl_llist *pipeline) { if(pipeline) { struct curl_llist_element *curr; curr = pipeline->head; while(curr) { if(curr->ptr == handle) { Curl_llist_remove(pipeline, curr, NULL); return 1; /* we removed a handle */ } curr = curr->next; } } return 0; } #if 0 /* this code is saved here as it is useful for debugging purposes */ static void Curl_printPipeline(struct curl_llist *pipeline) { struct curl_llist_element *curr; curr = pipeline->head; while(curr) { struct Curl_easy *data = (struct Curl_easy *) curr->ptr; infof(data, "Handle in pipeline: %s\n", data->state.path); curr = curr->next; } } #endif static struct Curl_easy* gethandleathead(struct curl_llist *pipeline) { struct curl_llist_element *curr = pipeline->head; #ifdef DEBUGBUILD { struct curl_llist_element *p = pipeline->head; while(p) { struct Curl_easy *e = p->ptr; DEBUGASSERT(GOOD_EASY_HANDLE(e)); p = p->next; } } #endif if(curr) { return (struct Curl_easy *) curr->ptr; } return NULL; } /* remove the specified connection from all (possible) pipelines and related queues */ void Curl_getoff_all_pipelines(struct Curl_easy *data, struct connectdata *conn) { if(!conn->bundle) return; if(conn->bundle->multiuse == BUNDLE_PIPELINING) { bool recv_head = (conn->readchannel_inuse && Curl_recvpipe_head(data, conn)); bool send_head = (conn->writechannel_inuse && Curl_sendpipe_head(data, conn)); if(Curl_removeHandleFromPipeline(data, &conn->recv_pipe) && recv_head) Curl_pipeline_leave_read(conn); if(Curl_removeHandleFromPipeline(data, &conn->send_pipe) && send_head) Curl_pipeline_leave_write(conn); } else { (void)Curl_removeHandleFromPipeline(data, &conn->recv_pipe); (void)Curl_removeHandleFromPipeline(data, &conn->send_pipe); } } static bool proxy_info_matches(const struct proxy_info* data, const struct proxy_info* needle) { if((data->proxytype == needle->proxytype) && (data->port == needle->port) && Curl_safe_strcasecompare(data->host.name, needle->host.name)) return TRUE; return FALSE; } /* * This function checks if the given connection is dead and extracts it from * the connection cache if so. * * When this is called as a Curl_conncache_foreach() callback, the connection * cache lock is held! * * Returns TRUE if the connection was dead and extracted. */ static bool extract_if_dead(struct connectdata *conn, struct Curl_easy *data) { size_t pipeLen = conn->send_pipe.size + conn->recv_pipe.size; if(!pipeLen && !CONN_INUSE(conn)) { /* The check for a dead socket makes sense only if there are no handles in pipeline and the connection isn't already marked in use */ bool dead; conn->data = data; if(conn->handler->connection_check) { /* The protocol has a special method for checking the state of the connection. 
Use it to check if the connection is dead. */ unsigned int state; state = conn->handler->connection_check(conn, CONNCHECK_ISDEAD); dead = (state & CONNRESULT_DEAD); } else { /* Use the general method for determining the death of a connection */ dead = SocketIsDead(conn->sock[FIRSTSOCKET]); } if(dead) { infof(data, "Connection %ld seems to be dead!\n", conn->connection_id); Curl_conncache_remove_conn(conn, FALSE); conn->data = NULL; /* detach */ return TRUE; } } return FALSE; } struct prunedead { struct Curl_easy *data; struct connectdata *extracted; }; /* * Wrapper to use extract_if_dead() function in Curl_conncache_foreach() * */ static int call_extract_if_dead(struct connectdata *conn, void *param) { struct prunedead *p = (struct prunedead *)param; if(extract_if_dead(conn, p->data)) { /* stop the iteration here, pass back the connection that was extracted */ p->extracted = conn; return 1; } return 0; /* continue iteration */ } /* * This function scans the connection cache for half-open/dead connections, * closes and removes them. * The cleanup is done at most once per second. */ static void prune_dead_connections(struct Curl_easy *data) { struct curltime now = Curl_now(); time_t elapsed = Curl_timediff(now, data->state.conn_cache->last_cleanup); if(elapsed >= 1000L) { struct prunedead prune; prune.data = data; prune.extracted = NULL; while(Curl_conncache_foreach(data, data->state.conn_cache, &prune, call_extract_if_dead)) { /* disconnect it */ (void)Curl_disconnect(data, prune.extracted, /* dead_connection */TRUE); } data->state.conn_cache->last_cleanup = now; } } static size_t max_pipeline_length(struct Curl_multi *multi) { return multi ? multi->max_pipeline_length : 0; } /* * Given one filled in connection struct (named needle), this function should * detect if there already is one that has all the significant details * exactly the same and thus should be used instead. * * If there is a match, this function returns TRUE - and has marked the * connection as 'in-use'. It must later be called with ConnectionDone() to * return back to 'idle' (unused) state. * * The force_reuse flag is set if the connection must be used, even if * the pipelining strategy wants to open a new connection instead of reusing. */ static bool ConnectionExists(struct Curl_easy *data, struct connectdata *needle, struct connectdata **usethis, bool *force_reuse, bool *waitpipe) { struct connectdata *check; struct connectdata *chosen = 0; bool foundPendingCandidate = FALSE; int canpipe = IsPipeliningPossible(data, needle); struct connectbundle *bundle; #ifdef USE_NTLM bool wantNTLMhttp = ((data->state.authhost.want & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && (needle->handler->protocol & PROTO_FAMILY_HTTP)); bool wantProxyNTLMhttp = (needle->bits.proxy_user_passwd && ((data->state.authproxy.want & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && (needle->handler->protocol & PROTO_FAMILY_HTTP))); #endif *force_reuse = FALSE; *waitpipe = FALSE; /* We can't pipeline if the site is blacklisted */ if((canpipe & CURLPIPE_HTTP1) && Curl_pipeline_site_blacklisted(data, needle)) canpipe &= ~ CURLPIPE_HTTP1; /* Look up the bundle with all the connections to this particular host. Locks the connection cache, beware of early returns! */ bundle = Curl_conncache_find_bundle(needle, data->state.conn_cache); if(bundle) { /* Max pipe length is zero (unlimited) for multiplexed connections */ size_t max_pipe_len = (bundle->multiuse != BUNDLE_MULTIPLEX)? 
max_pipeline_length(data->multi):0; size_t best_pipe_len = max_pipe_len; struct curl_llist_element *curr; infof(data, "Found bundle for host %s: %p [%s]\n", (needle->bits.conn_to_host ? needle->conn_to_host.name : needle->host.name), (void *)bundle, (bundle->multiuse == BUNDLE_PIPELINING ? "can pipeline" : (bundle->multiuse == BUNDLE_MULTIPLEX ? "can multiplex" : "serially"))); /* We can't pipeline if we don't know anything about the server */ if(canpipe) { if(bundle->multiuse <= BUNDLE_UNKNOWN) { if((bundle->multiuse == BUNDLE_UNKNOWN) && data->set.pipewait) { infof(data, "Server doesn't support multi-use yet, wait\n"); *waitpipe = TRUE; Curl_conncache_unlock(needle); return FALSE; /* no re-use */ } infof(data, "Server doesn't support multi-use (yet)\n"); canpipe = 0; } if((bundle->multiuse == BUNDLE_PIPELINING) && !Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1)) { /* not asked for, switch off */ infof(data, "Could pipeline, but not asked to!\n"); canpipe = 0; } else if((bundle->multiuse == BUNDLE_MULTIPLEX) && !Curl_pipeline_wanted(data->multi, CURLPIPE_MULTIPLEX)) { infof(data, "Could multiplex, but not asked to!\n"); canpipe = 0; } } curr = bundle->conn_list.head; while(curr) { bool match = FALSE; size_t pipeLen; /* * Note that if we use a HTTP proxy in normal mode (no tunneling), we * check connections to that proxy and not to the actual remote server. */ check = curr->ptr; curr = curr->next; if(extract_if_dead(check, data)) { /* disconnect it */ (void)Curl_disconnect(data, check, /* dead_connection */TRUE); continue; } pipeLen = check->send_pipe.size + check->recv_pipe.size; if(canpipe) { if(check->bits.protoconnstart && check->bits.close) continue; if(!check->bits.multiplex) { /* If not multiplexing, make sure the connection is fine for HTTP/1 pipelining */ struct Curl_easy* sh = gethandleathead(&check->send_pipe); struct Curl_easy* rh = gethandleathead(&check->recv_pipe); if(sh) { if(!(IsPipeliningPossible(sh, check) & CURLPIPE_HTTP1)) continue; } else if(rh) { if(!(IsPipeliningPossible(rh, check) & CURLPIPE_HTTP1)) continue; } } } else { if(pipeLen > 0) { /* can only happen within multi handles, and means that another easy handle is using this connection */ continue; } if(Curl_resolver_asynch()) { /* ip_addr_str[0] is NUL only if the resolving of the name hasn't completed yet and until then we don't re-use this connection */ if(!check->ip_addr_str[0]) { infof(data, "Connection #%ld is still name resolving, can't reuse\n", check->connection_id); continue; } } if((check->sock[FIRSTSOCKET] == CURL_SOCKET_BAD) || check->bits.close) { if(!check->bits.close) foundPendingCandidate = TRUE; /* Don't pick a connection that hasn't connected yet or that is going to get closed. */ infof(data, "Connection #%ld isn't open enough, can't reuse\n", check->connection_id); #ifdef DEBUGBUILD if(check->recv_pipe.size > 0) { infof(data, "BAD! 
Unconnected #%ld has a non-empty recv pipeline!\n", check->connection_id); } #endif continue; } } #ifdef USE_UNIX_SOCKETS if(needle->unix_domain_socket) { if(!check->unix_domain_socket) continue; if(strcmp(needle->unix_domain_socket, check->unix_domain_socket)) continue; if(needle->abstract_unix_socket != check->abstract_unix_socket) continue; } else if(check->unix_domain_socket) continue; #endif if((needle->handler->flags&PROTOPT_SSL) != (check->handler->flags&PROTOPT_SSL)) /* don't do mixed SSL and non-SSL connections */ if(get_protocol_family(check->handler->protocol) != needle->handler->protocol || !check->tls_upgraded) /* except protocols that have been upgraded via TLS */ continue; if(needle->bits.httpproxy != check->bits.httpproxy || needle->bits.socksproxy != check->bits.socksproxy) continue; if(needle->bits.socksproxy && !proxy_info_matches(&needle->socks_proxy, &check->socks_proxy)) continue; if(needle->bits.conn_to_host != check->bits.conn_to_host) /* don't mix connections that use the "connect to host" feature and * connections that don't use this feature */ continue; if(needle->bits.conn_to_port != check->bits.conn_to_port) /* don't mix connections that use the "connect to port" feature and * connections that don't use this feature */ continue; if(needle->bits.httpproxy) { if(!proxy_info_matches(&needle->http_proxy, &check->http_proxy)) continue; if(needle->bits.tunnel_proxy != check->bits.tunnel_proxy) continue; if(needle->http_proxy.proxytype == CURLPROXY_HTTPS) { /* use https proxy */ if(needle->handler->flags&PROTOPT_SSL) { /* use double layer ssl */ if(!Curl_ssl_config_matches(&needle->proxy_ssl_config, &check->proxy_ssl_config)) continue; if(check->proxy_ssl[FIRSTSOCKET].state != ssl_connection_complete) continue; } else { if(!Curl_ssl_config_matches(&needle->ssl_config, &check->ssl_config)) continue; if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) continue; } } } if(!canpipe && CONN_INUSE(check)) /* this request can't be pipelined but the checked connection is already in use so we skip it */ continue; if(CONN_INUSE(check) && (check->data->multi != needle->data->multi)) /* this could be subject for pipeline/multiplex use, but only if they belong to the same multi handle */ continue; if(needle->localdev || needle->localport) { /* If we are bound to a specific local end (IP+port), we must not re-use a random other one, although if we didn't ask for a particular one we can reuse one that was bound. This comparison is a bit rough and too strict. Since the input parameters can be specified in numerous ways and still end up the same it would take a lot of processing to make it really accurate. Instead, this matching will assume that re-uses of bound connections will most likely also re-use the exact same binding parameters and missing out a few edge cases shouldn't hurt anyone very much. 
*/ if((check->localport != needle->localport) || (check->localportrange != needle->localportrange) || (needle->localdev && (!check->localdev || strcmp(check->localdev, needle->localdev)))) continue; } if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) { /* This protocol requires credentials per connection, so verify that we're using the same name and password as well */ if(strcmp(needle->user, check->user) || strcmp(needle->passwd, check->passwd)) { /* one of them was different */ continue; } } if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) || needle->bits.tunnel_proxy) { /* The requested connection does not use a HTTP proxy or it uses SSL or it is a non-SSL protocol tunneled or it is a non-SSL protocol which is allowed to be upgraded via TLS */ if((strcasecompare(needle->handler->scheme, check->handler->scheme) || (get_protocol_family(check->handler->protocol) == needle->handler->protocol && check->tls_upgraded)) && (!needle->bits.conn_to_host || strcasecompare( needle->conn_to_host.name, check->conn_to_host.name)) && (!needle->bits.conn_to_port || needle->conn_to_port == check->conn_to_port) && strcasecompare(needle->host.name, check->host.name) && needle->remote_port == check->remote_port) { /* The schemes match or the the protocol family is the same and the previous connection was TLS upgraded, and the hostname and host port match */ if(needle->handler->flags & PROTOPT_SSL) { /* This is a SSL connection so verify that we're using the same SSL options as well */ if(!Curl_ssl_config_matches(&needle->ssl_config, &check->ssl_config)) { DEBUGF(infof(data, "Connection #%ld has different SSL parameters, " "can't reuse\n", check->connection_id)); continue; } if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) { foundPendingCandidate = TRUE; DEBUGF(infof(data, "Connection #%ld has not started SSL connect, " "can't reuse\n", check->connection_id)); continue; } } match = TRUE; } } else { /* The requested connection is using the same HTTP proxy in normal mode (no tunneling) */ match = TRUE; } if(match) { #if defined(USE_NTLM) /* If we are looking for an HTTP+NTLM connection, check if this is already authenticating with the right credentials. If not, keep looking so that we can reuse NTLM connections if possible. (Especially we must not reuse the same connection if partway through a handshake!) */ if(wantNTLMhttp) { if(strcmp(needle->user, check->user) || strcmp(needle->passwd, check->passwd)) continue; } else if(check->ntlm.state != NTLMSTATE_NONE) { /* Connection is using NTLM auth but we don't want NTLM */ continue; } /* Same for Proxy NTLM authentication */ if(wantProxyNTLMhttp) { /* Both check->http_proxy.user and check->http_proxy.passwd can be * NULL */ if(!check->http_proxy.user || !check->http_proxy.passwd) continue; if(strcmp(needle->http_proxy.user, check->http_proxy.user) || strcmp(needle->http_proxy.passwd, check->http_proxy.passwd)) continue; } else if(check->proxyntlm.state != NTLMSTATE_NONE) { /* Proxy connection is using NTLM auth but we don't want NTLM */ continue; } if(wantNTLMhttp || wantProxyNTLMhttp) { /* Credentials are already checked, we can use this connection */ chosen = check; if((wantNTLMhttp && (check->ntlm.state != NTLMSTATE_NONE)) || (wantProxyNTLMhttp && (check->proxyntlm.state != NTLMSTATE_NONE))) { /* We must use this connection, no other */ *force_reuse = TRUE; break; } /* Continue look up for a better connection */ continue; } #endif if(canpipe) { /* We can pipeline if we want to. 
Let's continue looking for the optimal connection to use, i.e the shortest pipe that is not blacklisted. */ if(pipeLen == 0) { /* We have the optimal connection. Let's stop looking. */ chosen = check; break; } /* We can't use the connection if the pipe is full */ if(max_pipe_len && (pipeLen >= max_pipe_len)) { infof(data, "Pipe is full, skip (%zu)\n", pipeLen); continue; } #ifdef USE_NGHTTP2 /* If multiplexed, make sure we don't go over concurrency limit */ if(check->bits.multiplex) { /* Multiplexed connections can only be HTTP/2 for now */ struct http_conn *httpc = &check->proto.httpc; if(pipeLen >= httpc->settings.max_concurrent_streams) { infof(data, "MAX_CONCURRENT_STREAMS reached, skip (%zu)\n", pipeLen); continue; } } #endif /* We can't use the connection if the pipe is penalized */ if(Curl_pipeline_penalized(data, check)) { infof(data, "Penalized, skip\n"); continue; } if(max_pipe_len) { if(pipeLen < best_pipe_len) { /* This connection has a shorter pipe so far. We'll pick this and continue searching */ chosen = check; best_pipe_len = pipeLen; continue; } } else { /* When not pipelining (== multiplexed), we have a match here! */ chosen = check; infof(data, "Multiplexed connection found!\n"); break; } } else { /* We have found a connection. Let's stop searching. */ chosen = check; break; } } } } if(chosen) { /* mark it as used before releasing the lock */ chosen->data = data; /* own it! */ Curl_conncache_unlock(needle); *usethis = chosen; return TRUE; /* yes, we found one to use! */ } Curl_conncache_unlock(needle); if(foundPendingCandidate && data->set.pipewait) { infof(data, "Found pending candidate for reuse and CURLOPT_PIPEWAIT is set\n"); *waitpipe = TRUE; } return FALSE; /* no matching connecting exists */ } /* after a TCP connection to the proxy has been verified, this function does the next magic step. Note: this function's sub-functions call failf() */ CURLcode Curl_connected_proxy(struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; if(conn->bits.socksproxy) { #ifndef CURL_DISABLE_PROXY /* for the secondary socket (FTP), use the "connect to host" * but ignore the "connect to port" (use the secondary port) */ const char * const host = conn->bits.httpproxy ? conn->http_proxy.host.name : conn->bits.conn_to_host ? conn->conn_to_host.name : sockindex == SECONDARYSOCKET ? conn->secondaryhostname : conn->host.name; const int port = conn->bits.httpproxy ? (int)conn->http_proxy.port : sockindex == SECONDARYSOCKET ? conn->secondary_port : conn->bits.conn_to_port ? conn->conn_to_port : conn->remote_port; conn->bits.socksproxy_connecting = TRUE; switch(conn->socks_proxy.proxytype) { case CURLPROXY_SOCKS5: case CURLPROXY_SOCKS5_HOSTNAME: result = Curl_SOCKS5(conn->socks_proxy.user, conn->socks_proxy.passwd, host, port, sockindex, conn); break; case CURLPROXY_SOCKS4: case CURLPROXY_SOCKS4A: result = Curl_SOCKS4(conn->socks_proxy.user, host, port, sockindex, conn); break; default: failf(conn->data, "unknown proxytype option given"); result = CURLE_COULDNT_CONNECT; } /* switch proxytype */ conn->bits.socksproxy_connecting = FALSE; #else (void)sockindex; #endif /* CURL_DISABLE_PROXY */ } return result; } /* * verboseconnect() displays verbose information after a connect */ #ifndef CURL_DISABLE_VERBOSE_STRINGS void Curl_verboseconnect(struct connectdata *conn) { if(conn->data->set.verbose) infof(conn->data, "Connected to %s (%s) port %ld (#%ld)\n", conn->bits.socksproxy ? conn->socks_proxy.host.dispname : conn->bits.httpproxy ? 
conn->http_proxy.host.dispname : conn->bits.conn_to_host ? conn->conn_to_host.dispname : conn->host.dispname, conn->ip_addr_str, conn->port, conn->connection_id); } #endif int Curl_protocol_getsock(struct connectdata *conn, curl_socket_t *socks, int numsocks) { if(conn->handler->proto_getsock) return conn->handler->proto_getsock(conn, socks, numsocks); /* Backup getsock logic. Since there is a live socket in use, we must wait for it or it will be removed from watching when the multi_socket API is used. */ socks[0] = conn->sock[FIRSTSOCKET]; return GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(0); } int Curl_doing_getsock(struct connectdata *conn, curl_socket_t *socks, int numsocks) { if(conn && conn->handler->doing_getsock) return conn->handler->doing_getsock(conn, socks, numsocks); return GETSOCK_BLANK; } /* * We are doing protocol-specific connecting and this is being called over and * over from the multi interface until the connection phase is done on * protocol layer. */ CURLcode Curl_protocol_connecting(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; if(conn && conn->handler->connecting) { *done = FALSE; result = conn->handler->connecting(conn, done); } else *done = TRUE; return result; } /* * We are DOING this is being called over and over from the multi interface * until the DOING phase is done on protocol layer. */ CURLcode Curl_protocol_doing(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; if(conn && conn->handler->doing) { *done = FALSE; result = conn->handler->doing(conn, done); } else *done = TRUE; return result; } /* * We have discovered that the TCP connection has been successful, we can now * proceed with some action. * */ CURLcode Curl_protocol_connect(struct connectdata *conn, bool *protocol_done) { CURLcode result = CURLE_OK; *protocol_done = FALSE; if(conn->bits.tcpconnect[FIRSTSOCKET] && conn->bits.protoconnstart) { /* We already are connected, get back. This may happen when the connect worked fine in the first call, like when we connect to a local server or proxy. Note that we don't know if the protocol is actually done. Unless this protocol doesn't have any protocol-connect callback, as then we know we're done. */ if(!conn->handler->connecting) *protocol_done = TRUE; return CURLE_OK; } if(!conn->bits.protoconnstart) { result = Curl_proxy_connect(conn, FIRSTSOCKET); if(result) return result; if(CONNECT_FIRSTSOCKET_PROXY_SSL()) /* wait for HTTPS proxy SSL initialization to complete */ return CURLE_OK; if(conn->bits.tunnel_proxy && conn->bits.httpproxy && Curl_connect_ongoing(conn)) /* when using an HTTP tunnel proxy, await complete tunnel establishment before proceeding further. Return CURLE_OK so we'll be called again */ return CURLE_OK; if(conn->handler->connect_it) { /* is there a protocol-specific connect() procedure? */ /* Call the protocol-specific connect function */ result = conn->handler->connect_it(conn, protocol_done); } else *protocol_done = TRUE; /* it has started, possibly even completed but that knowledge isn't stored in this bit! */ if(!result) conn->bits.protoconnstart = TRUE; } return result; /* pass back status */ } /* * Helpers for IDNA conversions. 
*/ static bool is_ASCII_name(const char *hostname) { const unsigned char *ch = (const unsigned char *)hostname; while(*ch) { if(*ch++ & 0x80) return FALSE; } return TRUE; } /* * Perform any necessary IDN conversion of hostname */ static CURLcode fix_hostname(struct connectdata *conn, struct hostname *host) { size_t len; struct Curl_easy *data = conn->data; #ifndef USE_LIBIDN2 (void)data; (void)conn; #elif defined(CURL_DISABLE_VERBOSE_STRINGS) (void)conn; #endif /* set the name we use to display the host name */ host->dispname = host->name; len = strlen(host->name); if(len && (host->name[len-1] == '.')) /* strip off a single trailing dot if present, primarily for SNI but there's no use for it */ host->name[len-1] = 0; /* Check name for non-ASCII and convert hostname to ACE form if we can */ if(!is_ASCII_name(host->name)) { #ifdef USE_LIBIDN2 if(idn2_check_version(IDN2_VERSION)) { char *ace_hostname = NULL; #if IDN2_VERSION_NUMBER >= 0x00140000 /* IDN2_NFC_INPUT: Normalize input string using normalization form C. IDN2_NONTRANSITIONAL: Perform Unicode TR46 non-transitional processing. */ int flags = IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL; #else int flags = IDN2_NFC_INPUT; #endif int rc = idn2_lookup_ul((const char *)host->name, &ace_hostname, flags); if(rc == IDN2_OK) { host->encalloc = (char *)ace_hostname; /* change the name pointer to point to the encoded hostname */ host->name = host->encalloc; } else { failf(data, "Failed to convert %s to ACE; %s\n", host->name, idn2_strerror(rc)); return CURLE_URL_MALFORMAT; } } #elif defined(USE_WIN32_IDN) char *ace_hostname = NULL; if(curl_win32_idn_to_ascii(host->name, &ace_hostname)) { host->encalloc = ace_hostname; /* change the name pointer to point to the encoded hostname */ host->name = host->encalloc; } else { failf(data, "Failed to convert %s to ACE;\n", host->name); return CURLE_URL_MALFORMAT; } #else infof(data, "IDN support not present, can't parse Unicode domains\n"); #endif } { char *hostp; for(hostp = host->name; *hostp; hostp++) { if(*hostp <= 32) { failf(data, "Host name '%s' contains bad letter", host->name); return CURLE_URL_MALFORMAT; } } } return CURLE_OK; } /* * Frees data allocated by fix_hostname() */ static void free_fixed_hostname(struct hostname *host) { #if defined(USE_LIBIDN2) if(host->encalloc) { idn2_free(host->encalloc); /* must be freed with idn2_free() since this was allocated by libidn */ host->encalloc = NULL; } #elif defined(USE_WIN32_IDN) free(host->encalloc); /* must be freed with free() since this was allocated by curl_win32_idn_to_ascii */ host->encalloc = NULL; #else (void)host; #endif } static void llist_dtor(void *user, void *element) { (void)user; (void)element; /* Do nothing */ } /* * Allocate and initialize a new connectdata object. */ static struct connectdata *allocate_conn(struct Curl_easy *data) { struct connectdata *conn = calloc(1, sizeof(struct connectdata)); if(!conn) return NULL; #ifdef USE_SSL /* The SSL backend-specific data (ssl_backend_data) objects are allocated as a separate array to ensure suitable alignment. Note that these backend pointers can be swapped by vtls (eg ssl backend data becomes proxy backend data). 
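
   Added note (not in the original source): the single calloc(4, sslsize)
   below is carved up like this, with sslsize == Curl_ssl->sizeof_ssl_backend_data:

     ssl + 0 * sslsize  ->  conn->ssl[0].backend
     ssl + 1 * sslsize  ->  conn->ssl[1].backend
     ssl + 2 * sslsize  ->  conn->proxy_ssl[0].backend
     ssl + 3 * sslsize  ->  conn->proxy_ssl[1].backend

   and the whole block is released later through conn->ssl_extra.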
*/ { size_t sslsize = Curl_ssl->sizeof_ssl_backend_data; char *ssl = calloc(4, sslsize); if(!ssl) { free(conn); return NULL; } conn->ssl_extra = ssl; conn->ssl[0].backend = (void *)ssl; conn->ssl[1].backend = (void *)(ssl + sslsize); conn->proxy_ssl[0].backend = (void *)(ssl + 2 * sslsize); conn->proxy_ssl[1].backend = (void *)(ssl + 3 * sslsize); } #endif conn->handler = &Curl_handler_dummy; /* Be sure we have a handler defined already from start to avoid NULL situations and checks */ /* and we setup a few fields in case we end up actually using this struct */ conn->sock[FIRSTSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */ conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */ conn->tempsock[0] = CURL_SOCKET_BAD; /* no file descriptor */ conn->tempsock[1] = CURL_SOCKET_BAD; /* no file descriptor */ conn->connection_id = -1; /* no ID */ conn->port = -1; /* unknown at this point */ conn->remote_port = -1; /* unknown at this point */ #if defined(USE_RECV_BEFORE_SEND_WORKAROUND) && defined(DEBUGBUILD) conn->postponed[0].bindsock = CURL_SOCKET_BAD; /* no file descriptor */ conn->postponed[1].bindsock = CURL_SOCKET_BAD; /* no file descriptor */ #endif /* USE_RECV_BEFORE_SEND_WORKAROUND && DEBUGBUILD */ /* Default protocol-independent behavior doesn't support persistent connections, so we set this to force-close. Protocols that support this need to set this to FALSE in their "curl_do" functions. */ connclose(conn, "Default to force-close"); /* Store creation time to help future close decision making */ conn->created = Curl_now(); /* Store current time to give a baseline to keepalive connection times. */ conn->keepalive = Curl_now(); /* Store off the configured connection upkeep time. */ conn->upkeep_interval_ms = data->set.upkeep_interval_ms; conn->data = data; /* Setup the association between this connection and the Curl_easy */ conn->http_proxy.proxytype = data->set.proxytype; conn->socks_proxy.proxytype = CURLPROXY_SOCKS4; #ifdef CURL_DISABLE_PROXY conn->bits.proxy = FALSE; conn->bits.httpproxy = FALSE; conn->bits.socksproxy = FALSE; conn->bits.proxy_user_passwd = FALSE; conn->bits.tunnel_proxy = FALSE; #else /* CURL_DISABLE_PROXY */ /* note that these two proxy bits are now just on what looks to be requested, they may be altered down the road */ conn->bits.proxy = (data->set.str[STRING_PROXY] && *data->set.str[STRING_PROXY]) ? TRUE : FALSE; conn->bits.httpproxy = (conn->bits.proxy && (conn->http_proxy.proxytype == CURLPROXY_HTTP || conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0 || conn->http_proxy.proxytype == CURLPROXY_HTTPS)) ? TRUE : FALSE; conn->bits.socksproxy = (conn->bits.proxy && !conn->bits.httpproxy) ? TRUE : FALSE; if(data->set.str[STRING_PRE_PROXY] && *data->set.str[STRING_PRE_PROXY]) { conn->bits.proxy = TRUE; conn->bits.socksproxy = TRUE; } conn->bits.proxy_user_passwd = (data->set.str[STRING_PROXYUSERNAME]) ? TRUE : FALSE; conn->bits.tunnel_proxy = data->set.tunnel_thru_httpproxy; #endif /* CURL_DISABLE_PROXY */ conn->bits.user_passwd = (data->set.str[STRING_USERNAME]) ? 
TRUE : FALSE; conn->bits.ftp_use_epsv = data->set.ftp_use_epsv; conn->bits.ftp_use_eprt = data->set.ftp_use_eprt; conn->ssl_config.verifystatus = data->set.ssl.primary.verifystatus; conn->ssl_config.verifypeer = data->set.ssl.primary.verifypeer; conn->ssl_config.verifyhost = data->set.ssl.primary.verifyhost; conn->proxy_ssl_config.verifystatus = data->set.proxy_ssl.primary.verifystatus; conn->proxy_ssl_config.verifypeer = data->set.proxy_ssl.primary.verifypeer; conn->proxy_ssl_config.verifyhost = data->set.proxy_ssl.primary.verifyhost; conn->ip_version = data->set.ipver; #if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \ defined(NTLM_WB_ENABLED) conn->ntlm_auth_hlpr_socket = CURL_SOCKET_BAD; conn->ntlm_auth_hlpr_pid = 0; conn->challenge_header = NULL; conn->response_header = NULL; #endif if(Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1) && !conn->master_buffer) { /* Allocate master_buffer to be used for HTTP/1 pipelining */ conn->master_buffer = calloc(MASTERBUF_SIZE, sizeof(char)); if(!conn->master_buffer) goto error; } /* Initialize the pipeline lists */ Curl_llist_init(&conn->send_pipe, (curl_llist_dtor) llist_dtor); Curl_llist_init(&conn->recv_pipe, (curl_llist_dtor) llist_dtor); #ifdef HAVE_GSSAPI conn->data_prot = PROT_CLEAR; #endif /* Store the local bind parameters that will be used for this connection */ if(data->set.str[STRING_DEVICE]) { conn->localdev = strdup(data->set.str[STRING_DEVICE]); if(!conn->localdev) goto error; } conn->localportrange = data->set.localportrange; conn->localport = data->set.localport; /* the close socket stuff needs to be copied to the connection struct as it may live on without (this specific) Curl_easy */ conn->fclosesocket = data->set.fclosesocket; conn->closesocket_client = data->set.closesocket_client; return conn; error: Curl_llist_destroy(&conn->send_pipe, NULL); Curl_llist_destroy(&conn->recv_pipe, NULL); free(conn->master_buffer); free(conn->localdev); #ifdef USE_SSL free(conn->ssl_extra); #endif free(conn); return NULL; } /* returns the handler if the given scheme is built-in */ const struct Curl_handler *Curl_builtin_scheme(const char *scheme) { const struct Curl_handler * const *pp; const struct Curl_handler *p; /* Scan protocol handler table and match against 'scheme'. The handler may be changed later when the protocol specific setup function is called. */ for(pp = protocols; (p = *pp) != NULL; pp++) if(strcasecompare(p->scheme, scheme)) /* Protocol found in table. Check if allowed */ return p; return NULL; /* not found */ } static CURLcode findprotocol(struct Curl_easy *data, struct connectdata *conn, const char *protostr) { const struct Curl_handler *p = Curl_builtin_scheme(protostr); if(p && /* Protocol found in table. Check if allowed */ (data->set.allowed_protocols & p->protocol)) { /* it is allowed for "normal" request, now do an extra check if this is the result of a redirect */ if(data->state.this_is_a_follow && !(data->set.redir_protocols & p->protocol)) /* nope, get out */ ; else { /* Perform setup complement if some. */ conn->handler = conn->given = p; /* 'port' and 'remote_port' are set in setup_connection_internals() */ return CURLE_OK; } } /* The protocol was not found in the table, but we don't have to assign it to anything since it is already assigned to a dummy-struct in the create_conn() function when the connectdata struct is allocated. 
*/ failf(data, "Protocol \"%s\" not supported or disabled in " LIBCURL_NAME, protostr); return CURLE_UNSUPPORTED_PROTOCOL; } CURLcode Curl_uc_to_curlcode(CURLUcode uc) { switch(uc) { default: return CURLE_URL_MALFORMAT; case CURLUE_UNSUPPORTED_SCHEME: return CURLE_UNSUPPORTED_PROTOCOL; case CURLUE_OUT_OF_MEMORY: return CURLE_OUT_OF_MEMORY; case CURLUE_USER_NOT_ALLOWED: return CURLE_LOGIN_DENIED; } } /* * Parse URL and fill in the relevant members of the connection struct. */ static CURLcode parseurlandfillconn(struct Curl_easy *data, struct connectdata *conn) { CURLcode result; CURLU *uh; CURLUcode uc; char *hostname; Curl_up_free(data); /* cleanup previous leftovers first */ /* parse the URL */ uh = data->state.uh = curl_url(); if(!uh) return CURLE_OUT_OF_MEMORY; if(data->set.str[STRING_DEFAULT_PROTOCOL] && !Curl_is_absolute_url(data->change.url, NULL, MAX_SCHEME_LEN)) { char *url; if(data->change.url_alloc) free(data->change.url); url = aprintf("%s://%s", data->set.str[STRING_DEFAULT_PROTOCOL], data->change.url); if(!url) return CURLE_OUT_OF_MEMORY; data->change.url = url; data->change.url_alloc = TRUE; } uc = curl_url_set(uh, CURLUPART_URL, data->change.url, CURLU_GUESS_SCHEME | CURLU_NON_SUPPORT_SCHEME | (data->set.disallow_username_in_url ? CURLU_DISALLOW_USER : 0) | (data->set.path_as_is ? CURLU_PATH_AS_IS : 0)); if(uc) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_SCHEME, &data->state.up.scheme, 0); if(uc) return Curl_uc_to_curlcode(uc); result = findprotocol(data, conn, data->state.up.scheme); if(result) return result; uc = curl_url_get(uh, CURLUPART_USER, &data->state.up.user, CURLU_URLDECODE); if(!uc) { conn->user = strdup(data->state.up.user); if(!conn->user) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; } else if(uc != CURLUE_NO_USER) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_PASSWORD, &data->state.up.password, CURLU_URLDECODE); if(!uc) { conn->passwd = strdup(data->state.up.password); if(!conn->passwd) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; } else if(uc != CURLUE_NO_PASSWORD) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_OPTIONS, &data->state.up.options, CURLU_URLDECODE); if(!uc) { conn->options = strdup(data->state.up.options); if(!conn->options) return CURLE_OUT_OF_MEMORY; } else if(uc != CURLUE_NO_OPTIONS) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_HOST, &data->state.up.hostname, 0); if(uc) { if(!strcasecompare("file", data->state.up.scheme)) return CURLE_OUT_OF_MEMORY; } uc = curl_url_get(uh, CURLUPART_PATH, &data->state.up.path, 0); if(uc) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_PORT, &data->state.up.port, CURLU_DEFAULT_PORT); if(uc) { if(!strcasecompare("file", data->state.up.scheme)) return CURLE_OUT_OF_MEMORY; } else { unsigned long port = strtoul(data->state.up.port, NULL, 10); conn->remote_port = curlx_ultous(port); } (void)curl_url_get(uh, CURLUPART_QUERY, &data->state.up.query, 0); hostname = data->state.up.hostname; if(!hostname) /* this is for file:// transfers, get a dummy made */ hostname = (char *)""; if(hostname[0] == '[') { /* This looks like an IPv6 address literal. See if there is an address scope. 
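
     Added examples (not in the original source) of host strings the code
     below handles:

       [fe80::1%2521]    - RFC 6874 form, "%25" plus a numeric zone id
                           (conn->scope_id becomes 21)
       [fe80::1%25eth0]  - "%25" plus an interface name, turned into an
                           index with if_nametoindex() where available
       [fe80::1%eth0]    - tolerated, but triggers the "Please URL encode
                           %% as %%25" notice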
*/ char *percent = strchr(++hostname, '%'); conn->bits.ipv6_ip = TRUE; if(percent) { unsigned int identifier_offset = 3; char *endp; unsigned long scope; if(strncmp("%25", percent, 3) != 0) { infof(data, "Please URL encode %% as %%25, see RFC 6874.\n"); identifier_offset = 1; } scope = strtoul(percent + identifier_offset, &endp, 10); if(*endp == ']') { /* The address scope was well formed. Knock it out of the hostname. */ memmove(percent, endp, strlen(endp) + 1); conn->scope_id = (unsigned int)scope; } else { /* Zone identifier is not numeric */ #if defined(HAVE_NET_IF_H) && defined(IFNAMSIZ) && defined(HAVE_IF_NAMETOINDEX) char ifname[IFNAMSIZ + 2]; char *square_bracket; unsigned int scopeidx = 0; strncpy(ifname, percent + identifier_offset, IFNAMSIZ + 2); /* Ensure nullbyte termination */ ifname[IFNAMSIZ + 1] = '\0'; square_bracket = strchr(ifname, ']'); if(square_bracket) { /* Remove ']' */ *square_bracket = '\0'; scopeidx = if_nametoindex(ifname); if(scopeidx == 0) { infof(data, "Invalid network interface: %s; %s\n", ifname, strerror(errno)); } } if(scopeidx > 0) { char *p = percent + identifier_offset + strlen(ifname); /* Remove zone identifier from hostname */ memmove(percent, p, strlen(p) + 1); conn->scope_id = scopeidx; } else #endif /* HAVE_NET_IF_H && IFNAMSIZ */ infof(data, "Invalid IPv6 address format\n"); } } percent = strchr(hostname, ']'); if(percent) /* terminate IPv6 numerical at end bracket */ *percent = 0; } /* make sure the connect struct gets its own copy of the host name */ conn->host.rawalloc = strdup(hostname); if(!conn->host.rawalloc) return CURLE_OUT_OF_MEMORY; conn->host.name = conn->host.rawalloc; if(data->set.scope_id) /* Override any scope that was set above. */ conn->scope_id = data->set.scope_id; return CURLE_OK; } /* * If we're doing a resumed transfer, we need to setup our stuff * properly. */ static CURLcode setup_range(struct Curl_easy *data) { struct UrlState *s = &data->state; s->resume_from = data->set.set_resume_from; if(s->resume_from || data->set.str[STRING_SET_RANGE]) { if(s->rangestringalloc) free(s->range); if(s->resume_from) s->range = aprintf("%" CURL_FORMAT_CURL_OFF_T "-", s->resume_from); else s->range = strdup(data->set.str[STRING_SET_RANGE]); s->rangestringalloc = (s->range) ? TRUE : FALSE; if(!s->range) return CURLE_OUT_OF_MEMORY; /* tell ourselves to fetch this range */ s->use_range = TRUE; /* enable range download */ } else s->use_range = FALSE; /* disable range download */ return CURLE_OK; } /* * setup_connection_internals() - * * Setup connection internals specific to the requested protocol in the * Curl_easy. This is inited and setup before the connection is made but * is about the particular protocol that is to be used. * * This MUST get called after proxy magic has been figured out. */ static CURLcode setup_connection_internals(struct connectdata *conn) { const struct Curl_handler * p; CURLcode result; conn->socktype = SOCK_STREAM; /* most of them are TCP streams */ /* Perform setup complement if some. */ p = conn->handler; if(p->setup_connection) { result = (*p->setup_connection)(conn); if(result) return result; p = conn->handler; /* May have changed. */ } if(conn->port < 0) /* we check for -1 here since if proxy was detected already, this was very likely already set to the proxy port */ conn->port = p->defport; return CURLE_OK; } /* * Curl_free_request_state() should free temp data that was allocated in the * Curl_easy for this single request. 
*/ void Curl_free_request_state(struct Curl_easy *data) { Curl_safefree(data->req.protop); Curl_safefree(data->req.newurl); } #ifndef CURL_DISABLE_PROXY /**************************************************************** * Checks if the host is in the noproxy list. returns true if it matches * and therefore the proxy should NOT be used. ****************************************************************/ static bool check_noproxy(const char *name, const char *no_proxy) { /* no_proxy=domain1.dom,host.domain2.dom * (a comma-separated list of hosts which should * not be proxied, or an asterisk to override * all proxy variables) */ if(no_proxy && no_proxy[0]) { size_t tok_start; size_t tok_end; const char *separator = ", "; size_t no_proxy_len; size_t namelen; char *endptr; if(strcasecompare("*", no_proxy)) { return TRUE; } /* NO_PROXY was specified and it wasn't just an asterisk */ no_proxy_len = strlen(no_proxy); if(name[0] == '[') { /* IPv6 numerical address */ endptr = strchr(name, ']'); if(!endptr) return FALSE; name++; namelen = endptr - name; } else namelen = strlen(name); for(tok_start = 0; tok_start < no_proxy_len; tok_start = tok_end + 1) { while(tok_start < no_proxy_len && strchr(separator, no_proxy[tok_start]) != NULL) { /* Look for the beginning of the token. */ ++tok_start; } if(tok_start == no_proxy_len) break; /* It was all trailing separator chars, no more tokens. */ for(tok_end = tok_start; tok_end < no_proxy_len && strchr(separator, no_proxy[tok_end]) == NULL; ++tok_end) /* Look for the end of the token. */ ; /* To match previous behaviour, where it was necessary to specify * ".local.com" to prevent matching "notlocal.com", we will leave * the '.' off. */ if(no_proxy[tok_start] == '.') ++tok_start; if((tok_end - tok_start) <= namelen) { /* Match the last part of the name to the domain we are checking. */ const char *checkn = name + namelen - (tok_end - tok_start); if(strncasecompare(no_proxy + tok_start, checkn, tok_end - tok_start)) { if((tok_end - tok_start) == namelen || *(checkn - 1) == '.') { /* We either have an exact match, or the previous character is a . * so it is within the same domain, so no proxy for this host. */ return TRUE; } } } /* if((tok_end - tok_start) <= namelen) */ } /* for(tok_start = 0; tok_start < no_proxy_len; tok_start = tok_end + 1) */ } /* NO_PROXY was specified and it wasn't just an asterisk */ return FALSE; } #ifndef CURL_DISABLE_HTTP /**************************************************************** * Detect what (if any) proxy to use. Remember that this selects a host * name and is not limited to HTTP proxies only. * The returned pointer must be freed by the caller (unless NULL) ****************************************************************/ static char *detect_proxy(struct connectdata *conn) { char *proxy = NULL; /* If proxy was not specified, we check for default proxy environment * variables, to enable i.e Lynx compliance: * * http_proxy=http://some.server.dom:port/ * https_proxy=http://some.server.dom:port/ * ftp_proxy=http://some.server.dom:port/ * no_proxy=domain1.dom,host.domain2.dom * (a comma-separated list of hosts which should * not be proxied, or an asterisk to override * all proxy variables) * all_proxy=http://some.server.dom:port/ * (seems to exist for the CERN www lib. Probably * the first to check for.) * * For compatibility, the all-uppercase versions of these variables are * checked if the lowercase versions don't exist. 
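 *
 * Added example (not in the original source): for an ftp:// URL the loop
 * below builds the name "ftp_proxy" and checks that variable first, then
 * the uppercase "FTP_PROXY", then "all_proxy" and finally "ALL_PROXY".
 * Only plain "http_proxy" is never retried in uppercase, for the security
 * reason explained above.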
*/ char proxy_env[128]; const char *protop = conn->handler->scheme; char *envp = proxy_env; char *prox; /* Now, build <protocol>_proxy and check for such a one to use */ while(*protop) *envp++ = (char)tolower((int)*protop++); /* append _proxy */ strcpy(envp, "_proxy"); /* read the protocol proxy: */ prox = curl_getenv(proxy_env); /* * We don't try the uppercase version of HTTP_PROXY because of * security reasons: * * When curl is used in a webserver application * environment (cgi or php), this environment variable can * be controlled by the web server user by setting the * http header 'Proxy:' to some value. * * This can cause 'internal' http/ftp requests to be * arbitrarily redirected by any external attacker. */ if(!prox && !strcasecompare("http_proxy", proxy_env)) { /* There was no lowercase variable, try the uppercase version: */ Curl_strntoupper(proxy_env, proxy_env, sizeof(proxy_env)); prox = curl_getenv(proxy_env); } envp = proxy_env; if(prox) { proxy = prox; /* use this */ } else { envp = (char *)"all_proxy"; proxy = curl_getenv(envp); /* default proxy to use */ if(!proxy) { envp = (char *)"ALL_PROXY"; proxy = curl_getenv(envp); } } if(proxy) infof(conn->data, "Uses proxy env variable %s == '%s'\n", envp, proxy); return proxy; } #endif /* CURL_DISABLE_HTTP */ /* * If this is supposed to use a proxy, we need to figure out the proxy * host name, so that we can re-use an existing connection * that may exist registered to the same proxy host. */ static CURLcode parse_proxy(struct Curl_easy *data, struct connectdata *conn, char *proxy, curl_proxytype proxytype) { char *prox_portno; char *endofprot; /* We use 'proxyptr' to point to the proxy name from now on... */ char *proxyptr; char *portptr; char *atsign; long port = -1; char *proxyuser = NULL; char *proxypasswd = NULL; bool sockstype; /* We do the proxy host string parsing here. We want the host name and the * port name. Accept a protocol:// prefix */ /* Parse the protocol part if present */ endofprot = strstr(proxy, "://"); if(endofprot) { proxyptr = endofprot + 3; if(checkprefix("https", proxy)) proxytype = CURLPROXY_HTTPS; else if(checkprefix("socks5h", proxy)) proxytype = CURLPROXY_SOCKS5_HOSTNAME; else if(checkprefix("socks5", proxy)) proxytype = CURLPROXY_SOCKS5; else if(checkprefix("socks4a", proxy)) proxytype = CURLPROXY_SOCKS4A; else if(checkprefix("socks4", proxy) || checkprefix("socks", proxy)) proxytype = CURLPROXY_SOCKS4; else if(checkprefix("http:", proxy)) ; /* leave it as HTTP or HTTP/1.0 */ else { /* Any other xxx:// reject! */ failf(data, "Unsupported proxy scheme for \'%s\'", proxy); return CURLE_COULDNT_CONNECT; } } else proxyptr = proxy; /* No xxx:// head: It's a HTTP proxy */ #ifdef USE_SSL if(!(Curl_ssl->supports & SSLSUPP_HTTPS_PROXY)) #endif if(proxytype == CURLPROXY_HTTPS) { failf(data, "Unsupported proxy \'%s\', libcurl is built without the " "HTTPS-proxy support.", proxy); return CURLE_NOT_BUILT_IN; } sockstype = proxytype == CURLPROXY_SOCKS5_HOSTNAME || proxytype == CURLPROXY_SOCKS5 || proxytype == CURLPROXY_SOCKS4A || proxytype == CURLPROXY_SOCKS4; /* Is there a username and password given in this proxy url? 
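 *
 * Added example (not in the original source): a proxy string such as
 * "http://joe:secret@proxy.example.com:8080" is split here; the part
 * before the '@' goes through Curl_parse_login_details() and the rest,
 * "proxy.example.com:8080", continues into the host/port parsing below.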
*/ atsign = strchr(proxyptr, '@'); if(atsign) { CURLcode result = Curl_parse_login_details(proxyptr, atsign - proxyptr, &proxyuser, &proxypasswd, NULL); if(result) return result; proxyptr = atsign + 1; } /* start scanning for port number at this point */ portptr = proxyptr; /* detect and extract RFC6874-style IPv6-addresses */ if(*proxyptr == '[') { char *ptr = ++proxyptr; /* advance beyond the initial bracket */ while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.'))) ptr++; if(*ptr == '%') { /* There might be a zone identifier */ if(strncmp("%25", ptr, 3)) infof(data, "Please URL encode %% as %%25, see RFC 6874.\n"); ptr++; /* Allow unreserved characters as defined in RFC 3986 */ while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') || (*ptr == '.') || (*ptr == '_') || (*ptr == '~'))) ptr++; } if(*ptr == ']') /* yeps, it ended nicely with a bracket as well */ *ptr++ = 0; else infof(data, "Invalid IPv6 address format\n"); portptr = ptr; /* Note that if this didn't end with a bracket, we still advanced the * proxyptr first, but I can't see anything wrong with that as no host * name nor a numeric can legally start with a bracket. */ } /* Get port number off proxy.server.com:1080 */ prox_portno = strchr(portptr, ':'); if(prox_portno) { char *endp = NULL; *prox_portno = 0x0; /* cut off number from host name */ prox_portno ++; /* now set the local port number */ port = strtol(prox_portno, &endp, 10); if((endp && *endp && (*endp != '/') && (*endp != ' ')) || (port < 0) || (port > 65535)) { /* meant to detect for example invalid IPv6 numerical addresses without brackets: "2a00:fac0:a000::7:13". Accept a trailing slash only because we then allow "URL style" with the number followed by a slash, used in curl test cases already. Space is also an acceptable terminating symbol. */ infof(data, "No valid port number in proxy string (%s)\n", prox_portno); } else conn->port = port; } else { if(proxyptr[0]=='/') { /* If the first character in the proxy string is a slash, fail immediately. The following code will otherwise clear the string which will lead to code running as if no proxy was set! */ Curl_safefree(proxyuser); Curl_safefree(proxypasswd); return CURLE_COULDNT_RESOLVE_PROXY; } /* without a port number after the host name, some people seem to use a slash so we strip everything from the first slash */ atsign = strchr(proxyptr, '/'); if(atsign) *atsign = '\0'; /* cut off path part from host name */ if(data->set.proxyport) /* None given in the proxy string, then get the default one if it is given */ port = data->set.proxyport; else { if(proxytype == CURLPROXY_HTTPS) port = CURL_DEFAULT_HTTPS_PROXY_PORT; else port = CURL_DEFAULT_PROXY_PORT; } } if(*proxyptr) { struct proxy_info *proxyinfo = sockstype ? &conn->socks_proxy : &conn->http_proxy; proxyinfo->proxytype = proxytype; if(proxyuser) { /* found user and password, rip them out. note that we are unescaping them, as there is otherwise no way to have a username or password with reserved characters like ':' in them. 
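 *
 * Added example (not in the original source): a proxy user given as
 * "joe%40work" ends up stored as "joe@work" after the
 * curl_easy_unescape() calls below.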
*/ Curl_safefree(proxyinfo->user); proxyinfo->user = curl_easy_unescape(data, proxyuser, 0, NULL); Curl_safefree(proxyuser); if(!proxyinfo->user) { Curl_safefree(proxypasswd); return CURLE_OUT_OF_MEMORY; } Curl_safefree(proxyinfo->passwd); if(proxypasswd && strlen(proxypasswd) < MAX_CURL_PASSWORD_LENGTH) proxyinfo->passwd = curl_easy_unescape(data, proxypasswd, 0, NULL); else proxyinfo->passwd = strdup(""); Curl_safefree(proxypasswd); if(!proxyinfo->passwd) return CURLE_OUT_OF_MEMORY; conn->bits.proxy_user_passwd = TRUE; /* enable it */ } if(port >= 0) { proxyinfo->port = port; if(conn->port < 0 || sockstype || !conn->socks_proxy.host.rawalloc) conn->port = port; } /* now, clone the cleaned proxy host name */ Curl_safefree(proxyinfo->host.rawalloc); proxyinfo->host.rawalloc = strdup(proxyptr); proxyinfo->host.name = proxyinfo->host.rawalloc; if(!proxyinfo->host.rawalloc) return CURLE_OUT_OF_MEMORY; } Curl_safefree(proxyuser); Curl_safefree(proxypasswd); return CURLE_OK; } /* * Extract the user and password from the authentication string */ static CURLcode parse_proxy_auth(struct Curl_easy *data, struct connectdata *conn) { char proxyuser[MAX_CURL_USER_LENGTH]=""; char proxypasswd[MAX_CURL_PASSWORD_LENGTH]=""; CURLcode result; if(data->set.str[STRING_PROXYUSERNAME] != NULL) { strncpy(proxyuser, data->set.str[STRING_PROXYUSERNAME], MAX_CURL_USER_LENGTH); proxyuser[MAX_CURL_USER_LENGTH-1] = '\0'; /*To be on safe side*/ } if(data->set.str[STRING_PROXYPASSWORD] != NULL) { strncpy(proxypasswd, data->set.str[STRING_PROXYPASSWORD], MAX_CURL_PASSWORD_LENGTH); proxypasswd[MAX_CURL_PASSWORD_LENGTH-1] = '\0'; /*To be on safe side*/ } result = Curl_urldecode(data, proxyuser, 0, &conn->http_proxy.user, NULL, FALSE); if(!result) result = Curl_urldecode(data, proxypasswd, 0, &conn->http_proxy.passwd, NULL, FALSE); return result; } /* create_conn helper to parse and init proxy values. to be called after unix socket init but before any proxy vars are evaluated. */ static CURLcode create_conn_helper_init_proxy(struct connectdata *conn) { char *proxy = NULL; char *socksproxy = NULL; char *no_proxy = NULL; CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; /************************************************************* * Extract the user and password from the authentication string *************************************************************/ if(conn->bits.proxy_user_passwd) { result = parse_proxy_auth(data, conn); if(result) goto out; } /************************************************************* * Detect what (if any) proxy to use *************************************************************/ if(data->set.str[STRING_PROXY]) { proxy = strdup(data->set.str[STRING_PROXY]); /* if global proxy is set, this is it */ if(NULL == proxy) { failf(data, "memory shortage"); result = CURLE_OUT_OF_MEMORY; goto out; } } if(data->set.str[STRING_PRE_PROXY]) { socksproxy = strdup(data->set.str[STRING_PRE_PROXY]); /* if global socks proxy is set, this is it */ if(NULL == socksproxy) { failf(data, "memory shortage"); result = CURLE_OUT_OF_MEMORY; goto out; } } if(!data->set.str[STRING_NOPROXY]) { const char *p = "no_proxy"; no_proxy = curl_getenv(p); if(!no_proxy) { p = "NO_PROXY"; no_proxy = curl_getenv(p); } if(no_proxy) { infof(conn->data, "Uses proxy env variable %s == '%s'\n", p, no_proxy); } } if(check_noproxy(conn->host.name, data->set.str[STRING_NOPROXY] ? 
data->set.str[STRING_NOPROXY] : no_proxy)) { Curl_safefree(proxy); Curl_safefree(socksproxy); } #ifndef CURL_DISABLE_HTTP else if(!proxy && !socksproxy) /* if the host is not in the noproxy list, detect proxy. */ proxy = detect_proxy(conn); #endif /* CURL_DISABLE_HTTP */ Curl_safefree(no_proxy); #ifdef USE_UNIX_SOCKETS /* For the time being do not mix proxy and unix domain sockets. See #1274 */ if(proxy && conn->unix_domain_socket) { free(proxy); proxy = NULL; } #endif if(proxy && (!*proxy || (conn->handler->flags & PROTOPT_NONETWORK))) { free(proxy); /* Don't bother with an empty proxy string or if the protocol doesn't work with network */ proxy = NULL; } if(socksproxy && (!*socksproxy || (conn->handler->flags & PROTOPT_NONETWORK))) { free(socksproxy); /* Don't bother with an empty socks proxy string or if the protocol doesn't work with network */ socksproxy = NULL; } /*********************************************************************** * If this is supposed to use a proxy, we need to figure out the proxy host * name, proxy type and port number, so that we can re-use an existing * connection that may exist registered to the same proxy host. ***********************************************************************/ if(proxy || socksproxy) { if(proxy) { result = parse_proxy(data, conn, proxy, conn->http_proxy.proxytype); Curl_safefree(proxy); /* parse_proxy copies the proxy string */ if(result) goto out; } if(socksproxy) { result = parse_proxy(data, conn, socksproxy, conn->socks_proxy.proxytype); /* parse_proxy copies the socks proxy string */ Curl_safefree(socksproxy); if(result) goto out; } if(conn->http_proxy.host.rawalloc) { #ifdef CURL_DISABLE_HTTP /* asking for a HTTP proxy is a bit funny when HTTP is disabled... */ result = CURLE_UNSUPPORTED_PROTOCOL; goto out; #else /* force this connection's protocol to become HTTP if compatible */ if(!(conn->handler->protocol & PROTO_FAMILY_HTTP)) { if((conn->handler->flags & PROTOPT_PROXY_AS_HTTP) && !conn->bits.tunnel_proxy) conn->handler = &Curl_handler_http; else /* if not converting to HTTP over the proxy, enforce tunneling */ conn->bits.tunnel_proxy = TRUE; } conn->bits.httpproxy = TRUE; #endif } else { conn->bits.httpproxy = FALSE; /* not a HTTP proxy */ conn->bits.tunnel_proxy = FALSE; /* no tunneling if not HTTP */ } if(conn->socks_proxy.host.rawalloc) { if(!conn->http_proxy.host.rawalloc) { /* once a socks proxy */ if(!conn->socks_proxy.user) { conn->socks_proxy.user = conn->http_proxy.user; conn->http_proxy.user = NULL; Curl_safefree(conn->socks_proxy.passwd); conn->socks_proxy.passwd = conn->http_proxy.passwd; conn->http_proxy.passwd = NULL; } } conn->bits.socksproxy = TRUE; } else conn->bits.socksproxy = FALSE; /* not a socks proxy */ } else { conn->bits.socksproxy = FALSE; conn->bits.httpproxy = FALSE; } conn->bits.proxy = conn->bits.httpproxy || conn->bits.socksproxy; if(!conn->bits.proxy) { /* we aren't using the proxy after all... */ conn->bits.proxy = FALSE; conn->bits.httpproxy = FALSE; conn->bits.socksproxy = FALSE; conn->bits.proxy_user_passwd = FALSE; conn->bits.tunnel_proxy = FALSE; } out: free(socksproxy); free(proxy); return result; } #endif /* CURL_DISABLE_PROXY */ /* * Curl_parse_login_details() * * This is used to parse a login string for user name, password and options in * the following formats: * * user * user:password * user:password;options * user;options * user;options:password * :password * :password;options * ;options * ;options:password * * Parameters: * * login [in] - The login string. 
* len [in] - The length of the login string. * userp [in/out] - The address where a pointer to newly allocated memory * holding the user will be stored upon completion. * passwdp [in/out] - The address where a pointer to newly allocated memory * holding the password will be stored upon completion. * optionsp [in/out] - The address where a pointer to newly allocated memory * holding the options will be stored upon completion. * * Returns CURLE_OK on success. */ CURLcode Curl_parse_login_details(const char *login, const size_t len, char **userp, char **passwdp, char **optionsp) { CURLcode result = CURLE_OK; char *ubuf = NULL; char *pbuf = NULL; char *obuf = NULL; const char *psep = NULL; const char *osep = NULL; size_t ulen; size_t plen; size_t olen; /* Attempt to find the password separator */ if(passwdp) { psep = strchr(login, ':'); /* Within the constraint of the login string */ if(psep >= login + len) psep = NULL; } /* Attempt to find the options separator */ if(optionsp) { osep = strchr(login, ';'); /* Within the constraint of the login string */ if(osep >= login + len) osep = NULL; } /* Calculate the portion lengths */ ulen = (psep ? (size_t)(osep && psep > osep ? osep - login : psep - login) : (osep ? (size_t)(osep - login) : len)); plen = (psep ? (osep && osep > psep ? (size_t)(osep - psep) : (size_t)(login + len - psep)) - 1 : 0); olen = (osep ? (psep && psep > osep ? (size_t)(psep - osep) : (size_t)(login + len - osep)) - 1 : 0); /* Allocate the user portion buffer */ if(userp && ulen) { ubuf = malloc(ulen + 1); if(!ubuf) result = CURLE_OUT_OF_MEMORY; } /* Allocate the password portion buffer */ if(!result && passwdp && plen) { pbuf = malloc(plen + 1); if(!pbuf) { free(ubuf); result = CURLE_OUT_OF_MEMORY; } } /* Allocate the options portion buffer */ if(!result && optionsp && olen) { obuf = malloc(olen + 1); if(!obuf) { free(pbuf); free(ubuf); result = CURLE_OUT_OF_MEMORY; } } if(!result) { /* Store the user portion if necessary */ if(ubuf) { memcpy(ubuf, login, ulen); ubuf[ulen] = '\0'; Curl_safefree(*userp); *userp = ubuf; } /* Store the password portion if necessary */ if(pbuf) { memcpy(pbuf, psep + 1, plen); pbuf[plen] = '\0'; Curl_safefree(*passwdp); *passwdp = pbuf; } /* Store the options portion if necessary */ if(obuf) { memcpy(obuf, osep + 1, olen); obuf[olen] = '\0'; Curl_safefree(*optionsp); *optionsp = obuf; } } return result; } /************************************************************* * Figure out the remote port number and fix it in the URL * * No matter if we use a proxy or not, we have to figure out the remote * port number of various reasons. * * The port number embedded in the URL is replaced, if necessary. *************************************************************/ static CURLcode parse_remote_port(struct Curl_easy *data, struct connectdata *conn) { if(data->set.use_port && data->state.allow_port) { /* if set, we use this instead of the port possibly given in the URL */ char portbuf[16]; CURLUcode uc; conn->remote_port = (unsigned short)data->set.use_port; snprintf(portbuf, sizeof(portbuf), "%u", conn->remote_port); uc = curl_url_set(data->state.uh, CURLUPART_PORT, portbuf, 0); if(uc) return CURLE_OUT_OF_MEMORY; } return CURLE_OK; } /* * Override the login details from the URL with that in the CURLOPT_USERPWD * option or a .netrc file, if applicable. 
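 *
 * Illustrative addition (not in the original source): a hypothetical caller
 * of Curl_parse_login_details() (defined above), kept out of the build; the
 * name example_split_login() and the literal credentials are made up.
 */
#if 0 /* illustrative sketch only, not compiled */
static void example_split_login(void)
{
  char *user = NULL;
  char *passwd = NULL;
  char *options = NULL;
  const char *login = "alice:secret;auth=NTLM";

  if(!Curl_parse_login_details(login, strlen(login),
                               &user, &passwd, &options)) {
    /* user == "alice", passwd == "secret", options == "auth=NTLM" */
  }
  free(user);
  free(passwd);
  free(options);
}
#endif
/* override_login() itself: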
*/ static CURLcode override_login(struct Curl_easy *data, struct connectdata *conn, char **userp, char **passwdp, char **optionsp) { bool user_changed = FALSE; bool passwd_changed = FALSE; CURLUcode uc; if(data->set.str[STRING_USERNAME]) { free(*userp); *userp = strdup(data->set.str[STRING_USERNAME]); if(!*userp) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; /* enable user+password */ user_changed = TRUE; } if(data->set.str[STRING_PASSWORD]) { free(*passwdp); *passwdp = strdup(data->set.str[STRING_PASSWORD]); if(!*passwdp) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; /* enable user+password */ passwd_changed = TRUE; } if(data->set.str[STRING_OPTIONS]) { free(*optionsp); *optionsp = strdup(data->set.str[STRING_OPTIONS]); if(!*optionsp) return CURLE_OUT_OF_MEMORY; } conn->bits.netrc = FALSE; if(data->set.use_netrc != CURL_NETRC_IGNORED) { char *nuser = NULL; char *npasswd = NULL; int ret; if(data->set.use_netrc == CURL_NETRC_OPTIONAL) nuser = *userp; /* to separate otherwise identical machines */ ret = Curl_parsenetrc(conn->host.name, &nuser, &npasswd, data->set.str[STRING_NETRC_FILE]); if(ret > 0) { infof(data, "Couldn't find host %s in the " DOT_CHAR "netrc file; using defaults\n", conn->host.name); } else if(ret < 0) { return CURLE_OUT_OF_MEMORY; } else { /* set bits.netrc TRUE to remember that we got the name from a .netrc file, so that it is safe to use even if we followed a Location: to a different host or similar. */ conn->bits.netrc = TRUE; conn->bits.user_passwd = TRUE; /* enable user+password */ if(data->set.use_netrc == CURL_NETRC_OPTIONAL) { /* prefer credentials outside netrc */ if(nuser && !*userp) { free(*userp); *userp = nuser; user_changed = TRUE; } if(npasswd && !*passwdp) { free(*passwdp); *passwdp = npasswd; passwd_changed = TRUE; } } else { /* prefer netrc credentials */ if(nuser) { free(*userp); *userp = nuser; user_changed = TRUE; } if(npasswd) { free(*passwdp); *passwdp = npasswd; passwd_changed = TRUE; } } } } /* for updated strings, we update them in the URL */ if(user_changed) { uc = curl_url_set(data->state.uh, CURLUPART_USER, *userp, 0); if(uc) return Curl_uc_to_curlcode(uc); } if(passwd_changed) { uc = curl_url_set(data->state.uh, CURLUPART_PASSWORD, *passwdp, 0); if(uc) return Curl_uc_to_curlcode(uc); } return CURLE_OK; } /* * Set the login details so they're available in the connection */ static CURLcode set_login(struct connectdata *conn) { CURLcode result = CURLE_OK; const char *setuser = CURL_DEFAULT_USER; const char *setpasswd = CURL_DEFAULT_PASSWORD; /* If our protocol needs a password and we have none, use the defaults */ if((conn->handler->flags & PROTOPT_NEEDSPWD) && !conn->bits.user_passwd) ; else { setuser = ""; setpasswd = ""; } /* Store the default user */ if(!conn->user) { conn->user = strdup(setuser); if(!conn->user) return CURLE_OUT_OF_MEMORY; } /* Store the default password */ if(!conn->passwd) { conn->passwd = strdup(setpasswd); if(!conn->passwd) result = CURLE_OUT_OF_MEMORY; } /* if there's a user without password, consider password blank */ if(conn->user && !conn->passwd) { conn->passwd = strdup(""); if(!conn->passwd) result = CURLE_OUT_OF_MEMORY; } return result; } /* * Parses a "host:port" string to connect to. * The hostname and the port may be empty; in this case, NULL is returned for * the hostname and -1 for the port. 
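 *
 * Illustrative addition (not in the original source): a hypothetical caller,
 * kept out of the build, showing the expected results for a few inputs (the
 * bracketed IPv6 case assumes ENABLE_IPV6):
 */
#if 0 /* illustrative sketch only, not compiled */
static void example_connect_to_host_port(struct Curl_easy *data)
{
  char *host = NULL;
  int port = -1;

  /* "example.com:8080"  -> host "example.com", port 8080
     "[fe80::1]:8080"    -> host "fe80::1",     port 8080
     "example.com"       -> host "example.com", port -1 (none given) */
  if(!parse_connect_to_host_port(data, "example.com:8080", &host, &port)) {
    /* use host and port here, then free the duplicated host name */
    free(host);
  }
}
#endif
/* parse_connect_to_host_port() itself: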
*/ static CURLcode parse_connect_to_host_port(struct Curl_easy *data, const char *host, char **hostname_result, int *port_result) { char *host_dup; char *hostptr; char *host_portno; char *portptr; int port = -1; #if defined(CURL_DISABLE_VERBOSE_STRINGS) (void) data; #endif *hostname_result = NULL; *port_result = -1; if(!host || !*host) return CURLE_OK; host_dup = strdup(host); if(!host_dup) return CURLE_OUT_OF_MEMORY; hostptr = host_dup; /* start scanning for port number at this point */ portptr = hostptr; /* detect and extract RFC6874-style IPv6-addresses */ if(*hostptr == '[') { #ifdef ENABLE_IPV6 char *ptr = ++hostptr; /* advance beyond the initial bracket */ while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.'))) ptr++; if(*ptr == '%') { /* There might be a zone identifier */ if(strncmp("%25", ptr, 3)) infof(data, "Please URL encode %% as %%25, see RFC 6874.\n"); ptr++; /* Allow unreserved characters as defined in RFC 3986 */ while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') || (*ptr == '.') || (*ptr == '_') || (*ptr == '~'))) ptr++; } if(*ptr == ']') /* yeps, it ended nicely with a bracket as well */ *ptr++ = '\0'; else infof(data, "Invalid IPv6 address format\n"); portptr = ptr; /* Note that if this didn't end with a bracket, we still advanced the * hostptr first, but I can't see anything wrong with that as no host * name nor a numeric can legally start with a bracket. */ #else failf(data, "Use of IPv6 in *_CONNECT_TO without IPv6 support built-in!"); free(host_dup); return CURLE_NOT_BUILT_IN; #endif } /* Get port number off server.com:1080 */ host_portno = strchr(portptr, ':'); if(host_portno) { char *endp = NULL; *host_portno = '\0'; /* cut off number from host name */ host_portno++; if(*host_portno) { long portparse = strtol(host_portno, &endp, 10); if((endp && *endp) || (portparse < 0) || (portparse > 65535)) { infof(data, "No valid port number in connect to host string (%s)\n", host_portno); hostptr = NULL; port = -1; } else port = (int)portparse; /* we know it will fit */ } } /* now, clone the cleaned host name */ if(hostptr) { *hostname_result = strdup(hostptr); if(!*hostname_result) { free(host_dup); return CURLE_OUT_OF_MEMORY; } } *port_result = port; free(host_dup); return CURLE_OK; } /* * Parses one "connect to" string in the form: * "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT". */ static CURLcode parse_connect_to_string(struct Curl_easy *data, struct connectdata *conn, const char *conn_to_host, char **host_result, int *port_result) { CURLcode result = CURLE_OK; const char *ptr = conn_to_host; int host_match = FALSE; int port_match = FALSE; *host_result = NULL; *port_result = -1; if(*ptr == ':') { /* an empty hostname always matches */ host_match = TRUE; ptr++; } else { /* check whether the URL's hostname matches */ size_t hostname_to_match_len; char *hostname_to_match = aprintf("%s%s%s", conn->bits.ipv6_ip ? "[" : "", conn->host.name, conn->bits.ipv6_ip ? 
"]" : ""); if(!hostname_to_match) return CURLE_OUT_OF_MEMORY; hostname_to_match_len = strlen(hostname_to_match); host_match = strncasecompare(ptr, hostname_to_match, hostname_to_match_len); free(hostname_to_match); ptr += hostname_to_match_len; host_match = host_match && *ptr == ':'; ptr++; } if(host_match) { if(*ptr == ':') { /* an empty port always matches */ port_match = TRUE; ptr++; } else { /* check whether the URL's port matches */ char *ptr_next = strchr(ptr, ':'); if(ptr_next) { char *endp = NULL; long port_to_match = strtol(ptr, &endp, 10); if((endp == ptr_next) && (port_to_match == conn->remote_port)) { port_match = TRUE; ptr = ptr_next + 1; } } } } if(host_match && port_match) { /* parse the hostname and port to connect to */ result = parse_connect_to_host_port(data, ptr, host_result, port_result); } return result; } /* * Processes all strings in the "connect to" slist, and uses the "connect * to host" and "connect to port" of the first string that matches. */ static CURLcode parse_connect_to_slist(struct Curl_easy *data, struct connectdata *conn, struct curl_slist *conn_to_host) { CURLcode result = CURLE_OK; char *host = NULL; int port = -1; while(conn_to_host && !host && port == -1) { result = parse_connect_to_string(data, conn, conn_to_host->data, &host, &port); if(result) return result; if(host && *host) { conn->conn_to_host.rawalloc = host; conn->conn_to_host.name = host; conn->bits.conn_to_host = TRUE; infof(data, "Connecting to hostname: %s\n", host); } else { /* no "connect to host" */ conn->bits.conn_to_host = FALSE; Curl_safefree(host); } if(port >= 0) { conn->conn_to_port = port; conn->bits.conn_to_port = TRUE; infof(data, "Connecting to port: %d\n", port); } else { /* no "connect to port" */ conn->bits.conn_to_port = FALSE; port = -1; } conn_to_host = conn_to_host->next; } return result; } /************************************************************* * Resolve the address of the server or proxy *************************************************************/ static CURLcode resolve_server(struct Curl_easy *data, struct connectdata *conn, bool *async) { CURLcode result = CURLE_OK; timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE); /************************************************************* * Resolve the name of the server or proxy *************************************************************/ if(conn->bits.reuse) /* We're reusing the connection - no need to resolve anything, and fix_hostname() was called already in create_conn() for the re-use case. */ *async = FALSE; else { /* this is a fresh connect */ int rc; struct Curl_dns_entry *hostaddr; #ifdef USE_UNIX_SOCKETS if(conn->unix_domain_socket) { /* Unix domain sockets are local. The host gets ignored, just use the * specified domain socket address. Do not cache "DNS entries". 
There is * no DNS involved and we already have the filesystem path available */ const char *path = conn->unix_domain_socket; hostaddr = calloc(1, sizeof(struct Curl_dns_entry)); if(!hostaddr) result = CURLE_OUT_OF_MEMORY; else { bool longpath = FALSE; hostaddr->addr = Curl_unix2addr(path, &longpath, conn->abstract_unix_socket); if(hostaddr->addr) hostaddr->inuse++; else { /* Long paths are not supported for now */ if(longpath) { failf(data, "Unix socket path too long: '%s'", path); result = CURLE_COULDNT_RESOLVE_HOST; } else result = CURLE_OUT_OF_MEMORY; free(hostaddr); hostaddr = NULL; } } } else #endif if(!conn->bits.proxy) { struct hostname *connhost; if(conn->bits.conn_to_host) connhost = &conn->conn_to_host; else connhost = &conn->host; /* If not connecting via a proxy, extract the port from the URL, if it is * there, thus overriding any defaults that might have been set above. */ if(conn->bits.conn_to_port) conn->port = conn->conn_to_port; else conn->port = conn->remote_port; /* Resolve target host right on */ rc = Curl_resolv_timeout(conn, connhost->name, (int)conn->port, &hostaddr, timeout_ms); if(rc == CURLRESOLV_PENDING) *async = TRUE; else if(rc == CURLRESOLV_TIMEDOUT) result = CURLE_OPERATION_TIMEDOUT; else if(!hostaddr) { failf(data, "Couldn't resolve host '%s'", connhost->dispname); result = CURLE_COULDNT_RESOLVE_HOST; /* don't return yet, we need to clean up the timeout first */ } } else { /* This is a proxy that hasn't been resolved yet. */ struct hostname * const host = conn->bits.socksproxy ? &conn->socks_proxy.host : &conn->http_proxy.host; /* resolve proxy */ rc = Curl_resolv_timeout(conn, host->name, (int)conn->port, &hostaddr, timeout_ms); if(rc == CURLRESOLV_PENDING) *async = TRUE; else if(rc == CURLRESOLV_TIMEDOUT) result = CURLE_OPERATION_TIMEDOUT; else if(!hostaddr) { failf(data, "Couldn't resolve proxy '%s'", host->dispname); result = CURLE_COULDNT_RESOLVE_PROXY; /* don't return yet, we need to clean up the timeout first */ } } DEBUGASSERT(conn->dns_entry == NULL); conn->dns_entry = hostaddr; } return result; } /* * Cleanup the connection just allocated before we can move along and use the * previously existing one. All relevant data is copied over and old_conn is * ready for freeing once this function returns. 
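 *
 * Added note (not in the original source): in short, reuse_conn() moves the
 * possibly updated user/password, proxy credentials, host name and
 * "connect to" host/port from old_conn into the connection we keep, frees
 * whatever strings old_conn still owns, and marks the kept connection with
 * bits.reuse = TRUE.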
*/ static void reuse_conn(struct connectdata *old_conn, struct connectdata *conn) { free_fixed_hostname(&old_conn->http_proxy.host); free_fixed_hostname(&old_conn->socks_proxy.host); free(old_conn->http_proxy.host.rawalloc); free(old_conn->socks_proxy.host.rawalloc); /* free the SSL config struct from this connection struct as this was allocated in vain and is targeted for destruction */ Curl_free_primary_ssl_config(&old_conn->ssl_config); Curl_free_primary_ssl_config(&old_conn->proxy_ssl_config); conn->data = old_conn->data; /* get the user+password information from the old_conn struct since it may * be new for this request even when we re-use an existing connection */ conn->bits.user_passwd = old_conn->bits.user_passwd; if(conn->bits.user_passwd) { /* use the new user name and password though */ Curl_safefree(conn->user); Curl_safefree(conn->passwd); conn->user = old_conn->user; conn->passwd = old_conn->passwd; old_conn->user = NULL; old_conn->passwd = NULL; } conn->bits.proxy_user_passwd = old_conn->bits.proxy_user_passwd; if(conn->bits.proxy_user_passwd) { /* use the new proxy user name and proxy password though */ Curl_safefree(conn->http_proxy.user); Curl_safefree(conn->socks_proxy.user); Curl_safefree(conn->http_proxy.passwd); Curl_safefree(conn->socks_proxy.passwd); conn->http_proxy.user = old_conn->http_proxy.user; conn->socks_proxy.user = old_conn->socks_proxy.user; conn->http_proxy.passwd = old_conn->http_proxy.passwd; conn->socks_proxy.passwd = old_conn->socks_proxy.passwd; old_conn->http_proxy.user = NULL; old_conn->socks_proxy.user = NULL; old_conn->http_proxy.passwd = NULL; old_conn->socks_proxy.passwd = NULL; } /* host can change, when doing keepalive with a proxy or if the case is different this time etc */ free_fixed_hostname(&conn->host); free_fixed_hostname(&conn->conn_to_host); Curl_safefree(conn->host.rawalloc); Curl_safefree(conn->conn_to_host.rawalloc); conn->host = old_conn->host; conn->conn_to_host = old_conn->conn_to_host; conn->conn_to_port = old_conn->conn_to_port; conn->remote_port = old_conn->remote_port; /* persist connection info in session handle */ Curl_persistconninfo(conn); conn_reset_all_postponed_data(old_conn); /* free buffers */ /* re-use init */ conn->bits.reuse = TRUE; /* yes, we're re-using here */ Curl_safefree(old_conn->user); Curl_safefree(old_conn->passwd); Curl_safefree(old_conn->options); Curl_safefree(old_conn->http_proxy.user); Curl_safefree(old_conn->socks_proxy.user); Curl_safefree(old_conn->http_proxy.passwd); Curl_safefree(old_conn->socks_proxy.passwd); Curl_safefree(old_conn->localdev); Curl_llist_destroy(&old_conn->send_pipe, NULL); Curl_llist_destroy(&old_conn->recv_pipe, NULL); Curl_safefree(old_conn->master_buffer); #ifdef USE_UNIX_SOCKETS Curl_safefree(old_conn->unix_domain_socket); #endif } /** * create_conn() sets up a new connectdata struct, or re-uses an already * existing one, and resolves host name. * * if this function returns CURLE_OK and *async is set to TRUE, the resolve * response will be coming asynchronously. If *async is FALSE, the name is * already resolved. * * @param data The sessionhandle pointer * @param in_connect is set to the next connection data pointer * @param async is set TRUE when an async DNS resolution is pending * @see Curl_setup_conn() * * *NOTE* this function assigns the conn->data pointer! 
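 *
 * Illustrative addition (not in the original source): a hypothetical caller,
 * kept out of the build, showing how the async flag is meant to be acted on;
 * the name example_connect() is made up.
 */
#if 0 /* illustrative sketch only, not compiled */
static CURLcode example_connect(struct Curl_easy *data)
{
  struct connectdata *conn = NULL;
  bool async = FALSE;
  CURLcode result = create_conn(data, &conn, &async);

  if(!result && async) {
    /* the host name is still being resolved asynchronously; the caller has
       to wait for the resolve to complete before Curl_setup_conn() can take
       over with the fully resolved address */
  }
  return result;
}
#endif
/* create_conn() itself: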
*/ static CURLcode create_conn(struct Curl_easy *data, struct connectdata **in_connect, bool *async) { CURLcode result = CURLE_OK; struct connectdata *conn; struct connectdata *conn_temp = NULL; bool reuse; bool connections_available = TRUE; bool force_reuse = FALSE; bool waitpipe = FALSE; size_t max_host_connections = Curl_multi_max_host_connections(data->multi); size_t max_total_connections = Curl_multi_max_total_connections(data->multi); *async = FALSE; /************************************************************* * Check input data *************************************************************/ if(!data->change.url) { result = CURLE_URL_MALFORMAT; goto out; } /* First, split up the current URL in parts so that we can use the parts for checking against the already present connections. In order to not have to modify everything at once, we allocate a temporary connection data struct and fill in for comparison purposes. */ conn = allocate_conn(data); if(!conn) { result = CURLE_OUT_OF_MEMORY; goto out; } /* We must set the return variable as soon as possible, so that our parent can cleanup any possible allocs we may have done before any failure */ *in_connect = conn; result = parseurlandfillconn(data, conn); if(result) goto out; if(data->set.str[STRING_BEARER]) { conn->oauth_bearer = strdup(data->set.str[STRING_BEARER]); if(!conn->oauth_bearer) { result = CURLE_OUT_OF_MEMORY; goto out; } } #ifdef USE_UNIX_SOCKETS if(data->set.str[STRING_UNIX_SOCKET_PATH]) { conn->unix_domain_socket = strdup(data->set.str[STRING_UNIX_SOCKET_PATH]); if(conn->unix_domain_socket == NULL) { result = CURLE_OUT_OF_MEMORY; goto out; } conn->abstract_unix_socket = data->set.abstract_unix_socket; } #endif /* After the unix socket init but before the proxy vars are used, parse and initialize the proxy vars */ #ifndef CURL_DISABLE_PROXY result = create_conn_helper_init_proxy(conn); if(result) goto out; #endif /************************************************************* * If the protocol is using SSL and HTTP proxy is used, we set * the tunnel_proxy bit. *************************************************************/ if((conn->given->flags&PROTOPT_SSL) && conn->bits.httpproxy) conn->bits.tunnel_proxy = TRUE; /************************************************************* * Figure out the remote port number and fix it in the URL *************************************************************/ result = parse_remote_port(data, conn); if(result) goto out; /* Check for overridden login details and set them accordingly so they they are known when protocol->setup_connection is called! */ result = override_login(data, conn, &conn->user, &conn->passwd, &conn->options); if(result) goto out; result = set_login(conn); /* default credentials */ if(result) goto out; /************************************************************* * Process the "connect to" linked list of hostname/port mappings. * Do this after the remote port number has been fixed in the URL. 
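 *
 * Added example (not in the original source): each entry uses the
 * "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT" format parsed above, so
 * "example.com:443:backend.example.com:8443" redirects connections meant
 * for example.com:443 to backend.example.com:8443, while
 * "::other.example.com:" matches any host and port and only replaces the
 * host name.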
*************************************************************/ result = parse_connect_to_slist(data, conn, data->set.connect_to); if(result) goto out; /************************************************************* * IDN-fix the hostnames *************************************************************/ result = fix_hostname(conn, &conn->host); if(result) goto out; if(conn->bits.conn_to_host) { result = fix_hostname(conn, &conn->conn_to_host); if(result) goto out; } if(conn->bits.httpproxy) { result = fix_hostname(conn, &conn->http_proxy.host); if(result) goto out; } if(conn->bits.socksproxy) { result = fix_hostname(conn, &conn->socks_proxy.host); if(result) goto out; } /************************************************************* * Check whether the host and the "connect to host" are equal. * Do this after the hostnames have been IDN-fixed. *************************************************************/ if(conn->bits.conn_to_host && strcasecompare(conn->conn_to_host.name, conn->host.name)) { conn->bits.conn_to_host = FALSE; } /************************************************************* * Check whether the port and the "connect to port" are equal. * Do this after the remote port number has been fixed in the URL. *************************************************************/ if(conn->bits.conn_to_port && conn->conn_to_port == conn->remote_port) { conn->bits.conn_to_port = FALSE; } /************************************************************* * If the "connect to" feature is used with an HTTP proxy, * we set the tunnel_proxy bit. *************************************************************/ if((conn->bits.conn_to_host || conn->bits.conn_to_port) && conn->bits.httpproxy) conn->bits.tunnel_proxy = TRUE; /************************************************************* * Setup internals depending on protocol. Needs to be done after * we figured out what/if proxy to use. *************************************************************/ result = setup_connection_internals(conn); if(result) goto out; conn->recv[FIRSTSOCKET] = Curl_recv_plain; conn->send[FIRSTSOCKET] = Curl_send_plain; conn->recv[SECONDARYSOCKET] = Curl_recv_plain; conn->send[SECONDARYSOCKET] = Curl_send_plain; conn->bits.tcp_fastopen = data->set.tcp_fastopen; /*********************************************************************** * file: is a special case in that it doesn't need a network connection ***********************************************************************/ #ifndef CURL_DISABLE_FILE if(conn->handler->flags & PROTOPT_NONETWORK) { bool done; /* this is supposed to be the connect function so we better at least check that the file is present here! */ DEBUGASSERT(conn->handler->connect_it); Curl_persistconninfo(conn); result = conn->handler->connect_it(conn, &done); /* Setup a "faked" transfer that'll do nothing */ if(!result) { conn->data = data; conn->bits.tcpconnect[FIRSTSOCKET] = TRUE; /* we are "connected */ result = Curl_conncache_add_conn(data->state.conn_cache, conn); if(result) goto out; /* * Setup whatever necessary for a resumed transfer */ result = setup_range(data); if(result) { DEBUGASSERT(conn->handler->done); /* we ignore the return code for the protocol-specific DONE */ (void)conn->handler->done(conn, result, FALSE); goto out; } Curl_setup_transfer(conn, -1, -1, FALSE, NULL, /* no download */ -1, NULL); /* no upload */ } /* since we skip do_init() */ Curl_init_do(data, conn); goto out; } #endif /* Get a cloned copy of the SSL config situation stored in the connection struct. 
But to get this going nicely, we must first make sure that the strings in the master copy are pointing to the correct strings in the session handle strings array! Keep in mind that the pointers in the master copy are pointing to strings that will be freed as part of the Curl_easy struct, but all cloned copies will be separately allocated. */ data->set.ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_ORIG]; data->set.proxy_ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_PROXY]; data->set.ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_ORIG]; data->set.proxy_ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_PROXY]; data->set.ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE]; data->set.proxy_ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE]; data->set.ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET]; data->set.proxy_ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET]; data->set.ssl.primary.cipher_list = data->set.str[STRING_SSL_CIPHER_LIST_ORIG]; data->set.proxy_ssl.primary.cipher_list = data->set.str[STRING_SSL_CIPHER_LIST_PROXY]; data->set.ssl.primary.cipher_list13 = data->set.str[STRING_SSL_CIPHER13_LIST_ORIG]; data->set.proxy_ssl.primary.cipher_list13 = data->set.str[STRING_SSL_CIPHER13_LIST_PROXY]; data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG]; data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY]; data->set.ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_ORIG]; data->set.proxy_ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_PROXY]; data->set.ssl.cert = data->set.str[STRING_CERT_ORIG]; data->set.proxy_ssl.cert = data->set.str[STRING_CERT_PROXY]; data->set.ssl.cert_type = data->set.str[STRING_CERT_TYPE_ORIG]; data->set.proxy_ssl.cert_type = data->set.str[STRING_CERT_TYPE_PROXY]; data->set.ssl.key = data->set.str[STRING_KEY_ORIG]; data->set.proxy_ssl.key = data->set.str[STRING_KEY_PROXY]; data->set.ssl.key_type = data->set.str[STRING_KEY_TYPE_ORIG]; data->set.proxy_ssl.key_type = data->set.str[STRING_KEY_TYPE_PROXY]; data->set.ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_ORIG]; data->set.proxy_ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_PROXY]; data->set.ssl.primary.clientcert = data->set.str[STRING_CERT_ORIG]; data->set.proxy_ssl.primary.clientcert = data->set.str[STRING_CERT_PROXY]; #ifdef USE_TLS_SRP data->set.ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG]; data->set.proxy_ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_PROXY]; data->set.ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG]; data->set.proxy_ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_PROXY]; #endif if(!Curl_clone_primary_ssl_config(&data->set.ssl.primary, &conn->ssl_config)) { result = CURLE_OUT_OF_MEMORY; goto out; } if(!Curl_clone_primary_ssl_config(&data->set.proxy_ssl.primary, &conn->proxy_ssl_config)) { result = CURLE_OUT_OF_MEMORY; goto out; } prune_dead_connections(data); /************************************************************* * Check the current list of connections to see if we can * re-use an already existing one or if we have to create a * new one. *************************************************************/ DEBUGASSERT(conn->user); DEBUGASSERT(conn->passwd); /* reuse_fresh is TRUE if we are told to use a new connection by force, but we only acknowledge this option if this is not a re-used connection already (which happens due to follow-location or during a HTTP authentication phase). 
*/ if(data->set.reuse_fresh && !data->state.this_is_a_follow) reuse = FALSE; else reuse = ConnectionExists(data, conn, &conn_temp, &force_reuse, &waitpipe); /* If we found a reusable connection that is now marked as in use, we may still want to open a new connection if we are pipelining. */ if(reuse && !force_reuse && IsPipeliningPossible(data, conn_temp)) { size_t pipelen = conn_temp->send_pipe.size + conn_temp->recv_pipe.size; if(pipelen > 0) { infof(data, "Found connection %ld, with requests in the pipe (%zu)\n", conn_temp->connection_id, pipelen); if(Curl_conncache_bundle_size(conn_temp) < max_host_connections && Curl_conncache_size(data) < max_total_connections) { /* We want a new connection anyway */ reuse = FALSE; infof(data, "We can reuse, but we want a new connection anyway\n"); Curl_conncache_return_conn(conn_temp); } } } if(reuse) { /* * We already have a connection for this, we got the former connection * in the conn_temp variable and thus we need to cleanup the one we * just allocated before we can move along and use the previously * existing one. */ reuse_conn(conn, conn_temp); #ifdef USE_SSL free(conn->ssl_extra); #endif free(conn); /* we don't need this anymore */ conn = conn_temp; *in_connect = conn; infof(data, "Re-using existing connection! (#%ld) with %s %s\n", conn->connection_id, conn->bits.proxy?"proxy":"host", conn->socks_proxy.host.name ? conn->socks_proxy.host.dispname : conn->http_proxy.host.name ? conn->http_proxy.host.dispname : conn->host.dispname); } else { /* We have decided that we want a new connection. However, we may not be able to do that if we have reached the limit of how many connections we are allowed to open. */ if(conn->handler->flags & PROTOPT_ALPN_NPN) { /* The protocol wants it, so set the bits if enabled in the easy handle (default) */ if(data->set.ssl_enable_alpn) conn->bits.tls_enable_alpn = TRUE; if(data->set.ssl_enable_npn) conn->bits.tls_enable_npn = TRUE; } if(waitpipe) /* There is a connection that *might* become usable for pipelining "soon", and we wait for that */ connections_available = FALSE; else { /* this gets a lock on the conncache */ struct connectbundle *bundle = Curl_conncache_find_bundle(conn, data->state.conn_cache); if(max_host_connections > 0 && bundle && (bundle->num_connections >= max_host_connections)) { struct connectdata *conn_candidate; /* The bundle is full. Extract the oldest connection. */ conn_candidate = Curl_conncache_extract_bundle(data, bundle); Curl_conncache_unlock(conn); if(conn_candidate) (void)Curl_disconnect(data, conn_candidate, /* dead_connection */ FALSE); else { infof(data, "No more connections allowed to host: %zu\n", max_host_connections); connections_available = FALSE; } } else Curl_conncache_unlock(conn); } if(connections_available && (max_total_connections > 0) && (Curl_conncache_size(data) >= max_total_connections)) { struct connectdata *conn_candidate; /* The cache is full. Let's see if we can kill a connection. */ conn_candidate = Curl_conncache_extract_oldest(data); if(conn_candidate) (void)Curl_disconnect(data, conn_candidate, /* dead_connection */ FALSE); else { infof(data, "No connections available in cache\n"); connections_available = FALSE; } } if(!connections_available) { infof(data, "No connections available.\n"); conn_free(conn); *in_connect = NULL; result = CURLE_NO_CONNECTION_AVAILABLE; goto out; } else { /* * This is a brand new connection, so let's store it in the connection * cache of ours! 
*/ result = Curl_conncache_add_conn(data->state.conn_cache, conn); if(result) goto out; } #if defined(USE_NTLM) /* If NTLM is requested in a part of this connection, make sure we don't assume the state is fine as this is a fresh connection and NTLM is connection based. */ if((data->state.authhost.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && data->state.authhost.done) { infof(data, "NTLM picked AND auth done set, clear picked!\n"); data->state.authhost.picked = CURLAUTH_NONE; data->state.authhost.done = FALSE; } if((data->state.authproxy.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && data->state.authproxy.done) { infof(data, "NTLM-proxy picked AND auth done set, clear picked!\n"); data->state.authproxy.picked = CURLAUTH_NONE; data->state.authproxy.done = FALSE; } #endif } /* Setup and init stuff before DO starts, in preparing for the transfer. */ Curl_init_do(data, conn); /* * Setup whatever necessary for a resumed transfer */ result = setup_range(data); if(result) goto out; /* Continue connectdata initialization here. */ /* * Inherit the proper values from the urldata struct AFTER we have arranged * the persistent connection stuff */ conn->seek_func = data->set.seek_func; conn->seek_client = data->set.seek_client; /************************************************************* * Resolve the address of the server or proxy *************************************************************/ result = resolve_server(data, conn, async); out: return result; } /* Curl_setup_conn() is called after the name resolve initiated in * create_conn() is all done. * * Curl_setup_conn() also handles reused connections * * conn->data MUST already have been setup fine (in create_conn) */ CURLcode Curl_setup_conn(struct connectdata *conn, bool *protocol_done) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; Curl_pgrsTime(data, TIMER_NAMELOOKUP); if(conn->handler->flags & PROTOPT_NONETWORK) { /* nothing to setup when not using a network */ *protocol_done = TRUE; return result; } *protocol_done = FALSE; /* default to not done */ /* set proxy_connect_closed to false unconditionally already here since it is used strictly to provide extra information to a parent function in the case of proxy CONNECT failures and we must make sure we don't have it lingering set from a previous invoke */ conn->bits.proxy_connect_closed = FALSE; /* * Set user-agent. Used for HTTP, but since we can attempt to tunnel * basically anything through a http proxy we can't limit this based on * protocol. 
*/ if(data->set.str[STRING_USERAGENT]) { Curl_safefree(conn->allocptr.uagent); conn->allocptr.uagent = aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]); if(!conn->allocptr.uagent) return CURLE_OUT_OF_MEMORY; } data->req.headerbytecount = 0; #ifdef CURL_DO_LINEEND_CONV data->state.crlf_conversions = 0; /* reset CRLF conversion counter */ #endif /* CURL_DO_LINEEND_CONV */ /* set start time here for timeout purposes in the connect procedure, it is later set again for the progress meter purpose */ conn->now = Curl_now(); if(CURL_SOCKET_BAD == conn->sock[FIRSTSOCKET]) { conn->bits.tcpconnect[FIRSTSOCKET] = FALSE; result = Curl_connecthost(conn, conn->dns_entry); if(result) return result; } else { Curl_pgrsTime(data, TIMER_CONNECT); /* we're connected already */ Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */ conn->bits.tcpconnect[FIRSTSOCKET] = TRUE; *protocol_done = TRUE; Curl_updateconninfo(conn, conn->sock[FIRSTSOCKET]); Curl_verboseconnect(conn); } conn->now = Curl_now(); /* time this *after* the connect is done, we set this here perhaps a second time */ return result; } CURLcode Curl_connect(struct Curl_easy *data, struct connectdata **in_connect, bool *asyncp, bool *protocol_done) { CURLcode result; *asyncp = FALSE; /* assume synchronous resolves by default */ /* init the single-transfer specific data */ Curl_free_request_state(data); memset(&data->req, 0, sizeof(struct SingleRequest)); data->req.maxdownload = -1; /* call the stuff that needs to be called */ result = create_conn(data, in_connect, asyncp); if(!result) { if(CONN_INUSE(*in_connect)) /* pipelining */ *protocol_done = TRUE; else if(!*asyncp) { /* DNS resolution is done: that's either because this is a reused connection, in which case DNS was unnecessary, or because DNS really did finish already (synch resolver/fast async resolve) */ result = Curl_setup_conn(*in_connect, protocol_done); } } if(result == CURLE_NO_CONNECTION_AVAILABLE) { *in_connect = NULL; return result; } else if(result && *in_connect) { /* We're not allowed to return failure with memory left allocated in the connectdata struct, free those here */ Curl_disconnect(data, *in_connect, TRUE); *in_connect = NULL; /* return a NULL */ } return result; } /* * Curl_init_do() inits the readwrite session. This is inited each time (in * the DO function before the protocol-specific DO functions are invoked) for * a transfer, sometimes multiple times on the same Curl_easy. Make sure * nothing in here depends on stuff that are setup dynamically for the * transfer. * * Allow this function to get called with 'conn' set to NULL. */ CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn) { struct SingleRequest *k = &data->req; if(conn) { conn->bits.do_more = FALSE; /* by default there's no curl_do_more() to use */ /* if the protocol used doesn't support wildcards, switch it off */ if(data->state.wildcardmatch && !(conn->handler->flags & PROTOPT_WILDCARD)) data->state.wildcardmatch = FALSE; } data->state.done = FALSE; /* *_done() is not called yet */ data->state.expect100header = FALSE; if(data->set.opt_no_body) /* in HTTP lingo, no body means using the HEAD request... */ data->set.httpreq = HTTPREQ_HEAD; else if(HTTPREQ_HEAD == data->set.httpreq) /* ... but if unset there really is no perfect method that is the "opposite" of HEAD but in reality most people probably think GET then. The important thing is that we can't let it remain HEAD if the opt_no_body is set FALSE since then we'll behave wrong when getting HTTP. 
*/ data->set.httpreq = HTTPREQ_GET; k->start = Curl_now(); /* start time */ k->now = k->start; /* current time is now */ k->header = TRUE; /* assume header */ k->bytecount = 0; k->buf = data->state.buffer; k->hbufp = data->state.headerbuff; k->ignorebody = FALSE; Curl_speedinit(data); Curl_pgrsSetUploadCounter(data, 0); Curl_pgrsSetDownloadCounter(data, 0); return CURLE_OK; } /* * get_protocol_family() * * This is used to return the protocol family for a given protocol. * * Parameters: * * protocol [in] - A single bit protocol identifier such as HTTP or HTTPS. * * Returns the family as a single bit protocol identifier. */ static unsigned int get_protocol_family(unsigned int protocol) { unsigned int family; switch(protocol) { case CURLPROTO_HTTP: case CURLPROTO_HTTPS: family = CURLPROTO_HTTP; break; case CURLPROTO_FTP: case CURLPROTO_FTPS: family = CURLPROTO_FTP; break; case CURLPROTO_SCP: family = CURLPROTO_SCP; break; case CURLPROTO_SFTP: family = CURLPROTO_SFTP; break; case CURLPROTO_TELNET: family = CURLPROTO_TELNET; break; case CURLPROTO_LDAP: case CURLPROTO_LDAPS: family = CURLPROTO_LDAP; break; case CURLPROTO_DICT: family = CURLPROTO_DICT; break; case CURLPROTO_FILE: family = CURLPROTO_FILE; break; case CURLPROTO_TFTP: family = CURLPROTO_TFTP; break; case CURLPROTO_IMAP: case CURLPROTO_IMAPS: family = CURLPROTO_IMAP; break; case CURLPROTO_POP3: case CURLPROTO_POP3S: family = CURLPROTO_POP3; break; case CURLPROTO_SMTP: case CURLPROTO_SMTPS: family = CURLPROTO_SMTP; break; case CURLPROTO_RTSP: family = CURLPROTO_RTSP; break; case CURLPROTO_RTMP: case CURLPROTO_RTMPS: family = CURLPROTO_RTMP; break; case CURLPROTO_RTMPT: case CURLPROTO_RTMPTS: family = CURLPROTO_RTMPT; break; case CURLPROTO_RTMPE: family = CURLPROTO_RTMPE; break; case CURLPROTO_RTMPTE: family = CURLPROTO_RTMPTE; break; case CURLPROTO_GOPHER: family = CURLPROTO_GOPHER; break; case CURLPROTO_SMB: case CURLPROTO_SMBS: family = CURLPROTO_SMB; break; default: family = 0; break; } return family; } /* * Wrapper to call functions in Curl_conncache_foreach() * * Returns always 0. */ static int conn_upkeep(struct connectdata *conn, void *param) { /* Param is unused. */ (void)param; if(conn->handler->connection_check) { /* Do a protocol-specific keepalive check on the connection. */ conn->handler->connection_check(conn, CONNCHECK_KEEPALIVE); } return 0; /* continue iteration */ } CURLcode Curl_upkeep(struct conncache *conn_cache, void *data) { /* Loop over every connection and make connection alive. */ Curl_conncache_foreach(data, conn_cache, data, conn_upkeep); return CURLE_OK; }
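/* Illustrative sketch, not part of lib/url.c: a minimal libcurl application
 * showing how the internal connect path above (create_conn() ->
 * Curl_connect() -> Curl_setup_conn()) is normally exercised through the
 * public easy API. Only documented public symbols are used; the URL and the
 * choice of CURLOPT_FRESH_CONNECT (which sets data->set.reuse_fresh, the
 * flag create_conn() checks before calling ConnectionExists()) are picked
 * purely for the example. */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURLcode rc;
  CURL *easy = curl_easy_init();      /* allocates a Curl_easy via Curl_open() */
  if(!easy)
    return 1;

  curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
  /* force a brand new connection instead of re-using one from the cache */
  curl_easy_setopt(easy, CURLOPT_FRESH_CONNECT, 1L);

  rc = curl_easy_perform(easy);       /* drives Curl_connect()/Curl_setup_conn() */
  if(rc != CURLE_OK)
    fprintf(stderr, "transfer failed: %s\n", curl_easy_strerror(rc));

  curl_easy_cleanup(easy);            /* tears the handle down via Curl_close() */
  return 0;
}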
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include "curl_setup.h" #ifdef HAVE_NETINET_IN_H #include <netinet/in.h> #endif #ifdef HAVE_NETDB_H #include <netdb.h> #endif #ifdef HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #ifdef HAVE_NET_IF_H #include <net/if.h> #endif #ifdef HAVE_SYS_IOCTL_H #include <sys/ioctl.h> #endif #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef __VMS #include <in.h> #include <inet.h> #endif #ifdef HAVE_SYS_UN_H #include <sys/un.h> #endif #ifndef HAVE_SOCKET #error "We can't compile without socket() support!" #endif #include <limits.h> #ifdef USE_LIBIDN2 #include <idn2.h> #elif defined(USE_WIN32_IDN) /* prototype for curl_win32_idn_to_ascii() */ bool curl_win32_idn_to_ascii(const char *in, char **out); #endif /* USE_LIBIDN2 */ #include "urldata.h" #include "netrc.h" #include "formdata.h" #include "mime.h" #include "vtls/vtls.h" #include "hostip.h" #include "transfer.h" #include "sendf.h" #include "progress.h" #include "cookie.h" #include "strcase.h" #include "strerror.h" #include "escape.h" #include "strtok.h" #include "share.h" #include "content_encoding.h" #include "http_digest.h" #include "http_negotiate.h" #include "select.h" #include "multiif.h" #include "easyif.h" #include "speedcheck.h" #include "warnless.h" #include "non-ascii.h" #include "inet_pton.h" #include "getinfo.h" #include "urlapi-int.h" /* And now for the protocols */ #include "ftp.h" #include "dict.h" #include "telnet.h" #include "tftp.h" #include "http.h" #include "http2.h" #include "file.h" #include "curl_ldap.h" #include "ssh.h" #include "imap.h" #include "url.h" #include "connect.h" #include "inet_ntop.h" #include "http_ntlm.h" #include "curl_ntlm_wb.h" #include "socks.h" #include "curl_rtmp.h" #include "gopher.h" #include "http_proxy.h" #include "conncache.h" #include "multihandle.h" #include "pipeline.h" #include "dotdot.h" #include "strdup.h" #include "setopt.h" /* The last 3 #include files should be in this order */ #include "curl_printf.h" #include "curl_memory.h" #include "memdebug.h" static void conn_free(struct connectdata *conn); static void free_fixed_hostname(struct hostname *host); static unsigned int get_protocol_family(unsigned int protocol); /* Some parts of the code (e.g. chunked encoding) assume this buffer has at * more than just a few bytes to play with. Don't let it become too small or * bad things will happen. */ #if READBUFFER_SIZE < READBUFFER_MIN # error READBUFFER_SIZE is too small #endif /* * Protocol table. 
*/ static const struct Curl_handler * const protocols[] = { #ifndef CURL_DISABLE_HTTP &Curl_handler_http, #endif #if defined(USE_SSL) && !defined(CURL_DISABLE_HTTP) &Curl_handler_https, #endif #ifndef CURL_DISABLE_FTP &Curl_handler_ftp, #endif #if defined(USE_SSL) && !defined(CURL_DISABLE_FTP) &Curl_handler_ftps, #endif #ifndef CURL_DISABLE_TELNET &Curl_handler_telnet, #endif #ifndef CURL_DISABLE_DICT &Curl_handler_dict, #endif #ifndef CURL_DISABLE_LDAP &Curl_handler_ldap, #if !defined(CURL_DISABLE_LDAPS) && \ ((defined(USE_OPENLDAP) && defined(USE_SSL)) || \ (!defined(USE_OPENLDAP) && defined(HAVE_LDAP_SSL))) &Curl_handler_ldaps, #endif #endif #ifndef CURL_DISABLE_FILE &Curl_handler_file, #endif #ifndef CURL_DISABLE_TFTP &Curl_handler_tftp, #endif #if defined(USE_LIBSSH2) || defined(USE_LIBSSH) &Curl_handler_scp, #endif #if defined(USE_LIBSSH2) || defined(USE_LIBSSH) &Curl_handler_sftp, #endif #ifndef CURL_DISABLE_IMAP &Curl_handler_imap, #ifdef USE_SSL &Curl_handler_imaps, #endif #endif #ifndef CURL_DISABLE_POP3 &Curl_handler_pop3, #ifdef USE_SSL &Curl_handler_pop3s, #endif #endif #if !defined(CURL_DISABLE_SMB) && defined(USE_NTLM) && \ (CURL_SIZEOF_CURL_OFF_T > 4) && \ (!defined(USE_WINDOWS_SSPI) || defined(USE_WIN32_CRYPTO)) &Curl_handler_smb, #ifdef USE_SSL &Curl_handler_smbs, #endif #endif #ifndef CURL_DISABLE_SMTP &Curl_handler_smtp, #ifdef USE_SSL &Curl_handler_smtps, #endif #endif #ifndef CURL_DISABLE_RTSP &Curl_handler_rtsp, #endif #ifndef CURL_DISABLE_GOPHER &Curl_handler_gopher, #endif #ifdef USE_LIBRTMP &Curl_handler_rtmp, &Curl_handler_rtmpt, &Curl_handler_rtmpe, &Curl_handler_rtmpte, &Curl_handler_rtmps, &Curl_handler_rtmpts, #endif (struct Curl_handler *) NULL }; /* * Dummy handler for undefined protocol schemes. */ static const struct Curl_handler Curl_handler_dummy = { "<no protocol>", /* scheme */ ZERO_NULL, /* setup_connection */ ZERO_NULL, /* do_it */ ZERO_NULL, /* done */ ZERO_NULL, /* do_more */ ZERO_NULL, /* connect_it */ ZERO_NULL, /* connecting */ ZERO_NULL, /* doing */ ZERO_NULL, /* proto_getsock */ ZERO_NULL, /* doing_getsock */ ZERO_NULL, /* domore_getsock */ ZERO_NULL, /* perform_getsock */ ZERO_NULL, /* disconnect */ ZERO_NULL, /* readwrite */ ZERO_NULL, /* connection_check */ 0, /* defport */ 0, /* protocol */ PROTOPT_NONE /* flags */ }; void Curl_freeset(struct Curl_easy *data) { /* Free all dynamic strings stored in the data->set substructure. */ enum dupstring i; for(i = (enum dupstring)0; i < STRING_LAST; i++) { Curl_safefree(data->set.str[i]); } if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; if(data->change.url_alloc) { Curl_safefree(data->change.url); data->change.url_alloc = FALSE; } data->change.url = NULL; Curl_mime_cleanpart(&data->set.mimepost); } /* free the URL pieces */ void Curl_up_free(struct Curl_easy *data) { struct urlpieces *up = &data->state.up; Curl_safefree(up->scheme); Curl_safefree(up->hostname); Curl_safefree(up->port); Curl_safefree(up->user); Curl_safefree(up->password); Curl_safefree(up->options); Curl_safefree(up->path); Curl_safefree(up->query); curl_url_cleanup(data->state.uh); data->state.uh = NULL; } /* * This is the internal function curl_easy_cleanup() calls. This should * cleanup and free all resources associated with this sessionhandle. * * NOTE: if we ever add something that attempts to write to a socket or * similar here, we must ignore SIGPIPE first. It is currently only done * when curl_easy_perform() is invoked. 
*/ CURLcode Curl_close(struct Curl_easy *data) { struct Curl_multi *m; if(!data) return CURLE_OK; Curl_expire_clear(data); /* shut off timers */ m = data->multi; if(m) /* This handle is still part of a multi handle, take care of this first and detach this handle from there. */ curl_multi_remove_handle(data->multi, data); if(data->multi_easy) { /* when curl_easy_perform() is used, it creates its own multi handle to use and this is the one */ curl_multi_cleanup(data->multi_easy); data->multi_easy = NULL; } /* Destroy the timeout list that is held in the easy handle. It is /normally/ done by curl_multi_remove_handle() but this is "just in case" */ Curl_llist_destroy(&data->state.timeoutlist, NULL); data->magic = 0; /* force a clear AFTER the possibly enforced removal from the multi handle, since that function uses the magic field! */ if(data->state.rangestringalloc) free(data->state.range); /* freed here just in case DONE wasn't called */ Curl_free_request_state(data); /* Close down all open SSL info and sessions */ Curl_ssl_close_all(data); Curl_safefree(data->state.first_host); Curl_safefree(data->state.scratch); Curl_ssl_free_certinfo(data); /* Cleanup possible redirect junk */ free(data->req.newurl); data->req.newurl = NULL; if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; Curl_up_free(data); Curl_safefree(data->state.buffer); Curl_safefree(data->state.headerbuff); Curl_safefree(data->state.ulbuf); Curl_flush_cookies(data, 1); Curl_digest_cleanup(data); Curl_safefree(data->info.contenttype); Curl_safefree(data->info.wouldredirect); /* this destroys the channel and we cannot use it anymore after this */ Curl_resolver_cleanup(data->state.resolver); Curl_http2_cleanup_dependencies(data); Curl_convert_close(data); /* No longer a dirty share, if it exists */ if(data->share) { Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE); data->share->dirty--; Curl_share_unlock(data, CURL_LOCK_DATA_SHARE); } /* destruct wildcard structures if it is needed */ Curl_wildcard_dtor(&data->wildcard); Curl_freeset(data); free(data); return CURLE_OK; } /* * Initialize the UserDefined fields within a Curl_easy. * This may be safely called on a new or existing Curl_easy. 
*/ CURLcode Curl_init_userdefined(struct Curl_easy *data) { struct UserDefined *set = &data->set; CURLcode result = CURLE_OK; set->out = stdout; /* default output to stdout */ set->in_set = stdin; /* default input from stdin */ set->err = stderr; /* default stderr to stderr */ /* use fwrite as default function to store output */ set->fwrite_func = (curl_write_callback)fwrite; /* use fread as default function to read input */ set->fread_func_set = (curl_read_callback)fread; set->is_fread_set = 0; set->is_fwrite_set = 0; set->seek_func = ZERO_NULL; set->seek_client = ZERO_NULL; /* conversion callbacks for non-ASCII hosts */ set->convfromnetwork = ZERO_NULL; set->convtonetwork = ZERO_NULL; set->convfromutf8 = ZERO_NULL; set->filesize = -1; /* we don't know the size */ set->postfieldsize = -1; /* unknown size */ set->maxredirs = -1; /* allow any amount by default */ set->httpreq = HTTPREQ_GET; /* Default HTTP request */ set->rtspreq = RTSPREQ_OPTIONS; /* Default RTSP request */ set->ftp_use_epsv = TRUE; /* FTP defaults to EPSV operations */ set->ftp_use_eprt = TRUE; /* FTP defaults to EPRT operations */ set->ftp_use_pret = FALSE; /* mainly useful for drftpd servers */ set->ftp_filemethod = FTPFILE_MULTICWD; set->dns_cache_timeout = 60; /* Timeout every 60 seconds by default */ /* Set the default size of the SSL session ID cache */ set->general_ssl.max_ssl_sessions = 5; set->proxyport = 0; set->proxytype = CURLPROXY_HTTP; /* defaults to HTTP proxy */ set->httpauth = CURLAUTH_BASIC; /* defaults to basic */ set->proxyauth = CURLAUTH_BASIC; /* defaults to basic */ /* SOCKS5 proxy auth defaults to username/password + GSS-API */ set->socks5auth = CURLAUTH_BASIC | CURLAUTH_GSSAPI; /* make libcurl quiet by default: */ set->hide_progress = TRUE; /* CURLOPT_NOPROGRESS changes these */ Curl_mime_initpart(&set->mimepost, data); /* * libcurl 7.10 introduced SSL verification *by default*! This needs to be * switched off unless wanted. */ set->ssl.primary.verifypeer = TRUE; set->ssl.primary.verifyhost = TRUE; #ifdef USE_TLS_SRP set->ssl.authtype = CURL_TLSAUTH_NONE; #endif set->ssh_auth_types = CURLSSH_AUTH_DEFAULT; /* defaults to any auth type */ set->ssl.primary.sessionid = TRUE; /* session ID caching enabled by default */ set->proxy_ssl = set->ssl; set->new_file_perms = 0644; /* Default permissions */ set->new_directory_perms = 0755; /* Default permissions */ /* for the *protocols fields we don't use the CURLPROTO_ALL convenience define since we internally only use the lower 16 bits for the passed in bitmask to not conflict with the private bits */ set->allowed_protocols = CURLPROTO_ALL; set->redir_protocols = CURLPROTO_ALL & /* All except FILE, SCP and SMB */ ~(CURLPROTO_FILE | CURLPROTO_SCP | CURLPROTO_SMB | CURLPROTO_SMBS); #if defined(HAVE_GSSAPI) || defined(USE_WINDOWS_SSPI) /* * disallow unprotected protection negotiation NEC reference implementation * seem not to follow rfc1961 section 4.3/4.4 */ set->socks5_gssapi_nec = FALSE; #endif /* Set the default CA cert bundle/path detected/specified at build time. * * If Schannel (WinSSL) is the selected SSL backend then these locations * are ignored. We allow setting CA location for schannel only when * explicitly specified by the user via CURLOPT_CAINFO / --cacert. 
*/ if(Curl_ssl_backend() != CURLSSLBACKEND_SCHANNEL) { #if defined(CURL_CA_BUNDLE) result = Curl_setstropt(&set->str[STRING_SSL_CAFILE_ORIG], CURL_CA_BUNDLE); if(result) return result; result = Curl_setstropt(&set->str[STRING_SSL_CAFILE_PROXY], CURL_CA_BUNDLE); if(result) return result; #endif #if defined(CURL_CA_PATH) result = Curl_setstropt(&set->str[STRING_SSL_CAPATH_ORIG], CURL_CA_PATH); if(result) return result; result = Curl_setstropt(&set->str[STRING_SSL_CAPATH_PROXY], CURL_CA_PATH); if(result) return result; #endif } set->wildcard_enabled = FALSE; set->chunk_bgn = ZERO_NULL; set->chunk_end = ZERO_NULL; set->tcp_keepalive = FALSE; set->tcp_keepintvl = 60; set->tcp_keepidle = 60; set->tcp_fastopen = FALSE; set->tcp_nodelay = TRUE; set->ssl_enable_npn = TRUE; set->ssl_enable_alpn = TRUE; set->expect_100_timeout = 1000L; /* Wait for a second by default. */ set->sep_headers = TRUE; /* separated header lists by default */ set->buffer_size = READBUFFER_SIZE; set->upload_buffer_size = UPLOADBUFFER_DEFAULT; set->happy_eyeballs_timeout = CURL_HET_DEFAULT; set->fnmatch = ZERO_NULL; set->upkeep_interval_ms = CURL_UPKEEP_INTERVAL_DEFAULT; set->maxconnects = DEFAULT_CONNCACHE_SIZE; /* for easy handles */ set->httpversion = #ifdef USE_NGHTTP2 CURL_HTTP_VERSION_2TLS #else CURL_HTTP_VERSION_1_1 #endif ; Curl_http2_init_userset(set); return result; } /** * Curl_open() * * @param curl is a pointer to a sessionhandle pointer that gets set by this * function. * @return CURLcode */ CURLcode Curl_open(struct Curl_easy **curl) { CURLcode result; struct Curl_easy *data; /* Very simple start-up: alloc the struct, init it with zeroes and return */ data = calloc(1, sizeof(struct Curl_easy)); if(!data) { /* this is a very serious error */ DEBUGF(fprintf(stderr, "Error: calloc of Curl_easy failed\n")); return CURLE_OUT_OF_MEMORY; } data->magic = CURLEASY_MAGIC_NUMBER; result = Curl_resolver_init(&data->state.resolver); if(result) { DEBUGF(fprintf(stderr, "Error: resolver_init failed\n")); free(data); return result; } /* We do some initial setup here, all those fields that can't be just 0 */ data->state.buffer = malloc(READBUFFER_SIZE + 1); if(!data->state.buffer) { DEBUGF(fprintf(stderr, "Error: malloc of buffer failed\n")); result = CURLE_OUT_OF_MEMORY; } else { data->state.headerbuff = malloc(HEADERSIZE); if(!data->state.headerbuff) { DEBUGF(fprintf(stderr, "Error: malloc of headerbuff failed\n")); result = CURLE_OUT_OF_MEMORY; } else { result = Curl_init_userdefined(data); data->state.headersize = HEADERSIZE; Curl_convert_init(data); Curl_initinfo(data); /* most recent connection is not yet defined */ data->state.lastconnect = NULL; data->progress.flags |= PGRS_HIDE; data->state.current_speed = -1; /* init to negative == impossible */ Curl_http2_init_state(&data->state); } } if(result) { Curl_resolver_cleanup(data->state.resolver); free(data->state.buffer); free(data->state.headerbuff); Curl_freeset(data); free(data); data = NULL; } else *curl = data; return result; } #ifdef USE_RECV_BEFORE_SEND_WORKAROUND static void conn_reset_postponed_data(struct connectdata *conn, int num) { struct postponed_data * const psnd = &(conn->postponed[num]); if(psnd->buffer) { DEBUGASSERT(psnd->allocated_size > 0); DEBUGASSERT(psnd->recv_size <= psnd->allocated_size); DEBUGASSERT(psnd->recv_size ? 
(psnd->recv_processed < psnd->recv_size) : (psnd->recv_processed == 0)); DEBUGASSERT(psnd->bindsock != CURL_SOCKET_BAD); free(psnd->buffer); psnd->buffer = NULL; psnd->allocated_size = 0; psnd->recv_size = 0; psnd->recv_processed = 0; #ifdef DEBUGBUILD psnd->bindsock = CURL_SOCKET_BAD; /* used only for DEBUGASSERT */ #endif /* DEBUGBUILD */ } else { DEBUGASSERT(psnd->allocated_size == 0); DEBUGASSERT(psnd->recv_size == 0); DEBUGASSERT(psnd->recv_processed == 0); DEBUGASSERT(psnd->bindsock == CURL_SOCKET_BAD); } } static void conn_reset_all_postponed_data(struct connectdata *conn) { conn_reset_postponed_data(conn, 0); conn_reset_postponed_data(conn, 1); } #else /* ! USE_RECV_BEFORE_SEND_WORKAROUND */ /* Use "do-nothing" macro instead of function when workaround not used */ #define conn_reset_all_postponed_data(c) do {} WHILE_FALSE #endif /* ! USE_RECV_BEFORE_SEND_WORKAROUND */ static void conn_free(struct connectdata *conn) { if(!conn) return; /* possible left-overs from the async name resolvers */ Curl_resolver_cancel(conn); /* close the SSL stuff before we close any sockets since they will/may write to the sockets */ Curl_ssl_close(conn, FIRSTSOCKET); Curl_ssl_close(conn, SECONDARYSOCKET); /* close possibly still open sockets */ if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) Curl_closesocket(conn, conn->sock[SECONDARYSOCKET]); if(CURL_SOCKET_BAD != conn->sock[FIRSTSOCKET]) Curl_closesocket(conn, conn->sock[FIRSTSOCKET]); if(CURL_SOCKET_BAD != conn->tempsock[0]) Curl_closesocket(conn, conn->tempsock[0]); if(CURL_SOCKET_BAD != conn->tempsock[1]) Curl_closesocket(conn, conn->tempsock[1]); #if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \ defined(NTLM_WB_ENABLED) Curl_ntlm_wb_cleanup(conn); #endif Curl_safefree(conn->user); Curl_safefree(conn->passwd); Curl_safefree(conn->oauth_bearer); Curl_safefree(conn->options); Curl_safefree(conn->http_proxy.user); Curl_safefree(conn->socks_proxy.user); Curl_safefree(conn->http_proxy.passwd); Curl_safefree(conn->socks_proxy.passwd); Curl_safefree(conn->allocptr.proxyuserpwd); Curl_safefree(conn->allocptr.uagent); Curl_safefree(conn->allocptr.userpwd); Curl_safefree(conn->allocptr.accept_encoding); Curl_safefree(conn->allocptr.te); Curl_safefree(conn->allocptr.rangeline); Curl_safefree(conn->allocptr.ref); Curl_safefree(conn->allocptr.host); Curl_safefree(conn->allocptr.cookiehost); Curl_safefree(conn->allocptr.rtsp_transport); Curl_safefree(conn->trailer); Curl_safefree(conn->host.rawalloc); /* host name buffer */ Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */ Curl_safefree(conn->secondaryhostname); Curl_safefree(conn->http_proxy.host.rawalloc); /* http proxy name buffer */ Curl_safefree(conn->socks_proxy.host.rawalloc); /* socks proxy name buffer */ Curl_safefree(conn->master_buffer); Curl_safefree(conn->connect_state); conn_reset_all_postponed_data(conn); Curl_llist_destroy(&conn->send_pipe, NULL); Curl_llist_destroy(&conn->recv_pipe, NULL); Curl_safefree(conn->localdev); Curl_free_primary_ssl_config(&conn->ssl_config); Curl_free_primary_ssl_config(&conn->proxy_ssl_config); #ifdef USE_UNIX_SOCKETS Curl_safefree(conn->unix_domain_socket); #endif #ifdef USE_SSL Curl_safefree(conn->ssl_extra); #endif free(conn); /* free all the connection oriented data */ } /* * Disconnects the given connection. Note the connection may not be the * primary connection, like when freeing room in the connection cache or * killing of a dead old connection. * * A connection needs an easy handle when closing down. 
We support this passed * in separately since the connection to get closed here is often already * disassociated from an easy handle. * * This function MUST NOT reset state in the Curl_easy struct if that * isn't strictly bound to the life-time of *this* particular connection. * */ CURLcode Curl_disconnect(struct Curl_easy *data, struct connectdata *conn, bool dead_connection) { if(!conn) return CURLE_OK; /* this is closed and fine already */ if(!data) { DEBUGF(infof(data, "DISCONNECT without easy handle, ignoring\n")); return CURLE_OK; } /* * If this connection isn't marked to force-close, leave it open if there * are other users of it */ if(CONN_INUSE(conn) && !dead_connection) { DEBUGF(infof(data, "Curl_disconnect when inuse: %zu\n", CONN_INUSE(conn))); return CURLE_OK; } conn->data = data; if(conn->dns_entry != NULL) { Curl_resolv_unlock(data, conn->dns_entry); conn->dns_entry = NULL; } Curl_hostcache_prune(data); /* kill old DNS cache entries */ #if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) /* Cleanup NTLM connection-related data */ Curl_http_ntlm_cleanup(conn); #endif if(conn->handler->disconnect) /* This is set if protocol-specific cleanups should be made */ conn->handler->disconnect(conn, dead_connection); /* unlink ourselves! */ infof(data, "Closing connection %ld\n", conn->connection_id); Curl_conncache_remove_conn(conn, TRUE); free_fixed_hostname(&conn->host); free_fixed_hostname(&conn->conn_to_host); free_fixed_hostname(&conn->http_proxy.host); free_fixed_hostname(&conn->socks_proxy.host); DEBUGASSERT(conn->data == data); /* this assumes that the pointer is still there after the connection was detected from the cache */ Curl_ssl_close(conn, FIRSTSOCKET); conn_free(conn); return CURLE_OK; } /* * This function should return TRUE if the socket is to be assumed to * be dead. Most commonly this happens when the server has closed the * connection due to inactivity. */ static bool SocketIsDead(curl_socket_t sock) { int sval; bool ret_val = TRUE; sval = SOCKET_READABLE(sock, 0); if(sval == 0) /* timeout */ ret_val = FALSE; return ret_val; } /* * IsPipeliningPossible() * * Return a bitmask with the available pipelining and multiplexing options for * the given requested connection. 
*/ static int IsPipeliningPossible(const struct Curl_easy *handle, const struct connectdata *conn) { int avail = 0; /* If a HTTP protocol and pipelining is enabled */ if((conn->handler->protocol & PROTO_FAMILY_HTTP) && (!conn->bits.protoconnstart || !conn->bits.close)) { if(Curl_pipeline_wanted(handle->multi, CURLPIPE_HTTP1) && (handle->set.httpversion != CURL_HTTP_VERSION_1_0) && (handle->set.httpreq == HTTPREQ_GET || handle->set.httpreq == HTTPREQ_HEAD)) /* didn't ask for HTTP/1.0 and a GET or HEAD */ avail |= CURLPIPE_HTTP1; if(Curl_pipeline_wanted(handle->multi, CURLPIPE_MULTIPLEX) && (handle->set.httpversion >= CURL_HTTP_VERSION_2)) /* allows HTTP/2 */ avail |= CURLPIPE_MULTIPLEX; } return avail; } /* Returns non-zero if a handle was removed */ int Curl_removeHandleFromPipeline(struct Curl_easy *handle, struct curl_llist *pipeline) { if(pipeline) { struct curl_llist_element *curr; curr = pipeline->head; while(curr) { if(curr->ptr == handle) { Curl_llist_remove(pipeline, curr, NULL); return 1; /* we removed a handle */ } curr = curr->next; } } return 0; } #if 0 /* this code is saved here as it is useful for debugging purposes */ static void Curl_printPipeline(struct curl_llist *pipeline) { struct curl_llist_element *curr; curr = pipeline->head; while(curr) { struct Curl_easy *data = (struct Curl_easy *) curr->ptr; infof(data, "Handle in pipeline: %s\n", data->state.path); curr = curr->next; } } #endif static struct Curl_easy* gethandleathead(struct curl_llist *pipeline) { struct curl_llist_element *curr = pipeline->head; #ifdef DEBUGBUILD { struct curl_llist_element *p = pipeline->head; while(p) { struct Curl_easy *e = p->ptr; DEBUGASSERT(GOOD_EASY_HANDLE(e)); p = p->next; } } #endif if(curr) { return (struct Curl_easy *) curr->ptr; } return NULL; } /* remove the specified connection from all (possible) pipelines and related queues */ void Curl_getoff_all_pipelines(struct Curl_easy *data, struct connectdata *conn) { if(!conn->bundle) return; if(conn->bundle->multiuse == BUNDLE_PIPELINING) { bool recv_head = (conn->readchannel_inuse && Curl_recvpipe_head(data, conn)); bool send_head = (conn->writechannel_inuse && Curl_sendpipe_head(data, conn)); if(Curl_removeHandleFromPipeline(data, &conn->recv_pipe) && recv_head) Curl_pipeline_leave_read(conn); if(Curl_removeHandleFromPipeline(data, &conn->send_pipe) && send_head) Curl_pipeline_leave_write(conn); } else { (void)Curl_removeHandleFromPipeline(data, &conn->recv_pipe); (void)Curl_removeHandleFromPipeline(data, &conn->send_pipe); } } static bool proxy_info_matches(const struct proxy_info* data, const struct proxy_info* needle) { if((data->proxytype == needle->proxytype) && (data->port == needle->port) && Curl_safe_strcasecompare(data->host.name, needle->host.name)) return TRUE; return FALSE; } /* * This function checks if the given connection is dead and extracts it from * the connection cache if so. * * When this is called as a Curl_conncache_foreach() callback, the connection * cache lock is held! * * Returns TRUE if the connection was dead and extracted. */ static bool extract_if_dead(struct connectdata *conn, struct Curl_easy *data) { size_t pipeLen = conn->send_pipe.size + conn->recv_pipe.size; if(!pipeLen && !CONN_INUSE(conn)) { /* The check for a dead socket makes sense only if there are no handles in pipeline and the connection isn't already marked in use */ bool dead; conn->data = data; if(conn->handler->connection_check) { /* The protocol has a special method for checking the state of the connection. 
Use it to check if the connection is dead. */ unsigned int state; state = conn->handler->connection_check(conn, CONNCHECK_ISDEAD); dead = (state & CONNRESULT_DEAD); } else { /* Use the general method for determining the death of a connection */ dead = SocketIsDead(conn->sock[FIRSTSOCKET]); } if(dead) { infof(data, "Connection %ld seems to be dead!\n", conn->connection_id); Curl_conncache_remove_conn(conn, FALSE); conn->data = NULL; /* detach */ return TRUE; } } return FALSE; } struct prunedead { struct Curl_easy *data; struct connectdata *extracted; }; /* * Wrapper to use extract_if_dead() function in Curl_conncache_foreach() * */ static int call_extract_if_dead(struct connectdata *conn, void *param) { struct prunedead *p = (struct prunedead *)param; if(extract_if_dead(conn, p->data)) { /* stop the iteration here, pass back the connection that was extracted */ p->extracted = conn; return 1; } return 0; /* continue iteration */ } /* * This function scans the connection cache for half-open/dead connections, * closes and removes them. * The cleanup is done at most once per second. */ static void prune_dead_connections(struct Curl_easy *data) { struct curltime now = Curl_now(); time_t elapsed = Curl_timediff(now, data->state.conn_cache->last_cleanup); if(elapsed >= 1000L) { struct prunedead prune; prune.data = data; prune.extracted = NULL; while(Curl_conncache_foreach(data, data->state.conn_cache, &prune, call_extract_if_dead)) { /* disconnect it */ (void)Curl_disconnect(data, prune.extracted, /* dead_connection */TRUE); } data->state.conn_cache->last_cleanup = now; } } static size_t max_pipeline_length(struct Curl_multi *multi) { return multi ? multi->max_pipeline_length : 0; } /* * Given one filled in connection struct (named needle), this function should * detect if there already is one that has all the significant details * exactly the same and thus should be used instead. * * If there is a match, this function returns TRUE - and has marked the * connection as 'in-use'. It must later be called with ConnectionDone() to * return back to 'idle' (unused) state. * * The force_reuse flag is set if the connection must be used, even if * the pipelining strategy wants to open a new connection instead of reusing. */ static bool ConnectionExists(struct Curl_easy *data, struct connectdata *needle, struct connectdata **usethis, bool *force_reuse, bool *waitpipe) { struct connectdata *check; struct connectdata *chosen = 0; bool foundPendingCandidate = FALSE; int canpipe = IsPipeliningPossible(data, needle); struct connectbundle *bundle; #ifdef USE_NTLM bool wantNTLMhttp = ((data->state.authhost.want & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && (needle->handler->protocol & PROTO_FAMILY_HTTP)); bool wantProxyNTLMhttp = (needle->bits.proxy_user_passwd && ((data->state.authproxy.want & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && (needle->handler->protocol & PROTO_FAMILY_HTTP))); #endif *force_reuse = FALSE; *waitpipe = FALSE; /* We can't pipeline if the site is blacklisted */ if((canpipe & CURLPIPE_HTTP1) && Curl_pipeline_site_blacklisted(data, needle)) canpipe &= ~ CURLPIPE_HTTP1; /* Look up the bundle with all the connections to this particular host. Locks the connection cache, beware of early returns! */ bundle = Curl_conncache_find_bundle(needle, data->state.conn_cache); if(bundle) { /* Max pipe length is zero (unlimited) for multiplexed connections */ size_t max_pipe_len = (bundle->multiuse != BUNDLE_MULTIPLEX)? 
max_pipeline_length(data->multi):0; size_t best_pipe_len = max_pipe_len; struct curl_llist_element *curr; infof(data, "Found bundle for host %s: %p [%s]\n", (needle->bits.conn_to_host ? needle->conn_to_host.name : needle->host.name), (void *)bundle, (bundle->multiuse == BUNDLE_PIPELINING ? "can pipeline" : (bundle->multiuse == BUNDLE_MULTIPLEX ? "can multiplex" : "serially"))); /* We can't pipeline if we don't know anything about the server */ if(canpipe) { if(bundle->multiuse <= BUNDLE_UNKNOWN) { if((bundle->multiuse == BUNDLE_UNKNOWN) && data->set.pipewait) { infof(data, "Server doesn't support multi-use yet, wait\n"); *waitpipe = TRUE; Curl_conncache_unlock(needle); return FALSE; /* no re-use */ } infof(data, "Server doesn't support multi-use (yet)\n"); canpipe = 0; } if((bundle->multiuse == BUNDLE_PIPELINING) && !Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1)) { /* not asked for, switch off */ infof(data, "Could pipeline, but not asked to!\n"); canpipe = 0; } else if((bundle->multiuse == BUNDLE_MULTIPLEX) && !Curl_pipeline_wanted(data->multi, CURLPIPE_MULTIPLEX)) { infof(data, "Could multiplex, but not asked to!\n"); canpipe = 0; } } curr = bundle->conn_list.head; while(curr) { bool match = FALSE; size_t pipeLen; /* * Note that if we use a HTTP proxy in normal mode (no tunneling), we * check connections to that proxy and not to the actual remote server. */ check = curr->ptr; curr = curr->next; if(extract_if_dead(check, data)) { /* disconnect it */ (void)Curl_disconnect(data, check, /* dead_connection */TRUE); continue; } pipeLen = check->send_pipe.size + check->recv_pipe.size; if(canpipe) { if(check->bits.protoconnstart && check->bits.close) continue; if(!check->bits.multiplex) { /* If not multiplexing, make sure the connection is fine for HTTP/1 pipelining */ struct Curl_easy* sh = gethandleathead(&check->send_pipe); struct Curl_easy* rh = gethandleathead(&check->recv_pipe); if(sh) { if(!(IsPipeliningPossible(sh, check) & CURLPIPE_HTTP1)) continue; } else if(rh) { if(!(IsPipeliningPossible(rh, check) & CURLPIPE_HTTP1)) continue; } } } else { if(pipeLen > 0) { /* can only happen within multi handles, and means that another easy handle is using this connection */ continue; } if(Curl_resolver_asynch()) { /* ip_addr_str[0] is NUL only if the resolving of the name hasn't completed yet and until then we don't re-use this connection */ if(!check->ip_addr_str[0]) { infof(data, "Connection #%ld is still name resolving, can't reuse\n", check->connection_id); continue; } } if((check->sock[FIRSTSOCKET] == CURL_SOCKET_BAD) || check->bits.close) { if(!check->bits.close) foundPendingCandidate = TRUE; /* Don't pick a connection that hasn't connected yet or that is going to get closed. */ infof(data, "Connection #%ld isn't open enough, can't reuse\n", check->connection_id); #ifdef DEBUGBUILD if(check->recv_pipe.size > 0) { infof(data, "BAD! 
Unconnected #%ld has a non-empty recv pipeline!\n", check->connection_id); } #endif continue; } } #ifdef USE_UNIX_SOCKETS if(needle->unix_domain_socket) { if(!check->unix_domain_socket) continue; if(strcmp(needle->unix_domain_socket, check->unix_domain_socket)) continue; if(needle->abstract_unix_socket != check->abstract_unix_socket) continue; } else if(check->unix_domain_socket) continue; #endif if((needle->handler->flags&PROTOPT_SSL) != (check->handler->flags&PROTOPT_SSL)) /* don't do mixed SSL and non-SSL connections */ if(get_protocol_family(check->handler->protocol) != needle->handler->protocol || !check->tls_upgraded) /* except protocols that have been upgraded via TLS */ continue; if(needle->bits.httpproxy != check->bits.httpproxy || needle->bits.socksproxy != check->bits.socksproxy) continue; if(needle->bits.socksproxy && !proxy_info_matches(&needle->socks_proxy, &check->socks_proxy)) continue; if(needle->bits.conn_to_host != check->bits.conn_to_host) /* don't mix connections that use the "connect to host" feature and * connections that don't use this feature */ continue; if(needle->bits.conn_to_port != check->bits.conn_to_port) /* don't mix connections that use the "connect to port" feature and * connections that don't use this feature */ continue; if(needle->bits.httpproxy) { if(!proxy_info_matches(&needle->http_proxy, &check->http_proxy)) continue; if(needle->bits.tunnel_proxy != check->bits.tunnel_proxy) continue; if(needle->http_proxy.proxytype == CURLPROXY_HTTPS) { /* use https proxy */ if(needle->handler->flags&PROTOPT_SSL) { /* use double layer ssl */ if(!Curl_ssl_config_matches(&needle->proxy_ssl_config, &check->proxy_ssl_config)) continue; if(check->proxy_ssl[FIRSTSOCKET].state != ssl_connection_complete) continue; } else { if(!Curl_ssl_config_matches(&needle->ssl_config, &check->ssl_config)) continue; if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) continue; } } } if(!canpipe && CONN_INUSE(check)) /* this request can't be pipelined but the checked connection is already in use so we skip it */ continue; if(CONN_INUSE(check) && (check->data->multi != needle->data->multi)) /* this could be subject for pipeline/multiplex use, but only if they belong to the same multi handle */ continue; if(needle->localdev || needle->localport) { /* If we are bound to a specific local end (IP+port), we must not re-use a random other one, although if we didn't ask for a particular one we can reuse one that was bound. This comparison is a bit rough and too strict. Since the input parameters can be specified in numerous ways and still end up the same it would take a lot of processing to make it really accurate. Instead, this matching will assume that re-uses of bound connections will most likely also re-use the exact same binding parameters and missing out a few edge cases shouldn't hurt anyone very much. 
*/ if((check->localport != needle->localport) || (check->localportrange != needle->localportrange) || (needle->localdev && (!check->localdev || strcmp(check->localdev, needle->localdev)))) continue; } if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) { /* This protocol requires credentials per connection, so verify that we're using the same name and password as well */ if(strcmp(needle->user, check->user) || strcmp(needle->passwd, check->passwd)) { /* one of them was different */ continue; } } if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) || needle->bits.tunnel_proxy) { /* The requested connection does not use a HTTP proxy or it uses SSL or it is a non-SSL protocol tunneled or it is a non-SSL protocol which is allowed to be upgraded via TLS */ if((strcasecompare(needle->handler->scheme, check->handler->scheme) || (get_protocol_family(check->handler->protocol) == needle->handler->protocol && check->tls_upgraded)) && (!needle->bits.conn_to_host || strcasecompare( needle->conn_to_host.name, check->conn_to_host.name)) && (!needle->bits.conn_to_port || needle->conn_to_port == check->conn_to_port) && strcasecompare(needle->host.name, check->host.name) && needle->remote_port == check->remote_port) { /* The schemes match or the the protocol family is the same and the previous connection was TLS upgraded, and the hostname and host port match */ if(needle->handler->flags & PROTOPT_SSL) { /* This is a SSL connection so verify that we're using the same SSL options as well */ if(!Curl_ssl_config_matches(&needle->ssl_config, &check->ssl_config)) { DEBUGF(infof(data, "Connection #%ld has different SSL parameters, " "can't reuse\n", check->connection_id)); continue; } if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) { foundPendingCandidate = TRUE; DEBUGF(infof(data, "Connection #%ld has not started SSL connect, " "can't reuse\n", check->connection_id)); continue; } } match = TRUE; } } else { /* The requested connection is using the same HTTP proxy in normal mode (no tunneling) */ match = TRUE; } if(match) { #if defined(USE_NTLM) /* If we are looking for an HTTP+NTLM connection, check if this is already authenticating with the right credentials. If not, keep looking so that we can reuse NTLM connections if possible. (Especially we must not reuse the same connection if partway through a handshake!) */ if(wantNTLMhttp) { if(strcmp(needle->user, check->user) || strcmp(needle->passwd, check->passwd)) continue; } else if(check->ntlm.state != NTLMSTATE_NONE) { /* Connection is using NTLM auth but we don't want NTLM */ continue; } /* Same for Proxy NTLM authentication */ if(wantProxyNTLMhttp) { /* Both check->http_proxy.user and check->http_proxy.passwd can be * NULL */ if(!check->http_proxy.user || !check->http_proxy.passwd) continue; if(strcmp(needle->http_proxy.user, check->http_proxy.user) || strcmp(needle->http_proxy.passwd, check->http_proxy.passwd)) continue; } else if(check->proxyntlm.state != NTLMSTATE_NONE) { /* Proxy connection is using NTLM auth but we don't want NTLM */ continue; } if(wantNTLMhttp || wantProxyNTLMhttp) { /* Credentials are already checked, we can use this connection */ chosen = check; if((wantNTLMhttp && (check->ntlm.state != NTLMSTATE_NONE)) || (wantProxyNTLMhttp && (check->proxyntlm.state != NTLMSTATE_NONE))) { /* We must use this connection, no other */ *force_reuse = TRUE; break; } /* Continue look up for a better connection */ continue; } #endif if(canpipe) { /* We can pipeline if we want to. 
Let's continue looking for the optimal connection to use, i.e the shortest pipe that is not blacklisted. */ if(pipeLen == 0) { /* We have the optimal connection. Let's stop looking. */ chosen = check; break; } /* We can't use the connection if the pipe is full */ if(max_pipe_len && (pipeLen >= max_pipe_len)) { infof(data, "Pipe is full, skip (%zu)\n", pipeLen); continue; } #ifdef USE_NGHTTP2 /* If multiplexed, make sure we don't go over concurrency limit */ if(check->bits.multiplex) { /* Multiplexed connections can only be HTTP/2 for now */ struct http_conn *httpc = &check->proto.httpc; if(pipeLen >= httpc->settings.max_concurrent_streams) { infof(data, "MAX_CONCURRENT_STREAMS reached, skip (%zu)\n", pipeLen); continue; } } #endif /* We can't use the connection if the pipe is penalized */ if(Curl_pipeline_penalized(data, check)) { infof(data, "Penalized, skip\n"); continue; } if(max_pipe_len) { if(pipeLen < best_pipe_len) { /* This connection has a shorter pipe so far. We'll pick this and continue searching */ chosen = check; best_pipe_len = pipeLen; continue; } } else { /* When not pipelining (== multiplexed), we have a match here! */ chosen = check; infof(data, "Multiplexed connection found!\n"); break; } } else { /* We have found a connection. Let's stop searching. */ chosen = check; break; } } } } if(chosen) { /* mark it as used before releasing the lock */ chosen->data = data; /* own it! */ Curl_conncache_unlock(needle); *usethis = chosen; return TRUE; /* yes, we found one to use! */ } Curl_conncache_unlock(needle); if(foundPendingCandidate && data->set.pipewait) { infof(data, "Found pending candidate for reuse and CURLOPT_PIPEWAIT is set\n"); *waitpipe = TRUE; } return FALSE; /* no matching connecting exists */ } /* after a TCP connection to the proxy has been verified, this function does the next magic step. Note: this function's sub-functions call failf() */ CURLcode Curl_connected_proxy(struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; if(conn->bits.socksproxy) { #ifndef CURL_DISABLE_PROXY /* for the secondary socket (FTP), use the "connect to host" * but ignore the "connect to port" (use the secondary port) */ const char * const host = conn->bits.httpproxy ? conn->http_proxy.host.name : conn->bits.conn_to_host ? conn->conn_to_host.name : sockindex == SECONDARYSOCKET ? conn->secondaryhostname : conn->host.name; const int port = conn->bits.httpproxy ? (int)conn->http_proxy.port : sockindex == SECONDARYSOCKET ? conn->secondary_port : conn->bits.conn_to_port ? conn->conn_to_port : conn->remote_port; conn->bits.socksproxy_connecting = TRUE; switch(conn->socks_proxy.proxytype) { case CURLPROXY_SOCKS5: case CURLPROXY_SOCKS5_HOSTNAME: result = Curl_SOCKS5(conn->socks_proxy.user, conn->socks_proxy.passwd, host, port, sockindex, conn); break; case CURLPROXY_SOCKS4: case CURLPROXY_SOCKS4A: result = Curl_SOCKS4(conn->socks_proxy.user, host, port, sockindex, conn); break; default: failf(conn->data, "unknown proxytype option given"); result = CURLE_COULDNT_CONNECT; } /* switch proxytype */ conn->bits.socksproxy_connecting = FALSE; #else (void)sockindex; #endif /* CURL_DISABLE_PROXY */ } return result; } /* * verboseconnect() displays verbose information after a connect */ #ifndef CURL_DISABLE_VERBOSE_STRINGS void Curl_verboseconnect(struct connectdata *conn) { if(conn->data->set.verbose) infof(conn->data, "Connected to %s (%s) port %ld (#%ld)\n", conn->bits.socksproxy ? conn->socks_proxy.host.dispname : conn->bits.httpproxy ? 
conn->http_proxy.host.dispname : conn->bits.conn_to_host ? conn->conn_to_host.dispname : conn->host.dispname, conn->ip_addr_str, conn->port, conn->connection_id); } #endif int Curl_protocol_getsock(struct connectdata *conn, curl_socket_t *socks, int numsocks) { if(conn->handler->proto_getsock) return conn->handler->proto_getsock(conn, socks, numsocks); /* Backup getsock logic. Since there is a live socket in use, we must wait for it or it will be removed from watching when the multi_socket API is used. */ socks[0] = conn->sock[FIRSTSOCKET]; return GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(0); } int Curl_doing_getsock(struct connectdata *conn, curl_socket_t *socks, int numsocks) { if(conn && conn->handler->doing_getsock) return conn->handler->doing_getsock(conn, socks, numsocks); return GETSOCK_BLANK; } /* * We are doing protocol-specific connecting and this is being called over and * over from the multi interface until the connection phase is done on * protocol layer. */ CURLcode Curl_protocol_connecting(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; if(conn && conn->handler->connecting) { *done = FALSE; result = conn->handler->connecting(conn, done); } else *done = TRUE; return result; } /* * We are DOING this is being called over and over from the multi interface * until the DOING phase is done on protocol layer. */ CURLcode Curl_protocol_doing(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; if(conn && conn->handler->doing) { *done = FALSE; result = conn->handler->doing(conn, done); } else *done = TRUE; return result; } /* * We have discovered that the TCP connection has been successful, we can now * proceed with some action. * */ CURLcode Curl_protocol_connect(struct connectdata *conn, bool *protocol_done) { CURLcode result = CURLE_OK; *protocol_done = FALSE; if(conn->bits.tcpconnect[FIRSTSOCKET] && conn->bits.protoconnstart) { /* We already are connected, get back. This may happen when the connect worked fine in the first call, like when we connect to a local server or proxy. Note that we don't know if the protocol is actually done. Unless this protocol doesn't have any protocol-connect callback, as then we know we're done. */ if(!conn->handler->connecting) *protocol_done = TRUE; return CURLE_OK; } if(!conn->bits.protoconnstart) { result = Curl_proxy_connect(conn, FIRSTSOCKET); if(result) return result; if(CONNECT_FIRSTSOCKET_PROXY_SSL()) /* wait for HTTPS proxy SSL initialization to complete */ return CURLE_OK; if(conn->bits.tunnel_proxy && conn->bits.httpproxy && Curl_connect_ongoing(conn)) /* when using an HTTP tunnel proxy, await complete tunnel establishment before proceeding further. Return CURLE_OK so we'll be called again */ return CURLE_OK; if(conn->handler->connect_it) { /* is there a protocol-specific connect() procedure? */ /* Call the protocol-specific connect function */ result = conn->handler->connect_it(conn, protocol_done); } else *protocol_done = TRUE; /* it has started, possibly even completed but that knowledge isn't stored in this bit! */ if(!result) conn->bits.protoconnstart = TRUE; } return result; /* pass back status */ } /* * Helpers for IDNA conversions. 
*/ static bool is_ASCII_name(const char *hostname) { const unsigned char *ch = (const unsigned char *)hostname; while(*ch) { if(*ch++ & 0x80) return FALSE; } return TRUE; } /* * Perform any necessary IDN conversion of hostname */ static CURLcode fix_hostname(struct connectdata *conn, struct hostname *host) { size_t len; struct Curl_easy *data = conn->data; #ifndef USE_LIBIDN2 (void)data; (void)conn; #elif defined(CURL_DISABLE_VERBOSE_STRINGS) (void)conn; #endif /* set the name we use to display the host name */ host->dispname = host->name; len = strlen(host->name); if(len && (host->name[len-1] == '.')) /* strip off a single trailing dot if present, primarily for SNI but there's no use for it */ host->name[len-1] = 0; /* Check name for non-ASCII and convert hostname to ACE form if we can */ if(!is_ASCII_name(host->name)) { #ifdef USE_LIBIDN2 if(idn2_check_version(IDN2_VERSION)) { char *ace_hostname = NULL; #if IDN2_VERSION_NUMBER >= 0x00140000 /* IDN2_NFC_INPUT: Normalize input string using normalization form C. IDN2_NONTRANSITIONAL: Perform Unicode TR46 non-transitional processing. */ int flags = IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL; #else int flags = IDN2_NFC_INPUT; #endif int rc = idn2_lookup_ul((const char *)host->name, &ace_hostname, flags); if(rc == IDN2_OK) { host->encalloc = (char *)ace_hostname; /* change the name pointer to point to the encoded hostname */ host->name = host->encalloc; } else { failf(data, "Failed to convert %s to ACE; %s\n", host->name, idn2_strerror(rc)); return CURLE_URL_MALFORMAT; } } #elif defined(USE_WIN32_IDN) char *ace_hostname = NULL; if(curl_win32_idn_to_ascii(host->name, &ace_hostname)) { host->encalloc = ace_hostname; /* change the name pointer to point to the encoded hostname */ host->name = host->encalloc; } else { failf(data, "Failed to convert %s to ACE;\n", host->name); return CURLE_URL_MALFORMAT; } #else infof(data, "IDN support not present, can't parse Unicode domains\n"); #endif } { char *hostp; for(hostp = host->name; *hostp; hostp++) { if(*hostp <= 32) { failf(data, "Host name '%s' contains bad letter", host->name); return CURLE_URL_MALFORMAT; } } } return CURLE_OK; } /* * Frees data allocated by fix_hostname() */ static void free_fixed_hostname(struct hostname *host) { #if defined(USE_LIBIDN2) if(host->encalloc) { idn2_free(host->encalloc); /* must be freed with idn2_free() since this was allocated by libidn */ host->encalloc = NULL; } #elif defined(USE_WIN32_IDN) free(host->encalloc); /* must be freed with free() since this was allocated by curl_win32_idn_to_ascii */ host->encalloc = NULL; #else (void)host; #endif } static void llist_dtor(void *user, void *element) { (void)user; (void)element; /* Do nothing */ } /* * Allocate and initialize a new connectdata object. */ static struct connectdata *allocate_conn(struct Curl_easy *data) { struct connectdata *conn = calloc(1, sizeof(struct connectdata)); if(!conn) return NULL; #ifdef USE_SSL /* The SSL backend-specific data (ssl_backend_data) objects are allocated as a separate array to ensure suitable alignment. Note that these backend pointers can be swapped by vtls (eg ssl backend data becomes proxy backend data). 
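   Layout note: the single calloc(4, sslsize) below is sliced into four
   consecutive slots that back ssl[0], ssl[1], proxy_ssl[0] and proxy_ssl[1]
   respectively.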
*/ { size_t sslsize = Curl_ssl->sizeof_ssl_backend_data; char *ssl = calloc(4, sslsize); if(!ssl) { free(conn); return NULL; } conn->ssl_extra = ssl; conn->ssl[0].backend = (void *)ssl; conn->ssl[1].backend = (void *)(ssl + sslsize); conn->proxy_ssl[0].backend = (void *)(ssl + 2 * sslsize); conn->proxy_ssl[1].backend = (void *)(ssl + 3 * sslsize); } #endif conn->handler = &Curl_handler_dummy; /* Be sure we have a handler defined already from start to avoid NULL situations and checks */ /* and we setup a few fields in case we end up actually using this struct */ conn->sock[FIRSTSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */ conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */ conn->tempsock[0] = CURL_SOCKET_BAD; /* no file descriptor */ conn->tempsock[1] = CURL_SOCKET_BAD; /* no file descriptor */ conn->connection_id = -1; /* no ID */ conn->port = -1; /* unknown at this point */ conn->remote_port = -1; /* unknown at this point */ #if defined(USE_RECV_BEFORE_SEND_WORKAROUND) && defined(DEBUGBUILD) conn->postponed[0].bindsock = CURL_SOCKET_BAD; /* no file descriptor */ conn->postponed[1].bindsock = CURL_SOCKET_BAD; /* no file descriptor */ #endif /* USE_RECV_BEFORE_SEND_WORKAROUND && DEBUGBUILD */ /* Default protocol-independent behavior doesn't support persistent connections, so we set this to force-close. Protocols that support this need to set this to FALSE in their "curl_do" functions. */ connclose(conn, "Default to force-close"); /* Store creation time to help future close decision making */ conn->created = Curl_now(); /* Store current time to give a baseline to keepalive connection times. */ conn->keepalive = Curl_now(); /* Store off the configured connection upkeep time. */ conn->upkeep_interval_ms = data->set.upkeep_interval_ms; conn->data = data; /* Setup the association between this connection and the Curl_easy */ conn->http_proxy.proxytype = data->set.proxytype; conn->socks_proxy.proxytype = CURLPROXY_SOCKS4; #ifdef CURL_DISABLE_PROXY conn->bits.proxy = FALSE; conn->bits.httpproxy = FALSE; conn->bits.socksproxy = FALSE; conn->bits.proxy_user_passwd = FALSE; conn->bits.tunnel_proxy = FALSE; #else /* CURL_DISABLE_PROXY */ /* note that these two proxy bits are now just on what looks to be requested, they may be altered down the road */ conn->bits.proxy = (data->set.str[STRING_PROXY] && *data->set.str[STRING_PROXY]) ? TRUE : FALSE; conn->bits.httpproxy = (conn->bits.proxy && (conn->http_proxy.proxytype == CURLPROXY_HTTP || conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0 || conn->http_proxy.proxytype == CURLPROXY_HTTPS)) ? TRUE : FALSE; conn->bits.socksproxy = (conn->bits.proxy && !conn->bits.httpproxy) ? TRUE : FALSE; if(data->set.str[STRING_PRE_PROXY] && *data->set.str[STRING_PRE_PROXY]) { conn->bits.proxy = TRUE; conn->bits.socksproxy = TRUE; } conn->bits.proxy_user_passwd = (data->set.str[STRING_PROXYUSERNAME]) ? TRUE : FALSE; conn->bits.tunnel_proxy = data->set.tunnel_thru_httpproxy; #endif /* CURL_DISABLE_PROXY */ conn->bits.user_passwd = (data->set.str[STRING_USERNAME]) ? 
TRUE : FALSE; conn->bits.ftp_use_epsv = data->set.ftp_use_epsv; conn->bits.ftp_use_eprt = data->set.ftp_use_eprt; conn->ssl_config.verifystatus = data->set.ssl.primary.verifystatus; conn->ssl_config.verifypeer = data->set.ssl.primary.verifypeer; conn->ssl_config.verifyhost = data->set.ssl.primary.verifyhost; conn->proxy_ssl_config.verifystatus = data->set.proxy_ssl.primary.verifystatus; conn->proxy_ssl_config.verifypeer = data->set.proxy_ssl.primary.verifypeer; conn->proxy_ssl_config.verifyhost = data->set.proxy_ssl.primary.verifyhost; conn->ip_version = data->set.ipver; #if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \ defined(NTLM_WB_ENABLED) conn->ntlm_auth_hlpr_socket = CURL_SOCKET_BAD; conn->ntlm_auth_hlpr_pid = 0; conn->challenge_header = NULL; conn->response_header = NULL; #endif if(Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1) && !conn->master_buffer) { /* Allocate master_buffer to be used for HTTP/1 pipelining */ conn->master_buffer = calloc(MASTERBUF_SIZE, sizeof(char)); if(!conn->master_buffer) goto error; } /* Initialize the pipeline lists */ Curl_llist_init(&conn->send_pipe, (curl_llist_dtor) llist_dtor); Curl_llist_init(&conn->recv_pipe, (curl_llist_dtor) llist_dtor); #ifdef HAVE_GSSAPI conn->data_prot = PROT_CLEAR; #endif /* Store the local bind parameters that will be used for this connection */ if(data->set.str[STRING_DEVICE]) { conn->localdev = strdup(data->set.str[STRING_DEVICE]); if(!conn->localdev) goto error; } conn->localportrange = data->set.localportrange; conn->localport = data->set.localport; /* the close socket stuff needs to be copied to the connection struct as it may live on without (this specific) Curl_easy */ conn->fclosesocket = data->set.fclosesocket; conn->closesocket_client = data->set.closesocket_client; return conn; error: Curl_llist_destroy(&conn->send_pipe, NULL); Curl_llist_destroy(&conn->recv_pipe, NULL); free(conn->master_buffer); free(conn->localdev); #ifdef USE_SSL free(conn->ssl_extra); #endif free(conn); return NULL; } /* returns the handler if the given scheme is built-in */ const struct Curl_handler *Curl_builtin_scheme(const char *scheme) { const struct Curl_handler * const *pp; const struct Curl_handler *p; /* Scan protocol handler table and match against 'scheme'. The handler may be changed later when the protocol specific setup function is called. */ for(pp = protocols; (p = *pp) != NULL; pp++) if(strcasecompare(p->scheme, scheme)) /* Protocol found in table. Check if allowed */ return p; return NULL; /* not found */ } static CURLcode findprotocol(struct Curl_easy *data, struct connectdata *conn, const char *protostr) { const struct Curl_handler *p = Curl_builtin_scheme(protostr); if(p && /* Protocol found in table. Check if allowed */ (data->set.allowed_protocols & p->protocol)) { /* it is allowed for "normal" request, now do an extra check if this is the result of a redirect */ if(data->state.this_is_a_follow && !(data->set.redir_protocols & p->protocol)) /* nope, get out */ ; else { /* Perform setup complement if some. */ conn->handler = conn->given = p; /* 'port' and 'remote_port' are set in setup_connection_internals() */ return CURLE_OK; } } /* The protocol was not found in the table, but we don't have to assign it to anything since it is already assigned to a dummy-struct in the create_conn() function when the connectdata struct is allocated. 
*/ failf(data, "Protocol \"%s\" not supported or disabled in " LIBCURL_NAME, protostr); return CURLE_UNSUPPORTED_PROTOCOL; } CURLcode Curl_uc_to_curlcode(CURLUcode uc) { switch(uc) { default: return CURLE_URL_MALFORMAT; case CURLUE_UNSUPPORTED_SCHEME: return CURLE_UNSUPPORTED_PROTOCOL; case CURLUE_OUT_OF_MEMORY: return CURLE_OUT_OF_MEMORY; case CURLUE_USER_NOT_ALLOWED: return CURLE_LOGIN_DENIED; } } /* * Parse URL and fill in the relevant members of the connection struct. */ static CURLcode parseurlandfillconn(struct Curl_easy *data, struct connectdata *conn) { CURLcode result; CURLU *uh; CURLUcode uc; char *hostname; Curl_up_free(data); /* cleanup previous leftovers first */ /* parse the URL */ uh = data->state.uh = curl_url(); if(!uh) return CURLE_OUT_OF_MEMORY; if(data->set.str[STRING_DEFAULT_PROTOCOL] && !Curl_is_absolute_url(data->change.url, NULL, MAX_SCHEME_LEN)) { char *url; if(data->change.url_alloc) free(data->change.url); url = aprintf("%s://%s", data->set.str[STRING_DEFAULT_PROTOCOL], data->change.url); if(!url) return CURLE_OUT_OF_MEMORY; data->change.url = url; data->change.url_alloc = TRUE; } uc = curl_url_set(uh, CURLUPART_URL, data->change.url, CURLU_GUESS_SCHEME | CURLU_NON_SUPPORT_SCHEME | (data->set.disallow_username_in_url ? CURLU_DISALLOW_USER : 0) | (data->set.path_as_is ? CURLU_PATH_AS_IS : 0)); if(uc) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_SCHEME, &data->state.up.scheme, 0); if(uc) return Curl_uc_to_curlcode(uc); result = findprotocol(data, conn, data->state.up.scheme); if(result) return result; uc = curl_url_get(uh, CURLUPART_USER, &data->state.up.user, CURLU_URLDECODE); if(!uc) { conn->user = strdup(data->state.up.user); if(!conn->user) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; } else if(uc != CURLUE_NO_USER) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_PASSWORD, &data->state.up.password, CURLU_URLDECODE); if(!uc) { conn->passwd = strdup(data->state.up.password); if(!conn->passwd) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; } else if(uc != CURLUE_NO_PASSWORD) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_OPTIONS, &data->state.up.options, CURLU_URLDECODE); if(!uc) { conn->options = strdup(data->state.up.options); if(!conn->options) return CURLE_OUT_OF_MEMORY; } else if(uc != CURLUE_NO_OPTIONS) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_HOST, &data->state.up.hostname, 0); if(uc) { if(!strcasecompare("file", data->state.up.scheme)) return CURLE_OUT_OF_MEMORY; } uc = curl_url_get(uh, CURLUPART_PATH, &data->state.up.path, 0); if(uc) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_PORT, &data->state.up.port, CURLU_DEFAULT_PORT); if(uc) { if(!strcasecompare("file", data->state.up.scheme)) return CURLE_OUT_OF_MEMORY; } else { unsigned long port = strtoul(data->state.up.port, NULL, 10); conn->remote_port = curlx_ultous(port); } (void)curl_url_get(uh, CURLUPART_QUERY, &data->state.up.query, 0); hostname = data->state.up.hostname; if(!hostname) /* this is for file:// transfers, get a dummy made */ hostname = (char *)""; if(hostname[0] == '[') { /* This looks like an IPv6 address literal. See if there is an address scope. 
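       (e.g. a URL host of "[fe80::1%25eth0]" carries the zone id "eth0";
       RFC 6874 requires the '%' delimiter to be encoded as "%25", which is
       what the code below checks for.)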
*/ char *percent = strchr(++hostname, '%'); conn->bits.ipv6_ip = TRUE; if(percent) { unsigned int identifier_offset = 3; char *endp; unsigned long scope; if(strncmp("%25", percent, 3) != 0) { infof(data, "Please URL encode %% as %%25, see RFC 6874.\n"); identifier_offset = 1; } scope = strtoul(percent + identifier_offset, &endp, 10); if(*endp == ']') { /* The address scope was well formed. Knock it out of the hostname. */ memmove(percent, endp, strlen(endp) + 1); conn->scope_id = (unsigned int)scope; } else { /* Zone identifier is not numeric */ #if defined(HAVE_NET_IF_H) && defined(IFNAMSIZ) && defined(HAVE_IF_NAMETOINDEX) char ifname[IFNAMSIZ + 2]; char *square_bracket; unsigned int scopeidx = 0; strncpy(ifname, percent + identifier_offset, IFNAMSIZ + 2); /* Ensure nullbyte termination */ ifname[IFNAMSIZ + 1] = '\0'; square_bracket = strchr(ifname, ']'); if(square_bracket) { /* Remove ']' */ *square_bracket = '\0'; scopeidx = if_nametoindex(ifname); if(scopeidx == 0) { infof(data, "Invalid network interface: %s; %s\n", ifname, strerror(errno)); } } if(scopeidx > 0) { char *p = percent + identifier_offset + strlen(ifname); /* Remove zone identifier from hostname */ memmove(percent, p, strlen(p) + 1); conn->scope_id = scopeidx; } else #endif /* HAVE_NET_IF_H && IFNAMSIZ */ infof(data, "Invalid IPv6 address format\n"); } } percent = strchr(hostname, ']'); if(percent) /* terminate IPv6 numerical at end bracket */ *percent = 0; } /* make sure the connect struct gets its own copy of the host name */ conn->host.rawalloc = strdup(hostname); if(!conn->host.rawalloc) return CURLE_OUT_OF_MEMORY; conn->host.name = conn->host.rawalloc; if(data->set.scope_id) /* Override any scope that was set above. */ conn->scope_id = data->set.scope_id; return CURLE_OK; } /* * If we're doing a resumed transfer, we need to setup our stuff * properly. */ static CURLcode setup_range(struct Curl_easy *data) { struct UrlState *s = &data->state; s->resume_from = data->set.set_resume_from; if(s->resume_from || data->set.str[STRING_SET_RANGE]) { if(s->rangestringalloc) free(s->range); if(s->resume_from) s->range = aprintf("%" CURL_FORMAT_CURL_OFF_T "-", s->resume_from); else s->range = strdup(data->set.str[STRING_SET_RANGE]); s->rangestringalloc = (s->range) ? TRUE : FALSE; if(!s->range) return CURLE_OUT_OF_MEMORY; /* tell ourselves to fetch this range */ s->use_range = TRUE; /* enable range download */ } else s->use_range = FALSE; /* disable range download */ return CURLE_OK; } /* * setup_connection_internals() - * * Setup connection internals specific to the requested protocol in the * Curl_easy. This is inited and setup before the connection is made but * is about the particular protocol that is to be used. * * This MUST get called after proxy magic has been figured out. */ static CURLcode setup_connection_internals(struct connectdata *conn) { const struct Curl_handler * p; CURLcode result; conn->socktype = SOCK_STREAM; /* most of them are TCP streams */ /* Perform setup complement if some. */ p = conn->handler; if(p->setup_connection) { result = (*p->setup_connection)(conn); if(result) return result; p = conn->handler; /* May have changed. */ } if(conn->port < 0) /* we check for -1 here since if proxy was detected already, this was very likely already set to the proxy port */ conn->port = p->defport; return CURLE_OK; } /* * Curl_free_request_state() should free temp data that was allocated in the * Curl_easy for this single request. 
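 * (Currently that is the protocol-specific request data ('protop') and any
 * 'newurl' string prepared for a follow-up request.)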
*/ void Curl_free_request_state(struct Curl_easy *data) { Curl_safefree(data->req.protop); Curl_safefree(data->req.newurl); } #ifndef CURL_DISABLE_PROXY /**************************************************************** * Checks if the host is in the noproxy list. returns true if it matches * and therefore the proxy should NOT be used. ****************************************************************/ static bool check_noproxy(const char *name, const char *no_proxy) { /* no_proxy=domain1.dom,host.domain2.dom * (a comma-separated list of hosts which should * not be proxied, or an asterisk to override * all proxy variables) */ if(no_proxy && no_proxy[0]) { size_t tok_start; size_t tok_end; const char *separator = ", "; size_t no_proxy_len; size_t namelen; char *endptr; if(strcasecompare("*", no_proxy)) { return TRUE; } /* NO_PROXY was specified and it wasn't just an asterisk */ no_proxy_len = strlen(no_proxy); if(name[0] == '[') { /* IPv6 numerical address */ endptr = strchr(name, ']'); if(!endptr) return FALSE; name++; namelen = endptr - name; } else namelen = strlen(name); for(tok_start = 0; tok_start < no_proxy_len; tok_start = tok_end + 1) { while(tok_start < no_proxy_len && strchr(separator, no_proxy[tok_start]) != NULL) { /* Look for the beginning of the token. */ ++tok_start; } if(tok_start == no_proxy_len) break; /* It was all trailing separator chars, no more tokens. */ for(tok_end = tok_start; tok_end < no_proxy_len && strchr(separator, no_proxy[tok_end]) == NULL; ++tok_end) /* Look for the end of the token. */ ; /* To match previous behaviour, where it was necessary to specify * ".local.com" to prevent matching "notlocal.com", we will leave * the '.' off. */ if(no_proxy[tok_start] == '.') ++tok_start; if((tok_end - tok_start) <= namelen) { /* Match the last part of the name to the domain we are checking. */ const char *checkn = name + namelen - (tok_end - tok_start); if(strncasecompare(no_proxy + tok_start, checkn, tok_end - tok_start)) { if((tok_end - tok_start) == namelen || *(checkn - 1) == '.') { /* We either have an exact match, or the previous character is a . * so it is within the same domain, so no proxy for this host. */ return TRUE; } } } /* if((tok_end - tok_start) <= namelen) */ } /* for(tok_start = 0; tok_start < no_proxy_len; tok_start = tok_end + 1) */ } /* NO_PROXY was specified and it wasn't just an asterisk */ return FALSE; } #ifndef CURL_DISABLE_HTTP /**************************************************************** * Detect what (if any) proxy to use. Remember that this selects a host * name and is not limited to HTTP proxies only. * The returned pointer must be freed by the caller (unless NULL) ****************************************************************/ static char *detect_proxy(struct connectdata *conn) { char *proxy = NULL; /* If proxy was not specified, we check for default proxy environment * variables, to enable i.e Lynx compliance: * * http_proxy=http://some.server.dom:port/ * https_proxy=http://some.server.dom:port/ * ftp_proxy=http://some.server.dom:port/ * no_proxy=domain1.dom,host.domain2.dom * (a comma-separated list of hosts which should * not be proxied, or an asterisk to override * all proxy variables) * all_proxy=http://some.server.dom:port/ * (seems to exist for the CERN www lib. Probably * the first to check for.) * * For compatibility, the all-uppercase versions of these variables are * checked if the lowercase versions don't exist. 
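 * Example: for an "ftp://" URL the lookup order is "ftp_proxy", then
 * "FTP_PROXY", then "all_proxy" and finally "ALL_PROXY". The one exception
 * is "http_proxy", where the uppercase variant is deliberately skipped (see
 * the security note further down).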
*/ char proxy_env[128]; const char *protop = conn->handler->scheme; char *envp = proxy_env; char *prox; /* Now, build <protocol>_proxy and check for such a one to use */ while(*protop) *envp++ = (char)tolower((int)*protop++); /* append _proxy */ strcpy(envp, "_proxy"); /* read the protocol proxy: */ prox = curl_getenv(proxy_env); /* * We don't try the uppercase version of HTTP_PROXY because of * security reasons: * * When curl is used in a webserver application * environment (cgi or php), this environment variable can * be controlled by the web server user by setting the * http header 'Proxy:' to some value. * * This can cause 'internal' http/ftp requests to be * arbitrarily redirected by any external attacker. */ if(!prox && !strcasecompare("http_proxy", proxy_env)) { /* There was no lowercase variable, try the uppercase version: */ Curl_strntoupper(proxy_env, proxy_env, sizeof(proxy_env)); prox = curl_getenv(proxy_env); } envp = proxy_env; if(prox) { proxy = prox; /* use this */ } else { envp = (char *)"all_proxy"; proxy = curl_getenv(envp); /* default proxy to use */ if(!proxy) { envp = (char *)"ALL_PROXY"; proxy = curl_getenv(envp); } } if(proxy) infof(conn->data, "Uses proxy env variable %s == '%s'\n", envp, proxy); return proxy; } #endif /* CURL_DISABLE_HTTP */ /* * If this is supposed to use a proxy, we need to figure out the proxy * host name, so that we can re-use an existing connection * that may exist registered to the same proxy host. */ static CURLcode parse_proxy(struct Curl_easy *data, struct connectdata *conn, char *proxy, curl_proxytype proxytype) { char *prox_portno; char *endofprot; /* We use 'proxyptr' to point to the proxy name from now on... */ char *proxyptr; char *portptr; char *atsign; long port = -1; char *proxyuser = NULL; char *proxypasswd = NULL; bool sockstype; /* We do the proxy host string parsing here. We want the host name and the * port name. Accept a protocol:// prefix */ /* Parse the protocol part if present */ endofprot = strstr(proxy, "://"); if(endofprot) { proxyptr = endofprot + 3; if(checkprefix("https", proxy)) proxytype = CURLPROXY_HTTPS; else if(checkprefix("socks5h", proxy)) proxytype = CURLPROXY_SOCKS5_HOSTNAME; else if(checkprefix("socks5", proxy)) proxytype = CURLPROXY_SOCKS5; else if(checkprefix("socks4a", proxy)) proxytype = CURLPROXY_SOCKS4A; else if(checkprefix("socks4", proxy) || checkprefix("socks", proxy)) proxytype = CURLPROXY_SOCKS4; else if(checkprefix("http:", proxy)) ; /* leave it as HTTP or HTTP/1.0 */ else { /* Any other xxx:// reject! */ failf(data, "Unsupported proxy scheme for \'%s\'", proxy); return CURLE_COULDNT_CONNECT; } } else proxyptr = proxy; /* No xxx:// head: It's a HTTP proxy */ #ifdef USE_SSL if(!(Curl_ssl->supports & SSLSUPP_HTTPS_PROXY)) #endif if(proxytype == CURLPROXY_HTTPS) { failf(data, "Unsupported proxy \'%s\', libcurl is built without the " "HTTPS-proxy support.", proxy); return CURLE_NOT_BUILT_IN; } sockstype = proxytype == CURLPROXY_SOCKS5_HOSTNAME || proxytype == CURLPROXY_SOCKS5 || proxytype == CURLPROXY_SOCKS4A || proxytype == CURLPROXY_SOCKS4; /* Is there a username and password given in this proxy url? 
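     (e.g. "http://joe:secret@proxy.example.com:8080" - everything before the
     '@' is taken as credentials; illustrative example.)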
*/ atsign = strchr(proxyptr, '@'); if(atsign) { CURLcode result = Curl_parse_login_details(proxyptr, atsign - proxyptr, &proxyuser, &proxypasswd, NULL); if(result) return result; proxyptr = atsign + 1; } /* start scanning for port number at this point */ portptr = proxyptr; /* detect and extract RFC6874-style IPv6-addresses */ if(*proxyptr == '[') { char *ptr = ++proxyptr; /* advance beyond the initial bracket */ while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.'))) ptr++; if(*ptr == '%') { /* There might be a zone identifier */ if(strncmp("%25", ptr, 3)) infof(data, "Please URL encode %% as %%25, see RFC 6874.\n"); ptr++; /* Allow unreserved characters as defined in RFC 3986 */ while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') || (*ptr == '.') || (*ptr == '_') || (*ptr == '~'))) ptr++; } if(*ptr == ']') /* yeps, it ended nicely with a bracket as well */ *ptr++ = 0; else infof(data, "Invalid IPv6 address format\n"); portptr = ptr; /* Note that if this didn't end with a bracket, we still advanced the * proxyptr first, but I can't see anything wrong with that as no host * name nor a numeric can legally start with a bracket. */ } /* Get port number off proxy.server.com:1080 */ prox_portno = strchr(portptr, ':'); if(prox_portno) { char *endp = NULL; *prox_portno = 0x0; /* cut off number from host name */ prox_portno ++; /* now set the local port number */ port = strtol(prox_portno, &endp, 10); if((endp && *endp && (*endp != '/') && (*endp != ' ')) || (port < 0) || (port > 65535)) { /* meant to detect for example invalid IPv6 numerical addresses without brackets: "2a00:fac0:a000::7:13". Accept a trailing slash only because we then allow "URL style" with the number followed by a slash, used in curl test cases already. Space is also an acceptable terminating symbol. */ infof(data, "No valid port number in proxy string (%s)\n", prox_portno); } else conn->port = port; } else { if(proxyptr[0]=='/') { /* If the first character in the proxy string is a slash, fail immediately. The following code will otherwise clear the string which will lead to code running as if no proxy was set! */ Curl_safefree(proxyuser); Curl_safefree(proxypasswd); return CURLE_COULDNT_RESOLVE_PROXY; } /* without a port number after the host name, some people seem to use a slash so we strip everything from the first slash */ atsign = strchr(proxyptr, '/'); if(atsign) *atsign = '\0'; /* cut off path part from host name */ if(data->set.proxyport) /* None given in the proxy string, then get the default one if it is given */ port = data->set.proxyport; else { if(proxytype == CURLPROXY_HTTPS) port = CURL_DEFAULT_HTTPS_PROXY_PORT; else port = CURL_DEFAULT_PROXY_PORT; } } if(*proxyptr) { struct proxy_info *proxyinfo = sockstype ? &conn->socks_proxy : &conn->http_proxy; proxyinfo->proxytype = proxytype; if(proxyuser) { /* found user and password, rip them out. note that we are unescaping them, as there is otherwise no way to have a username or password with reserved characters like ':' in them. 
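       (Illustrative example: a proxy URL carrying the user "us%3Aer" and the
       password "p%40ss" ends up as "us:er" and "p@ss" once unescaped.)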
*/ Curl_safefree(proxyinfo->user); proxyinfo->user = curl_easy_unescape(data, proxyuser, 0, NULL); Curl_safefree(proxyuser); if(!proxyinfo->user) { Curl_safefree(proxypasswd); return CURLE_OUT_OF_MEMORY; } Curl_safefree(proxyinfo->passwd); if(proxypasswd && strlen(proxypasswd) < MAX_CURL_PASSWORD_LENGTH) proxyinfo->passwd = curl_easy_unescape(data, proxypasswd, 0, NULL); else proxyinfo->passwd = strdup(""); Curl_safefree(proxypasswd); if(!proxyinfo->passwd) return CURLE_OUT_OF_MEMORY; conn->bits.proxy_user_passwd = TRUE; /* enable it */ } if(port >= 0) { proxyinfo->port = port; if(conn->port < 0 || sockstype || !conn->socks_proxy.host.rawalloc) conn->port = port; } /* now, clone the cleaned proxy host name */ Curl_safefree(proxyinfo->host.rawalloc); proxyinfo->host.rawalloc = strdup(proxyptr); proxyinfo->host.name = proxyinfo->host.rawalloc; if(!proxyinfo->host.rawalloc) return CURLE_OUT_OF_MEMORY; } Curl_safefree(proxyuser); Curl_safefree(proxypasswd); return CURLE_OK; } /* * Extract the user and password from the authentication string */ static CURLcode parse_proxy_auth(struct Curl_easy *data, struct connectdata *conn) { char proxyuser[MAX_CURL_USER_LENGTH]=""; char proxypasswd[MAX_CURL_PASSWORD_LENGTH]=""; CURLcode result; if(data->set.str[STRING_PROXYUSERNAME] != NULL) { strncpy(proxyuser, data->set.str[STRING_PROXYUSERNAME], MAX_CURL_USER_LENGTH); proxyuser[MAX_CURL_USER_LENGTH-1] = '\0'; /*To be on safe side*/ } if(data->set.str[STRING_PROXYPASSWORD] != NULL) { strncpy(proxypasswd, data->set.str[STRING_PROXYPASSWORD], MAX_CURL_PASSWORD_LENGTH); proxypasswd[MAX_CURL_PASSWORD_LENGTH-1] = '\0'; /*To be on safe side*/ } result = Curl_urldecode(data, proxyuser, 0, &conn->http_proxy.user, NULL, FALSE); if(!result) result = Curl_urldecode(data, proxypasswd, 0, &conn->http_proxy.passwd, NULL, FALSE); return result; } /* create_conn helper to parse and init proxy values. to be called after unix socket init but before any proxy vars are evaluated. */ static CURLcode create_conn_helper_init_proxy(struct connectdata *conn) { char *proxy = NULL; char *socksproxy = NULL; char *no_proxy = NULL; CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; /************************************************************* * Extract the user and password from the authentication string *************************************************************/ if(conn->bits.proxy_user_passwd) { result = parse_proxy_auth(data, conn); if(result) goto out; } /************************************************************* * Detect what (if any) proxy to use *************************************************************/ if(data->set.str[STRING_PROXY]) { proxy = strdup(data->set.str[STRING_PROXY]); /* if global proxy is set, this is it */ if(NULL == proxy) { failf(data, "memory shortage"); result = CURLE_OUT_OF_MEMORY; goto out; } } if(data->set.str[STRING_PRE_PROXY]) { socksproxy = strdup(data->set.str[STRING_PRE_PROXY]); /* if global socks proxy is set, this is it */ if(NULL == socksproxy) { failf(data, "memory shortage"); result = CURLE_OUT_OF_MEMORY; goto out; } } if(!data->set.str[STRING_NOPROXY]) { const char *p = "no_proxy"; no_proxy = curl_getenv(p); if(!no_proxy) { p = "NO_PROXY"; no_proxy = curl_getenv(p); } if(no_proxy) { infof(conn->data, "Uses proxy env variable %s == '%s'\n", p, no_proxy); } } if(check_noproxy(conn->host.name, data->set.str[STRING_NOPROXY] ? 
data->set.str[STRING_NOPROXY] : no_proxy)) { Curl_safefree(proxy); Curl_safefree(socksproxy); } #ifndef CURL_DISABLE_HTTP else if(!proxy && !socksproxy) /* if the host is not in the noproxy list, detect proxy. */ proxy = detect_proxy(conn); #endif /* CURL_DISABLE_HTTP */ Curl_safefree(no_proxy); #ifdef USE_UNIX_SOCKETS /* For the time being do not mix proxy and unix domain sockets. See #1274 */ if(proxy && conn->unix_domain_socket) { free(proxy); proxy = NULL; } #endif if(proxy && (!*proxy || (conn->handler->flags & PROTOPT_NONETWORK))) { free(proxy); /* Don't bother with an empty proxy string or if the protocol doesn't work with network */ proxy = NULL; } if(socksproxy && (!*socksproxy || (conn->handler->flags & PROTOPT_NONETWORK))) { free(socksproxy); /* Don't bother with an empty socks proxy string or if the protocol doesn't work with network */ socksproxy = NULL; } /*********************************************************************** * If this is supposed to use a proxy, we need to figure out the proxy host * name, proxy type and port number, so that we can re-use an existing * connection that may exist registered to the same proxy host. ***********************************************************************/ if(proxy || socksproxy) { if(proxy) { result = parse_proxy(data, conn, proxy, conn->http_proxy.proxytype); Curl_safefree(proxy); /* parse_proxy copies the proxy string */ if(result) goto out; } if(socksproxy) { result = parse_proxy(data, conn, socksproxy, conn->socks_proxy.proxytype); /* parse_proxy copies the socks proxy string */ Curl_safefree(socksproxy); if(result) goto out; } if(conn->http_proxy.host.rawalloc) { #ifdef CURL_DISABLE_HTTP /* asking for a HTTP proxy is a bit funny when HTTP is disabled... */ result = CURLE_UNSUPPORTED_PROTOCOL; goto out; #else /* force this connection's protocol to become HTTP if compatible */ if(!(conn->handler->protocol & PROTO_FAMILY_HTTP)) { if((conn->handler->flags & PROTOPT_PROXY_AS_HTTP) && !conn->bits.tunnel_proxy) conn->handler = &Curl_handler_http; else /* if not converting to HTTP over the proxy, enforce tunneling */ conn->bits.tunnel_proxy = TRUE; } conn->bits.httpproxy = TRUE; #endif } else { conn->bits.httpproxy = FALSE; /* not a HTTP proxy */ conn->bits.tunnel_proxy = FALSE; /* no tunneling if not HTTP */ } if(conn->socks_proxy.host.rawalloc) { if(!conn->http_proxy.host.rawalloc) { /* once a socks proxy */ if(!conn->socks_proxy.user) { conn->socks_proxy.user = conn->http_proxy.user; conn->http_proxy.user = NULL; Curl_safefree(conn->socks_proxy.passwd); conn->socks_proxy.passwd = conn->http_proxy.passwd; conn->http_proxy.passwd = NULL; } } conn->bits.socksproxy = TRUE; } else conn->bits.socksproxy = FALSE; /* not a socks proxy */ } else { conn->bits.socksproxy = FALSE; conn->bits.httpproxy = FALSE; } conn->bits.proxy = conn->bits.httpproxy || conn->bits.socksproxy; if(!conn->bits.proxy) { /* we aren't using the proxy after all... */ conn->bits.proxy = FALSE; conn->bits.httpproxy = FALSE; conn->bits.socksproxy = FALSE; conn->bits.proxy_user_passwd = FALSE; conn->bits.tunnel_proxy = FALSE; } out: free(socksproxy); free(proxy); return result; } #endif /* CURL_DISABLE_PROXY */ /* * Curl_parse_login_details() * * This is used to parse a login string for user name, password and options in * the following formats: * * user * user:password * user:password;options * user;options * user;options:password * :password * :password;options * ;options * ;options:password * * Parameters: * * login [in] - The login string. 
* len [in] - The length of the login string. * userp [in/out] - The address where a pointer to newly allocated memory * holding the user will be stored upon completion. * passwdp [in/out] - The address where a pointer to newly allocated memory * holding the password will be stored upon completion. * optionsp [in/out] - The address where a pointer to newly allocated memory * holding the options will be stored upon completion. * * Returns CURLE_OK on success. */ CURLcode Curl_parse_login_details(const char *login, const size_t len, char **userp, char **passwdp, char **optionsp) { CURLcode result = CURLE_OK; char *ubuf = NULL; char *pbuf = NULL; char *obuf = NULL; const char *psep = NULL; const char *osep = NULL; size_t ulen; size_t plen; size_t olen; /* Attempt to find the password separator */ if(passwdp) { psep = strchr(login, ':'); /* Within the constraint of the login string */ if(psep >= login + len) psep = NULL; } /* Attempt to find the options separator */ if(optionsp) { osep = strchr(login, ';'); /* Within the constraint of the login string */ if(osep >= login + len) osep = NULL; } /* Calculate the portion lengths */ ulen = (psep ? (size_t)(osep && psep > osep ? osep - login : psep - login) : (osep ? (size_t)(osep - login) : len)); plen = (psep ? (osep && osep > psep ? (size_t)(osep - psep) : (size_t)(login + len - psep)) - 1 : 0); olen = (osep ? (psep && psep > osep ? (size_t)(psep - osep) : (size_t)(login + len - osep)) - 1 : 0); /* Allocate the user portion buffer */ if(userp && ulen) { ubuf = malloc(ulen + 1); if(!ubuf) result = CURLE_OUT_OF_MEMORY; } /* Allocate the password portion buffer */ if(!result && passwdp && plen) { pbuf = malloc(plen + 1); if(!pbuf) { free(ubuf); result = CURLE_OUT_OF_MEMORY; } } /* Allocate the options portion buffer */ if(!result && optionsp && olen) { obuf = malloc(olen + 1); if(!obuf) { free(pbuf); free(ubuf); result = CURLE_OUT_OF_MEMORY; } } if(!result) { /* Store the user portion if necessary */ if(ubuf) { memcpy(ubuf, login, ulen); ubuf[ulen] = '\0'; Curl_safefree(*userp); *userp = ubuf; } /* Store the password portion if necessary */ if(pbuf) { memcpy(pbuf, psep + 1, plen); pbuf[plen] = '\0'; Curl_safefree(*passwdp); *passwdp = pbuf; } /* Store the options portion if necessary */ if(obuf) { memcpy(obuf, osep + 1, olen); obuf[olen] = '\0'; Curl_safefree(*optionsp); *optionsp = obuf; } } return result; } /************************************************************* * Figure out the remote port number and fix it in the URL * * No matter if we use a proxy or not, we have to figure out the remote * port number of various reasons. * * The port number embedded in the URL is replaced, if necessary. *************************************************************/ static CURLcode parse_remote_port(struct Curl_easy *data, struct connectdata *conn) { if(data->set.use_port && data->state.allow_port) { /* if set, we use this instead of the port possibly given in the URL */ char portbuf[16]; CURLUcode uc; conn->remote_port = (unsigned short)data->set.use_port; snprintf(portbuf, sizeof(portbuf), "%u", conn->remote_port); uc = curl_url_set(data->state.uh, CURLUPART_PORT, portbuf, 0); if(uc) return CURLE_OUT_OF_MEMORY; } return CURLE_OK; } /* * Override the login details from the URL with that in the CURLOPT_USERPWD * option or a .netrc file, if applicable. 
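 * With CURL_NETRC_OPTIONAL the .netrc file only fills in fields that are
 * still empty after CURLOPT_USERNAME/CURLOPT_PASSWORD have been applied; in
 * the other netrc modes a matching .netrc entry takes precedence.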
*/ static CURLcode override_login(struct Curl_easy *data, struct connectdata *conn, char **userp, char **passwdp, char **optionsp) { bool user_changed = FALSE; bool passwd_changed = FALSE; CURLUcode uc; if(data->set.str[STRING_USERNAME]) { free(*userp); *userp = strdup(data->set.str[STRING_USERNAME]); if(!*userp) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; /* enable user+password */ user_changed = TRUE; } if(data->set.str[STRING_PASSWORD]) { free(*passwdp); *passwdp = strdup(data->set.str[STRING_PASSWORD]); if(!*passwdp) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; /* enable user+password */ passwd_changed = TRUE; } if(data->set.str[STRING_OPTIONS]) { free(*optionsp); *optionsp = strdup(data->set.str[STRING_OPTIONS]); if(!*optionsp) return CURLE_OUT_OF_MEMORY; } conn->bits.netrc = FALSE; if(data->set.use_netrc != CURL_NETRC_IGNORED) { char *nuser = NULL; char *npasswd = NULL; int ret; if(data->set.use_netrc == CURL_NETRC_OPTIONAL) nuser = *userp; /* to separate otherwise identical machines */ ret = Curl_parsenetrc(conn->host.name, &nuser, &npasswd, data->set.str[STRING_NETRC_FILE]); if(ret > 0) { infof(data, "Couldn't find host %s in the " DOT_CHAR "netrc file; using defaults\n", conn->host.name); } else if(ret < 0) { return CURLE_OUT_OF_MEMORY; } else { /* set bits.netrc TRUE to remember that we got the name from a .netrc file, so that it is safe to use even if we followed a Location: to a different host or similar. */ conn->bits.netrc = TRUE; conn->bits.user_passwd = TRUE; /* enable user+password */ if(data->set.use_netrc == CURL_NETRC_OPTIONAL) { /* prefer credentials outside netrc */ if(nuser && !*userp) { free(*userp); *userp = nuser; user_changed = TRUE; } if(npasswd && !*passwdp) { free(*passwdp); *passwdp = npasswd; passwd_changed = TRUE; } } else { /* prefer netrc credentials */ if(nuser) { free(*userp); *userp = nuser; user_changed = TRUE; } if(npasswd) { free(*passwdp); *passwdp = npasswd; passwd_changed = TRUE; } } } } /* for updated strings, we update them in the URL */ if(user_changed) { uc = curl_url_set(data->state.uh, CURLUPART_USER, *userp, 0); if(uc) return Curl_uc_to_curlcode(uc); } if(passwd_changed) { uc = curl_url_set(data->state.uh, CURLUPART_PASSWORD, *passwdp, 0); if(uc) return Curl_uc_to_curlcode(uc); } return CURLE_OK; } /* * Set the login details so they're available in the connection */ static CURLcode set_login(struct connectdata *conn) { CURLcode result = CURLE_OK; const char *setuser = CURL_DEFAULT_USER; const char *setpasswd = CURL_DEFAULT_PASSWORD; /* If our protocol needs a password and we have none, use the defaults */ if((conn->handler->flags & PROTOPT_NEEDSPWD) && !conn->bits.user_passwd) ; else { setuser = ""; setpasswd = ""; } /* Store the default user */ if(!conn->user) { conn->user = strdup(setuser); if(!conn->user) return CURLE_OUT_OF_MEMORY; } /* Store the default password */ if(!conn->passwd) { conn->passwd = strdup(setpasswd); if(!conn->passwd) result = CURLE_OUT_OF_MEMORY; } /* if there's a user without password, consider password blank */ if(conn->user && !conn->passwd) { conn->passwd = strdup(""); if(!conn->passwd) result = CURLE_OUT_OF_MEMORY; } return result; } /* * Parses a "host:port" string to connect to. * The hostname and the port may be empty; in this case, NULL is returned for * the hostname and -1 for the port. 
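 * Illustrative examples: "example.com:8080" gives hostname "example.com" and
 * port 8080, while "[::1]:443" gives hostname "::1" and port 443.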
*/ static CURLcode parse_connect_to_host_port(struct Curl_easy *data, const char *host, char **hostname_result, int *port_result) { char *host_dup; char *hostptr; char *host_portno; char *portptr; int port = -1; #if defined(CURL_DISABLE_VERBOSE_STRINGS) (void) data; #endif *hostname_result = NULL; *port_result = -1; if(!host || !*host) return CURLE_OK; host_dup = strdup(host); if(!host_dup) return CURLE_OUT_OF_MEMORY; hostptr = host_dup; /* start scanning for port number at this point */ portptr = hostptr; /* detect and extract RFC6874-style IPv6-addresses */ if(*hostptr == '[') { #ifdef ENABLE_IPV6 char *ptr = ++hostptr; /* advance beyond the initial bracket */ while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.'))) ptr++; if(*ptr == '%') { /* There might be a zone identifier */ if(strncmp("%25", ptr, 3)) infof(data, "Please URL encode %% as %%25, see RFC 6874.\n"); ptr++; /* Allow unreserved characters as defined in RFC 3986 */ while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') || (*ptr == '.') || (*ptr == '_') || (*ptr == '~'))) ptr++; } if(*ptr == ']') /* yeps, it ended nicely with a bracket as well */ *ptr++ = '\0'; else infof(data, "Invalid IPv6 address format\n"); portptr = ptr; /* Note that if this didn't end with a bracket, we still advanced the * hostptr first, but I can't see anything wrong with that as no host * name nor a numeric can legally start with a bracket. */ #else failf(data, "Use of IPv6 in *_CONNECT_TO without IPv6 support built-in!"); free(host_dup); return CURLE_NOT_BUILT_IN; #endif } /* Get port number off server.com:1080 */ host_portno = strchr(portptr, ':'); if(host_portno) { char *endp = NULL; *host_portno = '\0'; /* cut off number from host name */ host_portno++; if(*host_portno) { long portparse = strtol(host_portno, &endp, 10); if((endp && *endp) || (portparse < 0) || (portparse > 65535)) { infof(data, "No valid port number in connect to host string (%s)\n", host_portno); hostptr = NULL; port = -1; } else port = (int)portparse; /* we know it will fit */ } } /* now, clone the cleaned host name */ if(hostptr) { *hostname_result = strdup(hostptr); if(!*hostname_result) { free(host_dup); return CURLE_OUT_OF_MEMORY; } } *port_result = port; free(host_dup); return CURLE_OK; } /* * Parses one "connect to" string in the form: * "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT". */ static CURLcode parse_connect_to_string(struct Curl_easy *data, struct connectdata *conn, const char *conn_to_host, char **host_result, int *port_result) { CURLcode result = CURLE_OK; const char *ptr = conn_to_host; int host_match = FALSE; int port_match = FALSE; *host_result = NULL; *port_result = -1; if(*ptr == ':') { /* an empty hostname always matches */ host_match = TRUE; ptr++; } else { /* check whether the URL's hostname matches */ size_t hostname_to_match_len; char *hostname_to_match = aprintf("%s%s%s", conn->bits.ipv6_ip ? "[" : "", conn->host.name, conn->bits.ipv6_ip ? 
"]" : ""); if(!hostname_to_match) return CURLE_OUT_OF_MEMORY; hostname_to_match_len = strlen(hostname_to_match); host_match = strncasecompare(ptr, hostname_to_match, hostname_to_match_len); free(hostname_to_match); ptr += hostname_to_match_len; host_match = host_match && *ptr == ':'; ptr++; } if(host_match) { if(*ptr == ':') { /* an empty port always matches */ port_match = TRUE; ptr++; } else { /* check whether the URL's port matches */ char *ptr_next = strchr(ptr, ':'); if(ptr_next) { char *endp = NULL; long port_to_match = strtol(ptr, &endp, 10); if((endp == ptr_next) && (port_to_match == conn->remote_port)) { port_match = TRUE; ptr = ptr_next + 1; } } } } if(host_match && port_match) { /* parse the hostname and port to connect to */ result = parse_connect_to_host_port(data, ptr, host_result, port_result); } return result; } /* * Processes all strings in the "connect to" slist, and uses the "connect * to host" and "connect to port" of the first string that matches. */ static CURLcode parse_connect_to_slist(struct Curl_easy *data, struct connectdata *conn, struct curl_slist *conn_to_host) { CURLcode result = CURLE_OK; char *host = NULL; int port = -1; while(conn_to_host && !host && port == -1) { result = parse_connect_to_string(data, conn, conn_to_host->data, &host, &port); if(result) return result; if(host && *host) { conn->conn_to_host.rawalloc = host; conn->conn_to_host.name = host; conn->bits.conn_to_host = TRUE; infof(data, "Connecting to hostname: %s\n", host); } else { /* no "connect to host" */ conn->bits.conn_to_host = FALSE; Curl_safefree(host); } if(port >= 0) { conn->conn_to_port = port; conn->bits.conn_to_port = TRUE; infof(data, "Connecting to port: %d\n", port); } else { /* no "connect to port" */ conn->bits.conn_to_port = FALSE; port = -1; } conn_to_host = conn_to_host->next; } return result; } /************************************************************* * Resolve the address of the server or proxy *************************************************************/ static CURLcode resolve_server(struct Curl_easy *data, struct connectdata *conn, bool *async) { CURLcode result = CURLE_OK; timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE); /************************************************************* * Resolve the name of the server or proxy *************************************************************/ if(conn->bits.reuse) /* We're reusing the connection - no need to resolve anything, and fix_hostname() was called already in create_conn() for the re-use case. */ *async = FALSE; else { /* this is a fresh connect */ int rc; struct Curl_dns_entry *hostaddr; #ifdef USE_UNIX_SOCKETS if(conn->unix_domain_socket) { /* Unix domain sockets are local. The host gets ignored, just use the * specified domain socket address. Do not cache "DNS entries". 
There is * no DNS involved and we already have the filesystem path available */ const char *path = conn->unix_domain_socket; hostaddr = calloc(1, sizeof(struct Curl_dns_entry)); if(!hostaddr) result = CURLE_OUT_OF_MEMORY; else { bool longpath = FALSE; hostaddr->addr = Curl_unix2addr(path, &longpath, conn->abstract_unix_socket); if(hostaddr->addr) hostaddr->inuse++; else { /* Long paths are not supported for now */ if(longpath) { failf(data, "Unix socket path too long: '%s'", path); result = CURLE_COULDNT_RESOLVE_HOST; } else result = CURLE_OUT_OF_MEMORY; free(hostaddr); hostaddr = NULL; } } } else #endif if(!conn->bits.proxy) { struct hostname *connhost; if(conn->bits.conn_to_host) connhost = &conn->conn_to_host; else connhost = &conn->host; /* If not connecting via a proxy, extract the port from the URL, if it is * there, thus overriding any defaults that might have been set above. */ if(conn->bits.conn_to_port) conn->port = conn->conn_to_port; else conn->port = conn->remote_port; /* Resolve target host right on */ rc = Curl_resolv_timeout(conn, connhost->name, (int)conn->port, &hostaddr, timeout_ms); if(rc == CURLRESOLV_PENDING) *async = TRUE; else if(rc == CURLRESOLV_TIMEDOUT) result = CURLE_OPERATION_TIMEDOUT; else if(!hostaddr) { failf(data, "Couldn't resolve host '%s'", connhost->dispname); result = CURLE_COULDNT_RESOLVE_HOST; /* don't return yet, we need to clean up the timeout first */ } } else { /* This is a proxy that hasn't been resolved yet. */ struct hostname * const host = conn->bits.socksproxy ? &conn->socks_proxy.host : &conn->http_proxy.host; /* resolve proxy */ rc = Curl_resolv_timeout(conn, host->name, (int)conn->port, &hostaddr, timeout_ms); if(rc == CURLRESOLV_PENDING) *async = TRUE; else if(rc == CURLRESOLV_TIMEDOUT) result = CURLE_OPERATION_TIMEDOUT; else if(!hostaddr) { failf(data, "Couldn't resolve proxy '%s'", host->dispname); result = CURLE_COULDNT_RESOLVE_PROXY; /* don't return yet, we need to clean up the timeout first */ } } DEBUGASSERT(conn->dns_entry == NULL); conn->dns_entry = hostaddr; } return result; } /* * Cleanup the connection just allocated before we can move along and use the * previously existing one. All relevant data is copied over and old_conn is * ready for freeing once this function returns. 
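 * In short: credentials, host names and "connect to" information move from
 * old_conn into conn, while conn keeps the sockets and protocol state of the
 * connection being re-used.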
*/ static void reuse_conn(struct connectdata *old_conn, struct connectdata *conn) { free_fixed_hostname(&old_conn->http_proxy.host); free_fixed_hostname(&old_conn->socks_proxy.host); free(old_conn->http_proxy.host.rawalloc); free(old_conn->socks_proxy.host.rawalloc); /* free the SSL config struct from this connection struct as this was allocated in vain and is targeted for destruction */ Curl_free_primary_ssl_config(&old_conn->ssl_config); Curl_free_primary_ssl_config(&old_conn->proxy_ssl_config); conn->data = old_conn->data; /* get the user+password information from the old_conn struct since it may * be new for this request even when we re-use an existing connection */ conn->bits.user_passwd = old_conn->bits.user_passwd; if(conn->bits.user_passwd) { /* use the new user name and password though */ Curl_safefree(conn->user); Curl_safefree(conn->passwd); conn->user = old_conn->user; conn->passwd = old_conn->passwd; old_conn->user = NULL; old_conn->passwd = NULL; } conn->bits.proxy_user_passwd = old_conn->bits.proxy_user_passwd; if(conn->bits.proxy_user_passwd) { /* use the new proxy user name and proxy password though */ Curl_safefree(conn->http_proxy.user); Curl_safefree(conn->socks_proxy.user); Curl_safefree(conn->http_proxy.passwd); Curl_safefree(conn->socks_proxy.passwd); conn->http_proxy.user = old_conn->http_proxy.user; conn->socks_proxy.user = old_conn->socks_proxy.user; conn->http_proxy.passwd = old_conn->http_proxy.passwd; conn->socks_proxy.passwd = old_conn->socks_proxy.passwd; old_conn->http_proxy.user = NULL; old_conn->socks_proxy.user = NULL; old_conn->http_proxy.passwd = NULL; old_conn->socks_proxy.passwd = NULL; } /* host can change, when doing keepalive with a proxy or if the case is different this time etc */ free_fixed_hostname(&conn->host); free_fixed_hostname(&conn->conn_to_host); Curl_safefree(conn->host.rawalloc); Curl_safefree(conn->conn_to_host.rawalloc); conn->host = old_conn->host; conn->conn_to_host = old_conn->conn_to_host; conn->conn_to_port = old_conn->conn_to_port; conn->remote_port = old_conn->remote_port; /* persist connection info in session handle */ Curl_persistconninfo(conn); conn_reset_all_postponed_data(old_conn); /* free buffers */ /* re-use init */ conn->bits.reuse = TRUE; /* yes, we're re-using here */ Curl_safefree(old_conn->user); Curl_safefree(old_conn->passwd); Curl_safefree(old_conn->options); Curl_safefree(old_conn->http_proxy.user); Curl_safefree(old_conn->socks_proxy.user); Curl_safefree(old_conn->http_proxy.passwd); Curl_safefree(old_conn->socks_proxy.passwd); Curl_safefree(old_conn->localdev); Curl_llist_destroy(&old_conn->send_pipe, NULL); Curl_llist_destroy(&old_conn->recv_pipe, NULL); Curl_safefree(old_conn->master_buffer); #ifdef USE_UNIX_SOCKETS Curl_safefree(old_conn->unix_domain_socket); #endif } /** * create_conn() sets up a new connectdata struct, or re-uses an already * existing one, and resolves host name. * * if this function returns CURLE_OK and *async is set to TRUE, the resolve * response will be coming asynchronously. If *async is FALSE, the name is * already resolved. * * @param data The sessionhandle pointer * @param in_connect is set to the next connection data pointer * @param async is set TRUE when an async DNS resolution is pending * @see Curl_setup_conn() * * *NOTE* this function assigns the conn->data pointer! 
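 * Rough order of work (see below): parse the URL, set up any proxy, IDN-fix
 * the host names, set up protocol internals, then either re-use a matching
 * cached connection or prepare a brand new one and resolve its address.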
*/ static CURLcode create_conn(struct Curl_easy *data, struct connectdata **in_connect, bool *async) { CURLcode result = CURLE_OK; struct connectdata *conn; struct connectdata *conn_temp = NULL; bool reuse; bool connections_available = TRUE; bool force_reuse = FALSE; bool waitpipe = FALSE; size_t max_host_connections = Curl_multi_max_host_connections(data->multi); size_t max_total_connections = Curl_multi_max_total_connections(data->multi); *async = FALSE; /************************************************************* * Check input data *************************************************************/ if(!data->change.url) { result = CURLE_URL_MALFORMAT; goto out; } /* First, split up the current URL in parts so that we can use the parts for checking against the already present connections. In order to not have to modify everything at once, we allocate a temporary connection data struct and fill in for comparison purposes. */ conn = allocate_conn(data); if(!conn) { result = CURLE_OUT_OF_MEMORY; goto out; } /* We must set the return variable as soon as possible, so that our parent can cleanup any possible allocs we may have done before any failure */ *in_connect = conn; result = parseurlandfillconn(data, conn); if(result) goto out; if(data->set.str[STRING_BEARER]) { conn->oauth_bearer = strdup(data->set.str[STRING_BEARER]); if(!conn->oauth_bearer) { result = CURLE_OUT_OF_MEMORY; goto out; } } #ifdef USE_UNIX_SOCKETS if(data->set.str[STRING_UNIX_SOCKET_PATH]) { conn->unix_domain_socket = strdup(data->set.str[STRING_UNIX_SOCKET_PATH]); if(conn->unix_domain_socket == NULL) { result = CURLE_OUT_OF_MEMORY; goto out; } conn->abstract_unix_socket = data->set.abstract_unix_socket; } #endif /* After the unix socket init but before the proxy vars are used, parse and initialize the proxy vars */ #ifndef CURL_DISABLE_PROXY result = create_conn_helper_init_proxy(conn); if(result) goto out; #endif /************************************************************* * If the protocol is using SSL and HTTP proxy is used, we set * the tunnel_proxy bit. *************************************************************/ if((conn->given->flags&PROTOPT_SSL) && conn->bits.httpproxy) conn->bits.tunnel_proxy = TRUE; /************************************************************* * Figure out the remote port number and fix it in the URL *************************************************************/ result = parse_remote_port(data, conn); if(result) goto out; /* Check for overridden login details and set them accordingly so they they are known when protocol->setup_connection is called! */ result = override_login(data, conn, &conn->user, &conn->passwd, &conn->options); if(result) goto out; result = set_login(conn); /* default credentials */ if(result) goto out; /************************************************************* * Process the "connect to" linked list of hostname/port mappings. * Do this after the remote port number has been fixed in the URL. 
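 * Each list entry has the form "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT",
 * e.g. "example.com:443:backend.example.com:8443" (illustrative).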
*************************************************************/ result = parse_connect_to_slist(data, conn, data->set.connect_to); if(result) goto out; /************************************************************* * IDN-fix the hostnames *************************************************************/ result = fix_hostname(conn, &conn->host); if(result) goto out; if(conn->bits.conn_to_host) { result = fix_hostname(conn, &conn->conn_to_host); if(result) goto out; } if(conn->bits.httpproxy) { result = fix_hostname(conn, &conn->http_proxy.host); if(result) goto out; } if(conn->bits.socksproxy) { result = fix_hostname(conn, &conn->socks_proxy.host); if(result) goto out; } /************************************************************* * Check whether the host and the "connect to host" are equal. * Do this after the hostnames have been IDN-fixed. *************************************************************/ if(conn->bits.conn_to_host && strcasecompare(conn->conn_to_host.name, conn->host.name)) { conn->bits.conn_to_host = FALSE; } /************************************************************* * Check whether the port and the "connect to port" are equal. * Do this after the remote port number has been fixed in the URL. *************************************************************/ if(conn->bits.conn_to_port && conn->conn_to_port == conn->remote_port) { conn->bits.conn_to_port = FALSE; } /************************************************************* * If the "connect to" feature is used with an HTTP proxy, * we set the tunnel_proxy bit. *************************************************************/ if((conn->bits.conn_to_host || conn->bits.conn_to_port) && conn->bits.httpproxy) conn->bits.tunnel_proxy = TRUE; /************************************************************* * Setup internals depending on protocol. Needs to be done after * we figured out what/if proxy to use. *************************************************************/ result = setup_connection_internals(conn); if(result) goto out; conn->recv[FIRSTSOCKET] = Curl_recv_plain; conn->send[FIRSTSOCKET] = Curl_send_plain; conn->recv[SECONDARYSOCKET] = Curl_recv_plain; conn->send[SECONDARYSOCKET] = Curl_send_plain; conn->bits.tcp_fastopen = data->set.tcp_fastopen; /*********************************************************************** * file: is a special case in that it doesn't need a network connection ***********************************************************************/ #ifndef CURL_DISABLE_FILE if(conn->handler->flags & PROTOPT_NONETWORK) { bool done; /* this is supposed to be the connect function so we better at least check that the file is present here! */ DEBUGASSERT(conn->handler->connect_it); Curl_persistconninfo(conn); result = conn->handler->connect_it(conn, &done); /* Setup a "faked" transfer that'll do nothing */ if(!result) { conn->data = data; conn->bits.tcpconnect[FIRSTSOCKET] = TRUE; /* we are "connected */ result = Curl_conncache_add_conn(data->state.conn_cache, conn); if(result) goto out; /* * Setup whatever necessary for a resumed transfer */ result = setup_range(data); if(result) { DEBUGASSERT(conn->handler->done); /* we ignore the return code for the protocol-specific DONE */ (void)conn->handler->done(conn, result, FALSE); goto out; } Curl_setup_transfer(conn, -1, -1, FALSE, NULL, /* no download */ -1, NULL); /* no upload */ } /* since we skip do_init() */ Curl_init_do(data, conn); goto out; } #endif /* Get a cloned copy of the SSL config situation stored in the connection struct. 
But to get this going nicely, we must first make sure that the strings in the master copy are pointing to the correct strings in the session handle strings array! Keep in mind that the pointers in the master copy are pointing to strings that will be freed as part of the Curl_easy struct, but all cloned copies will be separately allocated. */ data->set.ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_ORIG]; data->set.proxy_ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_PROXY]; data->set.ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_ORIG]; data->set.proxy_ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_PROXY]; data->set.ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE]; data->set.proxy_ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE]; data->set.ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET]; data->set.proxy_ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET]; data->set.ssl.primary.cipher_list = data->set.str[STRING_SSL_CIPHER_LIST_ORIG]; data->set.proxy_ssl.primary.cipher_list = data->set.str[STRING_SSL_CIPHER_LIST_PROXY]; data->set.ssl.primary.cipher_list13 = data->set.str[STRING_SSL_CIPHER13_LIST_ORIG]; data->set.proxy_ssl.primary.cipher_list13 = data->set.str[STRING_SSL_CIPHER13_LIST_PROXY]; data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG]; data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY]; data->set.ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_ORIG]; data->set.proxy_ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_PROXY]; data->set.ssl.cert = data->set.str[STRING_CERT_ORIG]; data->set.proxy_ssl.cert = data->set.str[STRING_CERT_PROXY]; data->set.ssl.cert_type = data->set.str[STRING_CERT_TYPE_ORIG]; data->set.proxy_ssl.cert_type = data->set.str[STRING_CERT_TYPE_PROXY]; data->set.ssl.key = data->set.str[STRING_KEY_ORIG]; data->set.proxy_ssl.key = data->set.str[STRING_KEY_PROXY]; data->set.ssl.key_type = data->set.str[STRING_KEY_TYPE_ORIG]; data->set.proxy_ssl.key_type = data->set.str[STRING_KEY_TYPE_PROXY]; data->set.ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_ORIG]; data->set.proxy_ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_PROXY]; data->set.ssl.primary.clientcert = data->set.str[STRING_CERT_ORIG]; data->set.proxy_ssl.primary.clientcert = data->set.str[STRING_CERT_PROXY]; #ifdef USE_TLS_SRP data->set.ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG]; data->set.proxy_ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_PROXY]; data->set.ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG]; data->set.proxy_ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_PROXY]; #endif if(!Curl_clone_primary_ssl_config(&data->set.ssl.primary, &conn->ssl_config)) { result = CURLE_OUT_OF_MEMORY; goto out; } if(!Curl_clone_primary_ssl_config(&data->set.proxy_ssl.primary, &conn->proxy_ssl_config)) { result = CURLE_OUT_OF_MEMORY; goto out; } prune_dead_connections(data); /************************************************************* * Check the current list of connections to see if we can * re-use an already existing one or if we have to create a * new one. *************************************************************/ DEBUGASSERT(conn->user); DEBUGASSERT(conn->passwd); /* reuse_fresh is TRUE if we are told to use a new connection by force, but we only acknowledge this option if this is not a re-used connection already (which happens due to follow-location or during a HTTP authentication phase). 
*/ if(data->set.reuse_fresh && !data->state.this_is_a_follow) reuse = FALSE; else reuse = ConnectionExists(data, conn, &conn_temp, &force_reuse, &waitpipe); /* If we found a reusable connection that is now marked as in use, we may still want to open a new connection if we are pipelining. */ if(reuse && !force_reuse && IsPipeliningPossible(data, conn_temp)) { size_t pipelen = conn_temp->send_pipe.size + conn_temp->recv_pipe.size; if(pipelen > 0) { infof(data, "Found connection %ld, with requests in the pipe (%zu)\n", conn_temp->connection_id, pipelen); if(Curl_conncache_bundle_size(conn_temp) < max_host_connections && Curl_conncache_size(data) < max_total_connections) { /* We want a new connection anyway */ reuse = FALSE; infof(data, "We can reuse, but we want a new connection anyway\n"); Curl_conncache_return_conn(conn_temp); } } } if(reuse) { /* * We already have a connection for this, we got the former connection * in the conn_temp variable and thus we need to cleanup the one we * just allocated before we can move along and use the previously * existing one. */ reuse_conn(conn, conn_temp); #ifdef USE_SSL free(conn->ssl_extra); #endif free(conn); /* we don't need this anymore */ conn = conn_temp; *in_connect = conn; infof(data, "Re-using existing connection! (#%ld) with %s %s\n", conn->connection_id, conn->bits.proxy?"proxy":"host", conn->socks_proxy.host.name ? conn->socks_proxy.host.dispname : conn->http_proxy.host.name ? conn->http_proxy.host.dispname : conn->host.dispname); } else { /* We have decided that we want a new connection. However, we may not be able to do that if we have reached the limit of how many connections we are allowed to open. */ if(conn->handler->flags & PROTOPT_ALPN_NPN) { /* The protocol wants it, so set the bits if enabled in the easy handle (default) */ if(data->set.ssl_enable_alpn) conn->bits.tls_enable_alpn = TRUE; if(data->set.ssl_enable_npn) conn->bits.tls_enable_npn = TRUE; } if(waitpipe) /* There is a connection that *might* become usable for pipelining "soon", and we wait for that */ connections_available = FALSE; else { /* this gets a lock on the conncache */ struct connectbundle *bundle = Curl_conncache_find_bundle(conn, data->state.conn_cache); if(max_host_connections > 0 && bundle && (bundle->num_connections >= max_host_connections)) { struct connectdata *conn_candidate; /* The bundle is full. Extract the oldest connection. */ conn_candidate = Curl_conncache_extract_bundle(data, bundle); Curl_conncache_unlock(conn); if(conn_candidate) (void)Curl_disconnect(data, conn_candidate, /* dead_connection */ FALSE); else { infof(data, "No more connections allowed to host: %zu\n", max_host_connections); connections_available = FALSE; } } else Curl_conncache_unlock(conn); } if(connections_available && (max_total_connections > 0) && (Curl_conncache_size(data) >= max_total_connections)) { struct connectdata *conn_candidate; /* The cache is full. Let's see if we can kill a connection. */ conn_candidate = Curl_conncache_extract_oldest(data); if(conn_candidate) (void)Curl_disconnect(data, conn_candidate, /* dead_connection */ FALSE); else { infof(data, "No connections available in cache\n"); connections_available = FALSE; } } if(!connections_available) { infof(data, "No connections available.\n"); conn_free(conn); *in_connect = NULL; result = CURLE_NO_CONNECTION_AVAILABLE; goto out; } else { /* * This is a brand new connection, so let's store it in the connection * cache of ours! 
*/ result = Curl_conncache_add_conn(data->state.conn_cache, conn); if(result) goto out; } #if defined(USE_NTLM) /* If NTLM is requested in a part of this connection, make sure we don't assume the state is fine as this is a fresh connection and NTLM is connection based. */ if((data->state.authhost.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && data->state.authhost.done) { infof(data, "NTLM picked AND auth done set, clear picked!\n"); data->state.authhost.picked = CURLAUTH_NONE; data->state.authhost.done = FALSE; } if((data->state.authproxy.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && data->state.authproxy.done) { infof(data, "NTLM-proxy picked AND auth done set, clear picked!\n"); data->state.authproxy.picked = CURLAUTH_NONE; data->state.authproxy.done = FALSE; } #endif } /* Setup and init stuff before DO starts, in preparing for the transfer. */ Curl_init_do(data, conn); /* * Setup whatever necessary for a resumed transfer */ result = setup_range(data); if(result) goto out; /* Continue connectdata initialization here. */ /* * Inherit the proper values from the urldata struct AFTER we have arranged * the persistent connection stuff */ conn->seek_func = data->set.seek_func; conn->seek_client = data->set.seek_client; /************************************************************* * Resolve the address of the server or proxy *************************************************************/ result = resolve_server(data, conn, async); out: return result; } /* Curl_setup_conn() is called after the name resolve initiated in * create_conn() is all done. * * Curl_setup_conn() also handles reused connections * * conn->data MUST already have been setup fine (in create_conn) */ CURLcode Curl_setup_conn(struct connectdata *conn, bool *protocol_done) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; Curl_pgrsTime(data, TIMER_NAMELOOKUP); if(conn->handler->flags & PROTOPT_NONETWORK) { /* nothing to setup when not using a network */ *protocol_done = TRUE; return result; } *protocol_done = FALSE; /* default to not done */ /* set proxy_connect_closed to false unconditionally already here since it is used strictly to provide extra information to a parent function in the case of proxy CONNECT failures and we must make sure we don't have it lingering set from a previous invoke */ conn->bits.proxy_connect_closed = FALSE; /* * Set user-agent. Used for HTTP, but since we can attempt to tunnel * basically anything through a http proxy we can't limit this based on * protocol. 
*/ if(data->set.str[STRING_USERAGENT]) { Curl_safefree(conn->allocptr.uagent); conn->allocptr.uagent = aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]); if(!conn->allocptr.uagent) return CURLE_OUT_OF_MEMORY; } data->req.headerbytecount = 0; #ifdef CURL_DO_LINEEND_CONV data->state.crlf_conversions = 0; /* reset CRLF conversion counter */ #endif /* CURL_DO_LINEEND_CONV */ /* set start time here for timeout purposes in the connect procedure, it is later set again for the progress meter purpose */ conn->now = Curl_now(); if(CURL_SOCKET_BAD == conn->sock[FIRSTSOCKET]) { conn->bits.tcpconnect[FIRSTSOCKET] = FALSE; result = Curl_connecthost(conn, conn->dns_entry); if(result) return result; } else { Curl_pgrsTime(data, TIMER_CONNECT); /* we're connected already */ Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */ conn->bits.tcpconnect[FIRSTSOCKET] = TRUE; *protocol_done = TRUE; Curl_updateconninfo(conn, conn->sock[FIRSTSOCKET]); Curl_verboseconnect(conn); } conn->now = Curl_now(); /* time this *after* the connect is done, we set this here perhaps a second time */ return result; } CURLcode Curl_connect(struct Curl_easy *data, struct connectdata **in_connect, bool *asyncp, bool *protocol_done) { CURLcode result; *asyncp = FALSE; /* assume synchronous resolves by default */ /* init the single-transfer specific data */ Curl_free_request_state(data); memset(&data->req, 0, sizeof(struct SingleRequest)); data->req.maxdownload = -1; /* call the stuff that needs to be called */ result = create_conn(data, in_connect, asyncp); if(!result) { if(CONN_INUSE(*in_connect)) /* pipelining */ *protocol_done = TRUE; else if(!*asyncp) { /* DNS resolution is done: that's either because this is a reused connection, in which case DNS was unnecessary, or because DNS really did finish already (synch resolver/fast async resolve) */ result = Curl_setup_conn(*in_connect, protocol_done); } } if(result == CURLE_NO_CONNECTION_AVAILABLE) { *in_connect = NULL; return result; } else if(result && *in_connect) { /* We're not allowed to return failure with memory left allocated in the connectdata struct, free those here */ Curl_disconnect(data, *in_connect, TRUE); *in_connect = NULL; /* return a NULL */ } return result; } /* * Curl_init_do() inits the readwrite session. This is inited each time (in * the DO function before the protocol-specific DO functions are invoked) for * a transfer, sometimes multiple times on the same Curl_easy. Make sure * nothing in here depends on stuff that are setup dynamically for the * transfer. * * Allow this function to get called with 'conn' set to NULL. */ CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn) { struct SingleRequest *k = &data->req; if(conn) { conn->bits.do_more = FALSE; /* by default there's no curl_do_more() to use */ /* if the protocol used doesn't support wildcards, switch it off */ if(data->state.wildcardmatch && !(conn->handler->flags & PROTOPT_WILDCARD)) data->state.wildcardmatch = FALSE; } data->state.done = FALSE; /* *_done() is not called yet */ data->state.expect100header = FALSE; if(data->set.opt_no_body) /* in HTTP lingo, no body means using the HEAD request... */ data->set.httpreq = HTTPREQ_HEAD; else if(HTTPREQ_HEAD == data->set.httpreq) /* ... but if unset there really is no perfect method that is the "opposite" of HEAD but in reality most people probably think GET then. The important thing is that we can't let it remain HEAD if the opt_no_body is set FALSE since then we'll behave wrong when getting HTTP. 
*/ data->set.httpreq = HTTPREQ_GET; k->start = Curl_now(); /* start time */ k->now = k->start; /* current time is now */ k->header = TRUE; /* assume header */ k->bytecount = 0; k->buf = data->state.buffer; k->hbufp = data->state.headerbuff; k->ignorebody = FALSE; Curl_speedinit(data); Curl_pgrsSetUploadCounter(data, 0); Curl_pgrsSetDownloadCounter(data, 0); return CURLE_OK; } /* * get_protocol_family() * * This is used to return the protocol family for a given protocol. * * Parameters: * * protocol [in] - A single bit protocol identifier such as HTTP or HTTPS. * * Returns the family as a single bit protocol identifier. */ static unsigned int get_protocol_family(unsigned int protocol) { unsigned int family; switch(protocol) { case CURLPROTO_HTTP: case CURLPROTO_HTTPS: family = CURLPROTO_HTTP; break; case CURLPROTO_FTP: case CURLPROTO_FTPS: family = CURLPROTO_FTP; break; case CURLPROTO_SCP: family = CURLPROTO_SCP; break; case CURLPROTO_SFTP: family = CURLPROTO_SFTP; break; case CURLPROTO_TELNET: family = CURLPROTO_TELNET; break; case CURLPROTO_LDAP: case CURLPROTO_LDAPS: family = CURLPROTO_LDAP; break; case CURLPROTO_DICT: family = CURLPROTO_DICT; break; case CURLPROTO_FILE: family = CURLPROTO_FILE; break; case CURLPROTO_TFTP: family = CURLPROTO_TFTP; break; case CURLPROTO_IMAP: case CURLPROTO_IMAPS: family = CURLPROTO_IMAP; break; case CURLPROTO_POP3: case CURLPROTO_POP3S: family = CURLPROTO_POP3; break; case CURLPROTO_SMTP: case CURLPROTO_SMTPS: family = CURLPROTO_SMTP; break; case CURLPROTO_RTSP: family = CURLPROTO_RTSP; break; case CURLPROTO_RTMP: case CURLPROTO_RTMPS: family = CURLPROTO_RTMP; break; case CURLPROTO_RTMPT: case CURLPROTO_RTMPTS: family = CURLPROTO_RTMPT; break; case CURLPROTO_RTMPE: family = CURLPROTO_RTMPE; break; case CURLPROTO_RTMPTE: family = CURLPROTO_RTMPTE; break; case CURLPROTO_GOPHER: family = CURLPROTO_GOPHER; break; case CURLPROTO_SMB: case CURLPROTO_SMBS: family = CURLPROTO_SMB; break; default: family = 0; break; } return family; } /* * Wrapper to call functions in Curl_conncache_foreach() * * Returns always 0. */ static int conn_upkeep(struct connectdata *conn, void *param) { /* Param is unused. */ (void)param; if(conn->handler->connection_check) { /* Do a protocol-specific keepalive check on the connection. */ conn->handler->connection_check(conn, CONNCHECK_KEEPALIVE); } return 0; /* continue iteration */ } CURLcode Curl_upkeep(struct conncache *conn_cache, void *data) { /* Loop over every connection and make connection alive. */ Curl_conncache_foreach(data, conn_cache, data, conn_upkeep); return CURLE_OK; }
CURLcode Curl_close(struct Curl_easy *data) { struct Curl_multi *m; if(!data) return CURLE_OK; Curl_expire_clear(data); /* shut off timers */ m = data->multi; if(m) /* This handle is still part of a multi handle, take care of this first and detach this handle from there. */ curl_multi_remove_handle(data->multi, data); if(data->multi_easy) /* when curl_easy_perform() is used, it creates its own multi handle to use and this is the one */ curl_multi_cleanup(data->multi_easy); /* Destroy the timeout list that is held in the easy handle. It is /normally/ done by curl_multi_remove_handle() but this is "just in case" */ Curl_llist_destroy(&data->state.timeoutlist, NULL); data->magic = 0; /* force a clear AFTER the possibly enforced removal from the multi handle, since that function uses the magic field! */ if(data->state.rangestringalloc) free(data->state.range); /* freed here just in case DONE wasn't called */ Curl_free_request_state(data); /* Close down all open SSL info and sessions */ Curl_ssl_close_all(data); Curl_safefree(data->state.first_host); Curl_safefree(data->state.scratch); Curl_ssl_free_certinfo(data); /* Cleanup possible redirect junk */ free(data->req.newurl); data->req.newurl = NULL; if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; Curl_up_free(data); Curl_safefree(data->state.buffer); Curl_safefree(data->state.headerbuff); Curl_safefree(data->state.ulbuf); Curl_flush_cookies(data, 1); Curl_digest_cleanup(data); Curl_safefree(data->info.contenttype); Curl_safefree(data->info.wouldredirect); /* this destroys the channel and we cannot use it anymore after this */ Curl_resolver_cleanup(data->state.resolver); Curl_http2_cleanup_dependencies(data); Curl_convert_close(data); /* No longer a dirty share, if it exists */ if(data->share) { Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE); data->share->dirty--; Curl_share_unlock(data, CURL_LOCK_DATA_SHARE); } /* destruct wildcard structures if it is needed */ Curl_wildcard_dtor(&data->wildcard); Curl_freeset(data); free(data); return CURLE_OK; }
CURLcode Curl_close(struct Curl_easy *data) { struct Curl_multi *m; if(!data) return CURLE_OK; Curl_expire_clear(data); /* shut off timers */ m = data->multi; if(m) /* This handle is still part of a multi handle, take care of this first and detach this handle from there. */ curl_multi_remove_handle(data->multi, data); if(data->multi_easy) { /* when curl_easy_perform() is used, it creates its own multi handle to use and this is the one */ curl_multi_cleanup(data->multi_easy); data->multi_easy = NULL; } /* Destroy the timeout list that is held in the easy handle. It is /normally/ done by curl_multi_remove_handle() but this is "just in case" */ Curl_llist_destroy(&data->state.timeoutlist, NULL); data->magic = 0; /* force a clear AFTER the possibly enforced removal from the multi handle, since that function uses the magic field! */ if(data->state.rangestringalloc) free(data->state.range); /* freed here just in case DONE wasn't called */ Curl_free_request_state(data); /* Close down all open SSL info and sessions */ Curl_ssl_close_all(data); Curl_safefree(data->state.first_host); Curl_safefree(data->state.scratch); Curl_ssl_free_certinfo(data); /* Cleanup possible redirect junk */ free(data->req.newurl); data->req.newurl = NULL; if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; Curl_up_free(data); Curl_safefree(data->state.buffer); Curl_safefree(data->state.headerbuff); Curl_safefree(data->state.ulbuf); Curl_flush_cookies(data, 1); Curl_digest_cleanup(data); Curl_safefree(data->info.contenttype); Curl_safefree(data->info.wouldredirect); /* this destroys the channel and we cannot use it anymore after this */ Curl_resolver_cleanup(data->state.resolver); Curl_http2_cleanup_dependencies(data); Curl_convert_close(data); /* No longer a dirty share, if it exists */ if(data->share) { Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE); data->share->dirty--; Curl_share_unlock(data, CURL_LOCK_DATA_SHARE); } /* destruct wildcard structures if it is needed */ Curl_wildcard_dtor(&data->wildcard); Curl_freeset(data); free(data); return CURLE_OK; }
{'added': [(334, ' if(data->multi_easy) {'), (338, ' data->multi_easy = NULL;'), (339, ' }')], 'deleted': [(334, ' if(data->multi_easy)')]}
3
1
2738
16845
https://github.com/curl/curl
CVE-2018-16840
['CWE-416']
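The diff above for CVE-2018-16840 (CWE-416, use-after-free) adds a NULL assignment right after curl_multi_cleanup() in Curl_close(), so later code sees a cleared pointer instead of a dangling one. As a hedged illustration of that general pattern only — the struct and function names below are hypothetical and are not curl's API — the following minimal C sketch shows how freeing a cached helper without clearing the pointer leaves a dangling reference, and how nulling it after cleanup makes subsequent guards safe:

/* Minimal sketch of the use-after-free class fixed above (CWE-416).
 * easy_handle, multi_cleanup, easy_cleanup_* are hypothetical names that
 * mirror the shape of the bug, not curl's real internals. */
#include <stdio.h>
#include <stdlib.h>

struct multi_handle { int users; };

struct easy_handle {
    struct multi_handle *multi_easy; /* lazily created helper, cached here */
};

static void multi_cleanup(struct multi_handle *m)
{
    free(m);
}

/* Buggy shape (pre-fix): the helper is freed but the pointer stays set,
 * so any later use through multi_easy is a use-after-free. */
static void easy_cleanup_buggy(struct easy_handle *e)
{
    if(e->multi_easy)
        multi_cleanup(e->multi_easy);
    /* e->multi_easy still points at freed memory here */
}

/* Fixed shape (post-fix): clear the pointer immediately after cleanup so
 * subsequent NULL checks fail instead of dereferencing freed memory. */
static void easy_cleanup_fixed(struct easy_handle *e)
{
    if(e->multi_easy) {
        multi_cleanup(e->multi_easy);
        e->multi_easy = NULL;
    }
}

int main(void)
{
    struct easy_handle e = { malloc(sizeof(struct multi_handle)) };

    (void)easy_cleanup_buggy; /* kept only for contrast, intentionally unused */

    easy_cleanup_fixed(&e);

    /* Safe: the guard fails because the pointer was cleared; with the buggy
     * variant this branch would write to freed memory. */
    if(e.multi_easy)
        e.multi_easy->users++;

    printf("multi_easy is %s\n", e.multi_easy ? "set" : "NULL");
    return 0;
}

The design point is the same as in the real patch: ownership of the cached handle ends at cleanup, so the pointer must not be allowed to outlive it.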
box_code_adobe.c
abst_box_read
/* * GPAC - Multimedia Framework C SDK * * Author: Romain Bouqueau, Jean Le Feuvre * Copyright (c) Romain Bouqueau 2012- Telecom Paris 2019- * All rights reserved * * Note: this development was kindly sponsorized by Vizion'R (http://vizionr.com) * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_ISOM_ADOBE #ifndef GPAC_DISABLE_ISOM #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void abst_box_del(GF_Box *s) { GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; if (ptr == NULL) return; if (ptr->movie_identifier) gf_free(ptr->movie_identifier); if (ptr->drm_data) gf_free(ptr->drm_data); if (ptr->meta_data) gf_free(ptr->meta_data); while (gf_list_count(ptr->server_entry_table)) { gf_free(gf_list_get(ptr->server_entry_table, 0)); gf_list_rem(ptr->server_entry_table, 0); } gf_list_del(ptr->server_entry_table); while (gf_list_count(ptr->quality_entry_table)) { gf_free(gf_list_get(ptr->quality_entry_table, 0)); gf_list_rem(ptr->quality_entry_table, 0); } gf_list_del(ptr->quality_entry_table); while (gf_list_count(ptr->segment_run_table_entries)) { gf_isom_box_del((GF_Box *)gf_list_get(ptr->segment_run_table_entries, 0)); gf_list_rem(ptr->segment_run_table_entries, 0); } gf_list_del(ptr->segment_run_table_entries); while (gf_list_count(ptr->fragment_run_table_entries)) { gf_isom_box_del((GF_Box *)gf_list_get(ptr->fragment_run_table_entries, 0)); gf_list_rem(ptr->fragment_run_table_entries, 0); } gf_list_del(ptr->fragment_run_table_entries); gf_free(ptr); } GF_Err abst_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; int i; u32 tmp_strsize; char *tmp_str; Bool zfound=GF_FALSE; GF_Err e; ISOM_DECREASE_SIZE(ptr, 25) ptr->bootstrapinfo_version = gf_bs_read_u32(bs); ptr->profile = gf_bs_read_int(bs, 2); ptr->live = gf_bs_read_int(bs, 1); ptr->update = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 4); ptr->time_scale = gf_bs_read_u32(bs); ptr->current_media_time = gf_bs_read_u64(bs); ptr->smpte_time_code_offset = gf_bs_read_u64(bs); i=0; if (ptr->size<8) return GF_ISOM_INVALID_FILE; tmp_strsize =(u32)ptr->size; tmp_str = gf_malloc(sizeof(char)*tmp_strsize); if (!tmp_str) return GF_OUT_OF_MEM; memset(tmp_str, 0, sizeof(char)*tmp_strsize); while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (i) { ptr->movie_identifier = gf_strdup(tmp_str); } ISOM_DECREASE_SIZE(ptr, 1) ptr->server_entry_count = gf_bs_read_u8(bs); for (i=0; i<ptr->server_entry_count; i++) { int j=0; zfound = GF_FALSE; tmp_strsize=(u32)ptr->size; while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) { zfound = GF_TRUE; break; 
} j++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (j) { gf_list_insert(ptr->server_entry_table, gf_strdup(tmp_str), i); } } ISOM_DECREASE_SIZE(ptr, 1) ptr->quality_entry_count = gf_bs_read_u8(bs); for (i=0; i<ptr->quality_entry_count; i++) { int j=0; zfound = GF_FALSE; tmp_strsize=(u32)ptr->size; while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) { zfound = GF_TRUE; break; } j++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (j) { gf_list_insert(ptr->quality_entry_table, gf_strdup(tmp_str), i); } } i=0; tmp_strsize=(u32)ptr->size; zfound = GF_FALSE; while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (i) { ptr->drm_data = gf_strdup(tmp_str); } i=0; tmp_strsize=(u32)ptr->size; zfound = GF_FALSE; while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (i) { ptr->meta_data = gf_strdup(tmp_str); } ISOM_DECREASE_SIZE(ptr, 1) ptr->segment_run_table_count = gf_bs_read_u8(bs); for (i=0; i<ptr->segment_run_table_count; i++) { GF_AdobeSegmentRunTableBox *asrt = NULL; e = gf_isom_box_parse((GF_Box **)&asrt, bs); if (e) { if (asrt) gf_isom_box_del((GF_Box*)asrt); gf_free(tmp_str); return e; } gf_list_add(ptr->segment_run_table_entries, asrt); } ISOM_DECREASE_SIZE(ptr, 1) ptr->fragment_run_table_count = gf_bs_read_u8(bs); for (i=0; i<ptr->fragment_run_table_count; i++) { GF_AdobeFragmentRunTableBox *afrt = NULL; e = gf_isom_box_parse((GF_Box **)&afrt, bs); if (e) { if (afrt) gf_isom_box_del((GF_Box*)afrt); gf_free(tmp_str); return e; } gf_list_add(ptr->fragment_run_table_entries, afrt); } gf_free(tmp_str); return GF_OK; } GF_Box *abst_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeBootstrapInfoBox, GF_ISOM_BOX_TYPE_ABST); tmp->server_entry_table = gf_list_new(); tmp->quality_entry_table = gf_list_new(); tmp->segment_run_table_entries = gf_list_new(); tmp->fragment_run_table_entries = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err abst_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; unsigned int i; GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->bootstrapinfo_version); gf_bs_write_int(bs, ptr->profile, 2); gf_bs_write_int(bs, ptr->live, 1); gf_bs_write_int(bs, ptr->update, 1); gf_bs_write_int(bs, ptr->reserved, 4); gf_bs_write_u32(bs, ptr->time_scale); gf_bs_write_u64(bs, ptr->current_media_time); gf_bs_write_u64(bs, ptr->smpte_time_code_offset); if (ptr->movie_identifier) gf_bs_write_data(bs, ptr->movie_identifier, (u32)strlen(ptr->movie_identifier) + 1); else gf_bs_write_u8(bs, 0); gf_bs_write_u8(bs, ptr->server_entry_count); for (i=0; i<ptr->server_entry_count; i++) { char *str = (char*)gf_list_get(ptr->server_entry_table, i); gf_bs_write_data(bs, str, (u32)strlen(str) + 1); } gf_bs_write_u8(bs, ptr->quality_entry_count); for (i=0; i<ptr->quality_entry_count; i++) { char *str = (char*)gf_list_get(ptr->quality_entry_table, i); gf_bs_write_data(bs, str, (u32)strlen(str) + 1); } if (ptr->drm_data) gf_bs_write_data(bs, ptr->drm_data, (u32)strlen(ptr->drm_data) + 1); else gf_bs_write_u8(bs, 0); if (ptr->meta_data) gf_bs_write_data(bs, ptr->meta_data, (u32)strlen(ptr->meta_data) + 1); else gf_bs_write_u8(bs, 0); gf_bs_write_u8(bs, 
ptr->segment_run_table_count); for (i=0; i<ptr->segment_run_table_count; i++) { e = gf_isom_box_write((GF_Box *)gf_list_get(ptr->segment_run_table_entries, i), bs); if (e) return e; } gf_bs_write_u8(bs, ptr->fragment_run_table_count); for (i=0; i<ptr->fragment_run_table_count; i++) { e = gf_isom_box_write((GF_Box *)gf_list_get(ptr->fragment_run_table_entries, i), bs); if (e) return e; } return GF_OK; } GF_Err abst_box_size(GF_Box *s) { GF_Err e; u32 i; GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; s->size += 25 + (ptr->movie_identifier ? (strlen(ptr->movie_identifier) + 1) : 1) + 1; for (i=0; i<ptr->server_entry_count; i++) s->size += strlen(gf_list_get(ptr->server_entry_table, i)) + 1; s->size += 1; for (i=0; i<ptr->quality_entry_count; i++) s->size += strlen(gf_list_get(ptr->quality_entry_table, i)) + 1; s->size += (ptr->drm_data ? (strlen(ptr->drm_data) + 1) : 1) + (ptr->meta_data ? (strlen(ptr->meta_data) + 1) : 1) + 1; for (i=0; i<ptr->segment_run_table_count; i++) { GF_Box *box = (GF_Box *)gf_list_get(ptr->segment_run_table_entries, i); e = gf_isom_box_size(box); if (e) return e; s->size += box->size; } s->size += 1; for (i=0; i<ptr->fragment_run_table_count; i++) { GF_Box *box = (GF_Box *)gf_list_get(ptr->fragment_run_table_entries, i); e = gf_isom_box_size(box); if (e) return e; s->size += box->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void afra_box_del(GF_Box *s) { GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->local_access_entries)) { gf_free(gf_list_get(ptr->local_access_entries, 0)); gf_list_rem(ptr->local_access_entries, 0); } gf_list_del(ptr->local_access_entries); while (gf_list_count(ptr->global_access_entries)) { gf_free(gf_list_get(ptr->global_access_entries, 0)); gf_list_rem(ptr->global_access_entries, 0); } gf_list_del(ptr->global_access_entries); gf_free(ptr); } GF_Err afra_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; ISOM_DECREASE_SIZE(ptr, 9) ptr->long_ids = gf_bs_read_int(bs, 1); ptr->long_offsets = gf_bs_read_int(bs, 1); ptr->global_entries = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 5); ptr->time_scale = gf_bs_read_u32(bs); ptr->entry_count = gf_bs_read_u32(bs); if (ptr->size < ptr->entry_count * (ptr->long_offsets ? 
16 : 12)) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->entry_count; i++) { GF_AfraEntry *ae = gf_malloc(sizeof(GF_AfraEntry)); if (!ae) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 8) ae->offset = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->offset = gf_bs_read_u32(bs); } gf_list_insert(ptr->local_access_entries, ae, i); } if (ptr->global_entries) { ISOM_DECREASE_SIZE(ptr, 4) ptr->global_entry_count = gf_bs_read_u32(bs); for (i=0; i<ptr->global_entry_count; i++) { GF_GlobalAfraEntry *ae = gf_malloc(sizeof(GF_GlobalAfraEntry)); if (!ae) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_ids) { ISOM_DECREASE_SIZE(ptr, 8) ae->segment = gf_bs_read_u32(bs); ae->fragment = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->segment = gf_bs_read_u16(bs); ae->fragment = gf_bs_read_u16(bs); } if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 16) ae->afra_offset = gf_bs_read_u64(bs); ae->offset_from_afra = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8) ae->afra_offset = gf_bs_read_u32(bs); ae->offset_from_afra = gf_bs_read_u32(bs); } gf_list_insert(ptr->global_access_entries, ae, i); } } return GF_OK; } GF_Box *afra_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeFragRandomAccessBox, GF_ISOM_BOX_TYPE_AFRA); tmp->local_access_entries = gf_list_new(); tmp->global_access_entries = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err afra_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; unsigned int i; GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->long_ids, 1); gf_bs_write_int(bs, ptr->long_offsets, 1); gf_bs_write_int(bs, ptr->global_entries, 1); gf_bs_write_int(bs, 0, 5); gf_bs_write_u32(bs, ptr->time_scale); gf_bs_write_u32(bs, ptr->entry_count); for (i=0; i<ptr->entry_count; i++) { GF_AfraEntry *ae = (GF_AfraEntry *)gf_list_get(ptr->local_access_entries, i); gf_bs_write_u64(bs, ae->time); if (ptr->long_offsets) gf_bs_write_u64(bs, ae->offset); else gf_bs_write_u32(bs, (u32)ae->offset); } if (ptr->global_entries) { gf_bs_write_u32(bs, ptr->global_entry_count); for (i=0; i<ptr->global_entry_count; i++) { GF_GlobalAfraEntry *gae = (GF_GlobalAfraEntry *)gf_list_get(ptr->global_access_entries, i); gf_bs_write_u64(bs, gae->time); if (ptr->long_ids) { gf_bs_write_u32(bs, gae->segment); gf_bs_write_u32(bs, gae->fragment); } else { gf_bs_write_u16(bs, (u16)gae->segment); gf_bs_write_u16(bs, (u16)gae->fragment); } if (ptr->long_offsets) { gf_bs_write_u64(bs, gae->afra_offset); gf_bs_write_u64(bs, gae->offset_from_afra); } else { gf_bs_write_u32(bs, (u32)gae->afra_offset); gf_bs_write_u32(bs, (u32)gae->offset_from_afra); } } } return GF_OK; } GF_Err afra_box_size(GF_Box *s) { GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; s->size += 9 + ptr->entry_count * (ptr->long_offsets ? 16 : 12) + (ptr->global_entries ? 4 + ptr->global_entry_count * (4 + (ptr->long_offsets ? 16 : 8) + (ptr->long_ids ? 
8 : 4)) : 0); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void asrt_box_del(GF_Box *s) { GF_AdobeSegmentRunTableBox *ptr = (GF_AdobeSegmentRunTableBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->quality_segment_url_modifiers)) { gf_free(gf_list_get(ptr->quality_segment_url_modifiers, 0)); gf_list_rem(ptr->quality_segment_url_modifiers, 0); } gf_list_del(ptr->quality_segment_url_modifiers); while (gf_list_count(ptr->segment_run_entry_table)) { gf_free(gf_list_get(ptr->segment_run_entry_table, 0)); gf_list_rem(ptr->segment_run_entry_table, 0); } gf_list_del(ptr->segment_run_entry_table); gf_free(ptr); } GF_Err asrt_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeSegmentRunTableBox *ptr = (GF_AdobeSegmentRunTableBox *)s; ISOM_DECREASE_SIZE(ptr, 1) ptr->quality_entry_count = gf_bs_read_u8(bs); if (ptr->size < ptr->quality_entry_count) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->quality_entry_count; i++) { int j=0; u32 tmp_strsize=(u32)ptr->size; char *tmp_str = (char*) gf_malloc(tmp_strsize); if (!tmp_str) return GF_OUT_OF_MEM; while (tmp_strsize) { tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) break; j++; } ISOM_DECREASE_SIZE(ptr, j) gf_list_insert(ptr->quality_segment_url_modifiers, tmp_str, i); } ISOM_DECREASE_SIZE(ptr, 4) ptr->segment_run_entry_count = gf_bs_read_u32(bs); if (ptr->size < ptr->segment_run_entry_count*8) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->segment_run_entry_count; i++) { GF_AdobeSegmentRunEntry *sre = gf_malloc(sizeof(GF_AdobeSegmentRunEntry)); if (!sre) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) sre->first_segment = gf_bs_read_u32(bs); sre->fragment_per_segment = gf_bs_read_u32(bs); gf_list_insert(ptr->segment_run_entry_table, sre, i); } return GF_OK; } GF_Box *asrt_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeSegmentRunTableBox, GF_ISOM_BOX_TYPE_ASRT); tmp->quality_segment_url_modifiers = gf_list_new(); tmp->segment_run_entry_table = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err asrt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; unsigned int i; GF_AdobeSegmentRunTableBox *ptr = (GF_AdobeSegmentRunTableBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->quality_entry_count); for (i=0; i<ptr->quality_entry_count; i++) { char *str = (char*)gf_list_get(ptr->quality_segment_url_modifiers, i); gf_bs_write_data(bs, str, (u32)strlen(str) + 1); } gf_bs_write_u32(bs, ptr->segment_run_entry_count); for (i=0; i<ptr->segment_run_entry_count; i++) { GF_AdobeSegmentRunEntry *sre = (GF_AdobeSegmentRunEntry *)gf_list_get(ptr->segment_run_entry_table, i); gf_bs_write_u32(bs, sre->first_segment); gf_bs_write_u32(bs, sre->fragment_per_segment); } return GF_OK; } GF_Err asrt_box_size(GF_Box *s) { int i; GF_AdobeSegmentRunTableBox *ptr = (GF_AdobeSegmentRunTableBox *)s; s->size += 5; for (i=0; i<ptr->quality_entry_count; i++) s->size += strlen(gf_list_get(ptr->quality_segment_url_modifiers, i)) + 1; s->size += ptr->segment_run_entry_count * sizeof(GF_AdobeSegmentRunEntry); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void afrt_box_del(GF_Box *s) { GF_AdobeFragmentRunTableBox *ptr = (GF_AdobeFragmentRunTableBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->quality_segment_url_modifiers)) { gf_free(gf_list_get(ptr->quality_segment_url_modifiers, 0)); gf_list_rem(ptr->quality_segment_url_modifiers, 0); } gf_list_del(ptr->quality_segment_url_modifiers); while (gf_list_count(ptr->fragment_run_entry_table)) { 
gf_free(gf_list_get(ptr->fragment_run_entry_table, 0)); gf_list_rem(ptr->fragment_run_entry_table, 0); } gf_list_del(ptr->fragment_run_entry_table); gf_free(ptr); } GF_Err afrt_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeFragmentRunTableBox *ptr = (GF_AdobeFragmentRunTableBox *)s; ISOM_DECREASE_SIZE(ptr, 5) ptr->timescale = gf_bs_read_u32(bs); ptr->quality_entry_count = gf_bs_read_u8(bs); if (ptr->size < ptr->quality_entry_count) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->quality_entry_count; i++) { int j=0; u32 tmp_strsize=(u32)ptr->size-8; char *tmp_str = (char*) gf_malloc(tmp_strsize); if (!tmp_str) return GF_OUT_OF_MEM; while (tmp_strsize) { tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) break; j++; } ISOM_DECREASE_SIZE(ptr, j) gf_list_insert(ptr->quality_segment_url_modifiers, tmp_str, i); } ptr->fragment_run_entry_count = gf_bs_read_u32(bs); if (ptr->size < ptr->fragment_run_entry_count*16) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->fragment_run_entry_count; i++) { GF_AdobeFragmentRunEntry *fre = gf_malloc(sizeof(GF_AdobeFragmentRunEntry)); if (!fre) return GF_ISOM_INVALID_FILE; ISOM_DECREASE_SIZE(ptr, 16) fre->first_fragment = gf_bs_read_u32(bs); fre->first_fragment_timestamp = gf_bs_read_u64(bs); fre->fragment_duration = gf_bs_read_u32(bs); if (!fre->fragment_duration) { ISOM_DECREASE_SIZE(ptr, 1) fre->discontinuity_indicator = gf_bs_read_u8(bs); } gf_list_insert(ptr->fragment_run_entry_table, fre, i); } return GF_OK; } GF_Box *afrt_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeFragmentRunTableBox, GF_ISOM_BOX_TYPE_AFRT); tmp->quality_segment_url_modifiers = gf_list_new(); tmp->fragment_run_entry_table = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err afrt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; unsigned int i; GF_AdobeFragmentRunTableBox *ptr = (GF_AdobeFragmentRunTableBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->timescale); gf_bs_write_u8(bs, ptr->quality_entry_count); for (i=0; i<ptr->quality_entry_count; i++) { char *str = (char*)gf_list_get(ptr->quality_segment_url_modifiers, i); gf_bs_write_data(bs, str, (u32)strlen(str) + 1); } gf_bs_write_u32(bs, ptr->fragment_run_entry_count); for (i=0; i<ptr->fragment_run_entry_count; i++) { GF_AdobeFragmentRunEntry *fre = (GF_AdobeFragmentRunEntry *)gf_list_get(ptr->fragment_run_entry_table, i); gf_bs_write_u32(bs, fre->first_fragment); gf_bs_write_u64(bs, fre->first_fragment_timestamp); gf_bs_write_u32(bs, fre->fragment_duration); if (!fre->fragment_duration) gf_bs_write_u8(bs, fre->discontinuity_indicator); } return GF_OK; } GF_Err afrt_box_size(GF_Box *s) { u32 i; GF_AdobeFragmentRunTableBox *ptr = (GF_AdobeFragmentRunTableBox *)s; s->size += 5; for (i=0; i<ptr->quality_entry_count; i++) s->size += strlen(gf_list_get(ptr->quality_segment_url_modifiers, i)) + 1; s->size += 4; for (i=0; i<ptr->fragment_run_entry_count; i++) { GF_AdobeFragmentRunEntry *fre = (GF_AdobeFragmentRunEntry *)gf_list_get(ptr->fragment_run_entry_table, i); if (fre->fragment_duration) s->size += 16; else s->size += 17; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ #endif /*GPAC_DISABLE_ISOM*/ #endif /*GPAC_DISABLE_ISOM_ADOBE*/
/* * GPAC - Multimedia Framework C SDK * * Author: Romain Bouqueau, Jean Le Feuvre * Copyright (c) Romain Bouqueau 2012- Telecom Paris 2019- * All rights reserved * * Note: this development was kindly sponsorized by Vizion'R (http://vizionr.com) * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_ISOM_ADOBE #ifndef GPAC_DISABLE_ISOM #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void abst_box_del(GF_Box *s) { GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; if (ptr == NULL) return; if (ptr->movie_identifier) gf_free(ptr->movie_identifier); if (ptr->drm_data) gf_free(ptr->drm_data); if (ptr->meta_data) gf_free(ptr->meta_data); while (gf_list_count(ptr->server_entry_table)) { gf_free(gf_list_get(ptr->server_entry_table, 0)); gf_list_rem(ptr->server_entry_table, 0); } gf_list_del(ptr->server_entry_table); while (gf_list_count(ptr->quality_entry_table)) { gf_free(gf_list_get(ptr->quality_entry_table, 0)); gf_list_rem(ptr->quality_entry_table, 0); } gf_list_del(ptr->quality_entry_table); while (gf_list_count(ptr->segment_run_table_entries)) { gf_isom_box_del((GF_Box *)gf_list_get(ptr->segment_run_table_entries, 0)); gf_list_rem(ptr->segment_run_table_entries, 0); } gf_list_del(ptr->segment_run_table_entries); while (gf_list_count(ptr->fragment_run_table_entries)) { gf_isom_box_del((GF_Box *)gf_list_get(ptr->fragment_run_table_entries, 0)); gf_list_rem(ptr->fragment_run_table_entries, 0); } gf_list_del(ptr->fragment_run_table_entries); gf_free(ptr); } GF_Err abst_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; int i; u32 tmp_strsize; char *tmp_str; Bool zfound=GF_FALSE; GF_Err e = GF_OK; ISOM_DECREASE_SIZE(ptr, 25) ptr->bootstrapinfo_version = gf_bs_read_u32(bs); ptr->profile = gf_bs_read_int(bs, 2); ptr->live = gf_bs_read_int(bs, 1); ptr->update = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 4); ptr->time_scale = gf_bs_read_u32(bs); ptr->current_media_time = gf_bs_read_u64(bs); ptr->smpte_time_code_offset = gf_bs_read_u64(bs); i=0; if (ptr->size<8) return GF_ISOM_INVALID_FILE; tmp_strsize =(u32)ptr->size; tmp_str = gf_malloc(sizeof(char)*tmp_strsize); if (!tmp_str) return GF_OUT_OF_MEM; memset(tmp_str, 0, sizeof(char)*tmp_strsize); while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (i) { ptr->movie_identifier = gf_strdup(tmp_str); } ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) ptr->server_entry_count = gf_bs_read_u8(bs); for (i=0; i<ptr->server_entry_count; i++) { int j=0; zfound = GF_FALSE; tmp_strsize=(u32)ptr->size; while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[j] = gf_bs_read_u8(bs); 
tmp_strsize--; if (!tmp_str[j]) { zfound = GF_TRUE; break; } j++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (j) { gf_list_insert(ptr->server_entry_table, gf_strdup(tmp_str), i); } } if (ptr->server_entry_count != gf_list_count(ptr->server_entry_table)) { e = GF_ISOM_INVALID_FILE; goto exit; } ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) ptr->quality_entry_count = gf_bs_read_u8(bs); for (i=0; i<ptr->quality_entry_count; i++) { int j=0; zfound = GF_FALSE; tmp_strsize=(u32)ptr->size; while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) { zfound = GF_TRUE; break; } j++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (j) { gf_list_insert(ptr->quality_entry_table, gf_strdup(tmp_str), i); } } if (ptr->quality_entry_count != gf_list_count(ptr->quality_entry_table)) { e = GF_ISOM_INVALID_FILE; goto exit; } i=0; tmp_strsize=(u32)ptr->size; zfound = GF_FALSE; while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (i) { ptr->drm_data = gf_strdup(tmp_str); } i=0; tmp_strsize=(u32)ptr->size; zfound = GF_FALSE; while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (i) { ptr->meta_data = gf_strdup(tmp_str); } ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) ptr->segment_run_table_count = gf_bs_read_u8(bs); for (i=0; i<ptr->segment_run_table_count; i++) { GF_AdobeSegmentRunTableBox *asrt = NULL; e = gf_isom_box_parse((GF_Box **)&asrt, bs); if (e) { if (asrt) gf_isom_box_del((GF_Box*)asrt); goto exit; } gf_list_add(ptr->segment_run_table_entries, asrt); } if (ptr->segment_run_table_count != gf_list_count(ptr->segment_run_table_entries)) { e = GF_ISOM_INVALID_FILE; goto exit; } ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) ptr->fragment_run_table_count = gf_bs_read_u8(bs); for (i=0; i<ptr->fragment_run_table_count; i++) { GF_AdobeFragmentRunTableBox *afrt = NULL; e = gf_isom_box_parse((GF_Box **)&afrt, bs); if (e) { if (afrt) gf_isom_box_del((GF_Box*)afrt); goto exit; } gf_list_add(ptr->fragment_run_table_entries, afrt); } if (ptr->fragment_run_table_count != gf_list_count(ptr->fragment_run_table_entries)) { e = GF_ISOM_INVALID_FILE; goto exit; } exit: gf_free(tmp_str); return e; } GF_Box *abst_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeBootstrapInfoBox, GF_ISOM_BOX_TYPE_ABST); tmp->server_entry_table = gf_list_new(); tmp->quality_entry_table = gf_list_new(); tmp->segment_run_table_entries = gf_list_new(); tmp->fragment_run_table_entries = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err abst_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; unsigned int i; GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->bootstrapinfo_version); gf_bs_write_int(bs, ptr->profile, 2); gf_bs_write_int(bs, ptr->live, 1); gf_bs_write_int(bs, ptr->update, 1); gf_bs_write_int(bs, ptr->reserved, 4); gf_bs_write_u32(bs, ptr->time_scale); gf_bs_write_u64(bs, ptr->current_media_time); gf_bs_write_u64(bs, ptr->smpte_time_code_offset); if (ptr->movie_identifier) gf_bs_write_data(bs, ptr->movie_identifier, (u32)strlen(ptr->movie_identifier) + 1); else gf_bs_write_u8(bs, 0); gf_bs_write_u8(bs, ptr->server_entry_count); for (i=0; 
i<ptr->server_entry_count; i++) { char *str = (char*)gf_list_get(ptr->server_entry_table, i); gf_bs_write_data(bs, str, (u32)strlen(str) + 1); } gf_bs_write_u8(bs, ptr->quality_entry_count); for (i=0; i<ptr->quality_entry_count; i++) { char *str = (char*)gf_list_get(ptr->quality_entry_table, i); gf_bs_write_data(bs, str, (u32)strlen(str) + 1); } if (ptr->drm_data) gf_bs_write_data(bs, ptr->drm_data, (u32)strlen(ptr->drm_data) + 1); else gf_bs_write_u8(bs, 0); if (ptr->meta_data) gf_bs_write_data(bs, ptr->meta_data, (u32)strlen(ptr->meta_data) + 1); else gf_bs_write_u8(bs, 0); gf_bs_write_u8(bs, ptr->segment_run_table_count); for (i=0; i<ptr->segment_run_table_count; i++) { e = gf_isom_box_write((GF_Box *)gf_list_get(ptr->segment_run_table_entries, i), bs); if (e) return e; } gf_bs_write_u8(bs, ptr->fragment_run_table_count); for (i=0; i<ptr->fragment_run_table_count; i++) { e = gf_isom_box_write((GF_Box *)gf_list_get(ptr->fragment_run_table_entries, i), bs); if (e) return e; } return GF_OK; } GF_Err abst_box_size(GF_Box *s) { GF_Err e; u32 i; GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; s->size += 25 + (ptr->movie_identifier ? (strlen(ptr->movie_identifier) + 1) : 1) + 1; for (i=0; i<ptr->server_entry_count; i++) s->size += strlen(gf_list_get(ptr->server_entry_table, i)) + 1; s->size += 1; for (i=0; i<ptr->quality_entry_count; i++) s->size += strlen(gf_list_get(ptr->quality_entry_table, i)) + 1; s->size += (ptr->drm_data ? (strlen(ptr->drm_data) + 1) : 1) + (ptr->meta_data ? (strlen(ptr->meta_data) + 1) : 1) + 1; for (i=0; i<ptr->segment_run_table_count; i++) { GF_Box *box = (GF_Box *)gf_list_get(ptr->segment_run_table_entries, i); e = gf_isom_box_size(box); if (e) return e; s->size += box->size; } s->size += 1; for (i=0; i<ptr->fragment_run_table_count; i++) { GF_Box *box = (GF_Box *)gf_list_get(ptr->fragment_run_table_entries, i); e = gf_isom_box_size(box); if (e) return e; s->size += box->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void afra_box_del(GF_Box *s) { GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->local_access_entries)) { gf_free(gf_list_get(ptr->local_access_entries, 0)); gf_list_rem(ptr->local_access_entries, 0); } gf_list_del(ptr->local_access_entries); while (gf_list_count(ptr->global_access_entries)) { gf_free(gf_list_get(ptr->global_access_entries, 0)); gf_list_rem(ptr->global_access_entries, 0); } gf_list_del(ptr->global_access_entries); gf_free(ptr); } GF_Err afra_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; ISOM_DECREASE_SIZE(ptr, 9) ptr->long_ids = gf_bs_read_int(bs, 1); ptr->long_offsets = gf_bs_read_int(bs, 1); ptr->global_entries = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 5); ptr->time_scale = gf_bs_read_u32(bs); ptr->entry_count = gf_bs_read_u32(bs); if (ptr->size < ptr->entry_count * (ptr->long_offsets ? 
16 : 12)) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->entry_count; i++) { GF_AfraEntry *ae = gf_malloc(sizeof(GF_AfraEntry)); if (!ae) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 8) ae->offset = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->offset = gf_bs_read_u32(bs); } gf_list_insert(ptr->local_access_entries, ae, i); } if (ptr->global_entries) { ISOM_DECREASE_SIZE(ptr, 4) ptr->global_entry_count = gf_bs_read_u32(bs); for (i=0; i<ptr->global_entry_count; i++) { GF_GlobalAfraEntry *ae = gf_malloc(sizeof(GF_GlobalAfraEntry)); if (!ae) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_ids) { ISOM_DECREASE_SIZE(ptr, 8) ae->segment = gf_bs_read_u32(bs); ae->fragment = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->segment = gf_bs_read_u16(bs); ae->fragment = gf_bs_read_u16(bs); } if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 16) ae->afra_offset = gf_bs_read_u64(bs); ae->offset_from_afra = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8) ae->afra_offset = gf_bs_read_u32(bs); ae->offset_from_afra = gf_bs_read_u32(bs); } gf_list_insert(ptr->global_access_entries, ae, i); } } return GF_OK; } GF_Box *afra_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeFragRandomAccessBox, GF_ISOM_BOX_TYPE_AFRA); tmp->local_access_entries = gf_list_new(); tmp->global_access_entries = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err afra_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; unsigned int i; GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->long_ids, 1); gf_bs_write_int(bs, ptr->long_offsets, 1); gf_bs_write_int(bs, ptr->global_entries, 1); gf_bs_write_int(bs, 0, 5); gf_bs_write_u32(bs, ptr->time_scale); gf_bs_write_u32(bs, ptr->entry_count); for (i=0; i<ptr->entry_count; i++) { GF_AfraEntry *ae = (GF_AfraEntry *)gf_list_get(ptr->local_access_entries, i); gf_bs_write_u64(bs, ae->time); if (ptr->long_offsets) gf_bs_write_u64(bs, ae->offset); else gf_bs_write_u32(bs, (u32)ae->offset); } if (ptr->global_entries) { gf_bs_write_u32(bs, ptr->global_entry_count); for (i=0; i<ptr->global_entry_count; i++) { GF_GlobalAfraEntry *gae = (GF_GlobalAfraEntry *)gf_list_get(ptr->global_access_entries, i); gf_bs_write_u64(bs, gae->time); if (ptr->long_ids) { gf_bs_write_u32(bs, gae->segment); gf_bs_write_u32(bs, gae->fragment); } else { gf_bs_write_u16(bs, (u16)gae->segment); gf_bs_write_u16(bs, (u16)gae->fragment); } if (ptr->long_offsets) { gf_bs_write_u64(bs, gae->afra_offset); gf_bs_write_u64(bs, gae->offset_from_afra); } else { gf_bs_write_u32(bs, (u32)gae->afra_offset); gf_bs_write_u32(bs, (u32)gae->offset_from_afra); } } } return GF_OK; } GF_Err afra_box_size(GF_Box *s) { GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; s->size += 9 + ptr->entry_count * (ptr->long_offsets ? 16 : 12) + (ptr->global_entries ? 4 + ptr->global_entry_count * (4 + (ptr->long_offsets ? 16 : 8) + (ptr->long_ids ? 
8 : 4)) : 0); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void asrt_box_del(GF_Box *s) { GF_AdobeSegmentRunTableBox *ptr = (GF_AdobeSegmentRunTableBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->quality_segment_url_modifiers)) { gf_free(gf_list_get(ptr->quality_segment_url_modifiers, 0)); gf_list_rem(ptr->quality_segment_url_modifiers, 0); } gf_list_del(ptr->quality_segment_url_modifiers); while (gf_list_count(ptr->segment_run_entry_table)) { gf_free(gf_list_get(ptr->segment_run_entry_table, 0)); gf_list_rem(ptr->segment_run_entry_table, 0); } gf_list_del(ptr->segment_run_entry_table); gf_free(ptr); } GF_Err asrt_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeSegmentRunTableBox *ptr = (GF_AdobeSegmentRunTableBox *)s; ISOM_DECREASE_SIZE(ptr, 1) ptr->quality_entry_count = gf_bs_read_u8(bs); if (ptr->size < ptr->quality_entry_count) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->quality_entry_count; i++) { int j=0; u32 tmp_strsize=(u32)ptr->size; char *tmp_str = (char*) gf_malloc(tmp_strsize); if (!tmp_str) return GF_OUT_OF_MEM; while (tmp_strsize) { tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) break; j++; } ISOM_DECREASE_SIZE(ptr, j) gf_list_insert(ptr->quality_segment_url_modifiers, tmp_str, i); } ISOM_DECREASE_SIZE(ptr, 4) ptr->segment_run_entry_count = gf_bs_read_u32(bs); if (ptr->size < ptr->segment_run_entry_count*8) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->segment_run_entry_count; i++) { GF_AdobeSegmentRunEntry *sre = gf_malloc(sizeof(GF_AdobeSegmentRunEntry)); if (!sre) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) sre->first_segment = gf_bs_read_u32(bs); sre->fragment_per_segment = gf_bs_read_u32(bs); gf_list_insert(ptr->segment_run_entry_table, sre, i); } return GF_OK; } GF_Box *asrt_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeSegmentRunTableBox, GF_ISOM_BOX_TYPE_ASRT); tmp->quality_segment_url_modifiers = gf_list_new(); tmp->segment_run_entry_table = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err asrt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; unsigned int i; GF_AdobeSegmentRunTableBox *ptr = (GF_AdobeSegmentRunTableBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->quality_entry_count); for (i=0; i<ptr->quality_entry_count; i++) { char *str = (char*)gf_list_get(ptr->quality_segment_url_modifiers, i); gf_bs_write_data(bs, str, (u32)strlen(str) + 1); } gf_bs_write_u32(bs, ptr->segment_run_entry_count); for (i=0; i<ptr->segment_run_entry_count; i++) { GF_AdobeSegmentRunEntry *sre = (GF_AdobeSegmentRunEntry *)gf_list_get(ptr->segment_run_entry_table, i); gf_bs_write_u32(bs, sre->first_segment); gf_bs_write_u32(bs, sre->fragment_per_segment); } return GF_OK; } GF_Err asrt_box_size(GF_Box *s) { int i; GF_AdobeSegmentRunTableBox *ptr = (GF_AdobeSegmentRunTableBox *)s; s->size += 5; for (i=0; i<ptr->quality_entry_count; i++) s->size += strlen(gf_list_get(ptr->quality_segment_url_modifiers, i)) + 1; s->size += ptr->segment_run_entry_count * sizeof(GF_AdobeSegmentRunEntry); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void afrt_box_del(GF_Box *s) { GF_AdobeFragmentRunTableBox *ptr = (GF_AdobeFragmentRunTableBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->quality_segment_url_modifiers)) { gf_free(gf_list_get(ptr->quality_segment_url_modifiers, 0)); gf_list_rem(ptr->quality_segment_url_modifiers, 0); } gf_list_del(ptr->quality_segment_url_modifiers); while (gf_list_count(ptr->fragment_run_entry_table)) { 
gf_free(gf_list_get(ptr->fragment_run_entry_table, 0)); gf_list_rem(ptr->fragment_run_entry_table, 0); } gf_list_del(ptr->fragment_run_entry_table); gf_free(ptr); } GF_Err afrt_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeFragmentRunTableBox *ptr = (GF_AdobeFragmentRunTableBox *)s; ISOM_DECREASE_SIZE(ptr, 5) ptr->timescale = gf_bs_read_u32(bs); ptr->quality_entry_count = gf_bs_read_u8(bs); if (ptr->size < ptr->quality_entry_count) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->quality_entry_count; i++) { int j=0; u32 tmp_strsize=(u32)ptr->size-8; char *tmp_str = (char*) gf_malloc(tmp_strsize); if (!tmp_str) return GF_OUT_OF_MEM; while (tmp_strsize) { tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) break; j++; } ISOM_DECREASE_SIZE(ptr, j) gf_list_insert(ptr->quality_segment_url_modifiers, tmp_str, i); } ptr->fragment_run_entry_count = gf_bs_read_u32(bs); if (ptr->size < ptr->fragment_run_entry_count*16) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->fragment_run_entry_count; i++) { GF_AdobeFragmentRunEntry *fre = gf_malloc(sizeof(GF_AdobeFragmentRunEntry)); if (!fre) return GF_ISOM_INVALID_FILE; ISOM_DECREASE_SIZE(ptr, 16) fre->first_fragment = gf_bs_read_u32(bs); fre->first_fragment_timestamp = gf_bs_read_u64(bs); fre->fragment_duration = gf_bs_read_u32(bs); if (!fre->fragment_duration) { ISOM_DECREASE_SIZE(ptr, 1) fre->discontinuity_indicator = gf_bs_read_u8(bs); } gf_list_insert(ptr->fragment_run_entry_table, fre, i); } return GF_OK; } GF_Box *afrt_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeFragmentRunTableBox, GF_ISOM_BOX_TYPE_AFRT); tmp->quality_segment_url_modifiers = gf_list_new(); tmp->fragment_run_entry_table = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err afrt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; unsigned int i; GF_AdobeFragmentRunTableBox *ptr = (GF_AdobeFragmentRunTableBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->timescale); gf_bs_write_u8(bs, ptr->quality_entry_count); for (i=0; i<ptr->quality_entry_count; i++) { char *str = (char*)gf_list_get(ptr->quality_segment_url_modifiers, i); gf_bs_write_data(bs, str, (u32)strlen(str) + 1); } gf_bs_write_u32(bs, ptr->fragment_run_entry_count); for (i=0; i<ptr->fragment_run_entry_count; i++) { GF_AdobeFragmentRunEntry *fre = (GF_AdobeFragmentRunEntry *)gf_list_get(ptr->fragment_run_entry_table, i); gf_bs_write_u32(bs, fre->first_fragment); gf_bs_write_u64(bs, fre->first_fragment_timestamp); gf_bs_write_u32(bs, fre->fragment_duration); if (!fre->fragment_duration) gf_bs_write_u8(bs, fre->discontinuity_indicator); } return GF_OK; } GF_Err afrt_box_size(GF_Box *s) { u32 i; GF_AdobeFragmentRunTableBox *ptr = (GF_AdobeFragmentRunTableBox *)s; s->size += 5; for (i=0; i<ptr->quality_entry_count; i++) s->size += strlen(gf_list_get(ptr->quality_segment_url_modifiers, i)) + 1; s->size += 4; for (i=0; i<ptr->fragment_run_entry_count; i++) { GF_AdobeFragmentRunEntry *fre = (GF_AdobeFragmentRunEntry *)gf_list_get(ptr->fragment_run_entry_table, i); if (fre->fragment_duration) s->size += 16; else s->size += 17; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ #endif /*GPAC_DISABLE_ISOM*/ #endif /*GPAC_DISABLE_ISOM_ADOBE*/
GF_Err abst_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; int i; u32 tmp_strsize; char *tmp_str; Bool zfound=GF_FALSE; GF_Err e; ISOM_DECREASE_SIZE(ptr, 25) ptr->bootstrapinfo_version = gf_bs_read_u32(bs); ptr->profile = gf_bs_read_int(bs, 2); ptr->live = gf_bs_read_int(bs, 1); ptr->update = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 4); ptr->time_scale = gf_bs_read_u32(bs); ptr->current_media_time = gf_bs_read_u64(bs); ptr->smpte_time_code_offset = gf_bs_read_u64(bs); i=0; if (ptr->size<8) return GF_ISOM_INVALID_FILE; tmp_strsize =(u32)ptr->size; tmp_str = gf_malloc(sizeof(char)*tmp_strsize); if (!tmp_str) return GF_OUT_OF_MEM; memset(tmp_str, 0, sizeof(char)*tmp_strsize); while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (i) { ptr->movie_identifier = gf_strdup(tmp_str); } ISOM_DECREASE_SIZE(ptr, 1) ptr->server_entry_count = gf_bs_read_u8(bs); for (i=0; i<ptr->server_entry_count; i++) { int j=0; zfound = GF_FALSE; tmp_strsize=(u32)ptr->size; while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) { zfound = GF_TRUE; break; } j++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (j) { gf_list_insert(ptr->server_entry_table, gf_strdup(tmp_str), i); } } ISOM_DECREASE_SIZE(ptr, 1) ptr->quality_entry_count = gf_bs_read_u8(bs); for (i=0; i<ptr->quality_entry_count; i++) { int j=0; zfound = GF_FALSE; tmp_strsize=(u32)ptr->size; while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) { zfound = GF_TRUE; break; } j++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (j) { gf_list_insert(ptr->quality_entry_table, gf_strdup(tmp_str), i); } } i=0; tmp_strsize=(u32)ptr->size; zfound = GF_FALSE; while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (i) { ptr->drm_data = gf_strdup(tmp_str); } i=0; tmp_strsize=(u32)ptr->size; zfound = GF_FALSE; while (tmp_strsize) { ISOM_DECREASE_SIZE(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) return GF_ISOM_INVALID_FILE; if (i) { ptr->meta_data = gf_strdup(tmp_str); } ISOM_DECREASE_SIZE(ptr, 1) ptr->segment_run_table_count = gf_bs_read_u8(bs); for (i=0; i<ptr->segment_run_table_count; i++) { GF_AdobeSegmentRunTableBox *asrt = NULL; e = gf_isom_box_parse((GF_Box **)&asrt, bs); if (e) { if (asrt) gf_isom_box_del((GF_Box*)asrt); gf_free(tmp_str); return e; } gf_list_add(ptr->segment_run_table_entries, asrt); } ISOM_DECREASE_SIZE(ptr, 1) ptr->fragment_run_table_count = gf_bs_read_u8(bs); for (i=0; i<ptr->fragment_run_table_count; i++) { GF_AdobeFragmentRunTableBox *afrt = NULL; e = gf_isom_box_parse((GF_Box **)&afrt, bs); if (e) { if (afrt) gf_isom_box_del((GF_Box*)afrt); gf_free(tmp_str); return e; } gf_list_add(ptr->fragment_run_table_entries, afrt); } gf_free(tmp_str); return GF_OK; }
GF_Err abst_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s; int i; u32 tmp_strsize; char *tmp_str; Bool zfound=GF_FALSE; GF_Err e = GF_OK; ISOM_DECREASE_SIZE(ptr, 25) ptr->bootstrapinfo_version = gf_bs_read_u32(bs); ptr->profile = gf_bs_read_int(bs, 2); ptr->live = gf_bs_read_int(bs, 1); ptr->update = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 4); ptr->time_scale = gf_bs_read_u32(bs); ptr->current_media_time = gf_bs_read_u64(bs); ptr->smpte_time_code_offset = gf_bs_read_u64(bs); i=0; if (ptr->size<8) return GF_ISOM_INVALID_FILE; tmp_strsize =(u32)ptr->size; tmp_str = gf_malloc(sizeof(char)*tmp_strsize); if (!tmp_str) return GF_OUT_OF_MEM; memset(tmp_str, 0, sizeof(char)*tmp_strsize); while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (i) { ptr->movie_identifier = gf_strdup(tmp_str); } ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) ptr->server_entry_count = gf_bs_read_u8(bs); for (i=0; i<ptr->server_entry_count; i++) { int j=0; zfound = GF_FALSE; tmp_strsize=(u32)ptr->size; while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) { zfound = GF_TRUE; break; } j++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (j) { gf_list_insert(ptr->server_entry_table, gf_strdup(tmp_str), i); } } if (ptr->server_entry_count != gf_list_count(ptr->server_entry_table)) { e = GF_ISOM_INVALID_FILE; goto exit; } ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) ptr->quality_entry_count = gf_bs_read_u8(bs); for (i=0; i<ptr->quality_entry_count; i++) { int j=0; zfound = GF_FALSE; tmp_strsize=(u32)ptr->size; while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[j] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[j]) { zfound = GF_TRUE; break; } j++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (j) { gf_list_insert(ptr->quality_entry_table, gf_strdup(tmp_str), i); } } if (ptr->quality_entry_count != gf_list_count(ptr->quality_entry_table)) { e = GF_ISOM_INVALID_FILE; goto exit; } i=0; tmp_strsize=(u32)ptr->size; zfound = GF_FALSE; while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (i) { ptr->drm_data = gf_strdup(tmp_str); } i=0; tmp_strsize=(u32)ptr->size; zfound = GF_FALSE; while (tmp_strsize) { ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) tmp_str[i] = gf_bs_read_u8(bs); tmp_strsize--; if (!tmp_str[i]) { zfound = GF_TRUE; break; } i++; } if (!zfound) { e = GF_ISOM_INVALID_FILE; goto exit; } if (i) { ptr->meta_data = gf_strdup(tmp_str); } ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) ptr->segment_run_table_count = gf_bs_read_u8(bs); for (i=0; i<ptr->segment_run_table_count; i++) { GF_AdobeSegmentRunTableBox *asrt = NULL; e = gf_isom_box_parse((GF_Box **)&asrt, bs); if (e) { if (asrt) gf_isom_box_del((GF_Box*)asrt); goto exit; } gf_list_add(ptr->segment_run_table_entries, asrt); } if (ptr->segment_run_table_count != gf_list_count(ptr->segment_run_table_entries)) { e = GF_ISOM_INVALID_FILE; goto exit; } ISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1) ptr->fragment_run_table_count = gf_bs_read_u8(bs); for (i=0; i<ptr->fragment_run_table_count; i++) { GF_AdobeFragmentRunTableBox *afrt = NULL; e = gf_isom_box_parse((GF_Box **)&afrt, bs); if (e) { if (afrt) 
gf_isom_box_del((GF_Box*)afrt); goto exit; } gf_list_add(ptr->fragment_run_table_entries, afrt); } if (ptr->fragment_run_table_count != gf_list_count(ptr->fragment_run_table_entries)) { e = GF_ISOM_INVALID_FILE; goto exit; } exit: gf_free(tmp_str); return e; }
{'added': [(83, '\tGF_Err e = GF_OK;'), (103, '\t\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (112, '\tif (!zfound) {'), (113, '\t\te = GF_ISOM_INVALID_FILE;'), (114, '\t\tgoto exit;'), (115, '\t}'), (120, '\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (127, '\t\t\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (136, '\t\tif (!zfound) {'), (137, '\t\t\te = GF_ISOM_INVALID_FILE;'), (138, '\t\t\tgoto exit;'), (139, '\t\t}'), (144, '\tif (ptr->server_entry_count != gf_list_count(ptr->server_entry_table)) {'), (145, '\t\te = GF_ISOM_INVALID_FILE;'), (146, '\t\tgoto exit;'), (147, '\t}'), (149, '\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (156, '\t\t\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (166, '\t\tif (!zfound) {'), (167, '\t\t\te = GF_ISOM_INVALID_FILE;'), (168, '\t\t\tgoto exit;'), (169, '\t\t}'), (174, '\tif (ptr->quality_entry_count != gf_list_count(ptr->quality_entry_table)) {'), (175, '\t\te = GF_ISOM_INVALID_FILE;'), (176, '\t\tgoto exit;'), (177, '\t}'), (183, '\t\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (192, '\tif (!zfound) {'), (193, '\t\te = GF_ISOM_INVALID_FILE;'), (194, '\t\tgoto exit;'), (195, '\t}'), (196, ''), (205, '\t\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (214, '\tif (!zfound) {'), (215, '\t\te = GF_ISOM_INVALID_FILE;'), (216, '\t\tgoto exit;'), (217, '\t}'), (218, ''), (223, '\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (230, '\t\t\tgoto exit;'), (234, '\tif (ptr->segment_run_table_count != gf_list_count(ptr->segment_run_table_entries)) {'), (235, '\t\te = GF_ISOM_INVALID_FILE;'), (236, '\t\tgoto exit;'), (237, '\t}'), (239, '\tISOM_DECREASE_SIZE_GOTO_EXIT(ptr, 1)'), (246, '\t\t\tgoto exit;'), (250, '\tif (ptr->fragment_run_table_count != gf_list_count(ptr->fragment_run_table_entries)) {'), (251, '\t\te = GF_ISOM_INVALID_FILE;'), (252, '\t\tgoto exit;'), (253, '\t}'), (255, 'exit:'), (257, '\treturn e;')], 'deleted': [(83, '\tGF_Err e;'), (103, '\t\tISOM_DECREASE_SIZE(ptr, 1)'), (112, '\tif (!zfound)'), (113, '\t\treturn GF_ISOM_INVALID_FILE;'), (118, '\tISOM_DECREASE_SIZE(ptr, 1)'), (125, '\t\t\tISOM_DECREASE_SIZE(ptr, 1)'), (134, '\t\tif (!zfound)'), (135, '\t\t\treturn GF_ISOM_INVALID_FILE;'), (141, '\tISOM_DECREASE_SIZE(ptr, 1)'), (148, '\t\t\tISOM_DECREASE_SIZE(ptr, 1)'), (158, '\t\tif (!zfound)'), (159, '\t\t\treturn GF_ISOM_INVALID_FILE;'), (169, '\t\tISOM_DECREASE_SIZE(ptr, 1)'), (178, '\tif (!zfound)'), (179, '\t\treturn GF_ISOM_INVALID_FILE;'), (188, '\t\tISOM_DECREASE_SIZE(ptr, 1)'), (197, '\tif (!zfound)'), (198, '\t\treturn GF_ISOM_INVALID_FILE;'), (203, '\tISOM_DECREASE_SIZE(ptr, 1)'), (210, '\t\t\tgf_free(tmp_str);'), (211, '\t\t\treturn e;'), (216, '\tISOM_DECREASE_SIZE(ptr, 1)'), (223, '\t\t\tgf_free(tmp_str);'), (224, '\t\t\treturn e;'), (230, ''), (231, '\treturn GF_OK;')]}
52
26
621
4,431
https://github.com/gpac/gpac
CVE-2021-32132
['CWE-476']
vim9compile.c
compile_def_function
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * vim9compile.c: compiling a :def function */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) // When not generating protos this is included in proto.h #ifdef PROTO # include "vim9.h" #endif // Functions defined with :def are stored in this growarray. // They are never removed, so that they can be found by index. // Deleted functions have the df_deleted flag set. garray_T def_functions = {0, 0, sizeof(dfunc_T), 50, NULL}; static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted); /* * Lookup variable "name" in the local scope and return it in "lvar". * "lvar->lv_from_outer" is incremented accordingly. * If "lvar" is NULL only check if the variable can be found. * Return FAIL if not found. */ int lookup_local(char_u *name, size_t len, lvar_T *lvar, cctx_T *cctx) { int idx; lvar_T *lvp; if (len == 0) return FAIL; // Find local in current function scope. for (idx = 0; idx < cctx->ctx_locals.ga_len; ++idx) { lvp = ((lvar_T *)cctx->ctx_locals.ga_data) + idx; if (STRNCMP(name, lvp->lv_name, len) == 0 && STRLEN(lvp->lv_name) == len) { if (lvar != NULL) { *lvar = *lvp; lvar->lv_from_outer = 0; } return OK; } } // Find local in outer function scope. if (cctx->ctx_outer != NULL) { if (lookup_local(name, len, lvar, cctx->ctx_outer) == OK) { if (lvar != NULL) { cctx->ctx_outer_used = TRUE; ++lvar->lv_from_outer; } return OK; } } return FAIL; } /* * Lookup an argument in the current function and an enclosing function. * Returns the argument index in "idxp" * Returns the argument type in "type" * Sets "gen_load_outer" to TRUE if found in outer scope. * Returns OK when found, FAIL otherwise. */ int arg_exists( char_u *name, size_t len, int *idxp, type_T **type, int *gen_load_outer, cctx_T *cctx) { int idx; char_u *va_name; if (len == 0) return FAIL; for (idx = 0; idx < cctx->ctx_ufunc->uf_args_visible; ++idx) { char_u *arg = FUNCARG(cctx->ctx_ufunc, idx); if (STRNCMP(name, arg, len) == 0 && arg[len] == NUL) { if (idxp != NULL) { // Arguments are located above the frame pointer. One further // if there is a vararg argument *idxp = idx - (cctx->ctx_ufunc->uf_args.ga_len + STACK_FRAME_SIZE) + (cctx->ctx_ufunc->uf_va_name != NULL ? -1 : 0); if (cctx->ctx_ufunc->uf_arg_types != NULL) *type = cctx->ctx_ufunc->uf_arg_types[idx]; else *type = &t_any; } return OK; } } va_name = cctx->ctx_ufunc->uf_va_name; if (va_name != NULL && STRNCMP(name, va_name, len) == 0 && va_name[len] == NUL) { if (idxp != NULL) { // varargs is always the last argument *idxp = -STACK_FRAME_SIZE - 1; *type = cctx->ctx_ufunc->uf_va_type; } return OK; } if (cctx->ctx_outer != NULL) { // Lookup the name for an argument of the outer function. if (arg_exists(name, len, idxp, type, gen_load_outer, cctx->ctx_outer) == OK) { if (gen_load_outer != NULL) ++*gen_load_outer; return OK; } } return FAIL; } /* * Lookup a script-local variable in the current script, possibly defined in a * block that contains the function "cctx->ctx_ufunc". * "cctx" is NULL at the script level. * If "len" is <= 0 "name" must be NUL terminated. * Return NULL when not found. 
*/ static sallvar_T * find_script_var(char_u *name, size_t len, cctx_T *cctx) { scriptitem_T *si = SCRIPT_ITEM(current_sctx.sc_sid); hashitem_T *hi; int cc; sallvar_T *sav; sallvar_T *found_sav; ufunc_T *ufunc; // Find the list of all script variables with the right name. if (len > 0) { cc = name[len]; name[len] = NUL; } hi = hash_find(&si->sn_all_vars.dv_hashtab, name); if (len > 0) name[len] = cc; if (HASHITEM_EMPTY(hi)) return NULL; sav = HI2SAV(hi); if (sav->sav_block_id == 0) // variable defined in the top script scope is always visible return sav; if (cctx == NULL) { // Not in a function scope, find variable with block id equal to or // smaller than the current block id. while (sav != NULL) { if (sav->sav_block_id <= si->sn_current_block_id) break; sav = sav->sav_next; } return sav; } // Go over the variables with this name and find one that was visible // from the function. ufunc = cctx->ctx_ufunc; found_sav = sav; while (sav != NULL) { int idx; // Go over the blocks that this function was defined in. If the // variable block ID matches it was visible to the function. for (idx = 0; idx < ufunc->uf_block_depth; ++idx) if (ufunc->uf_block_ids[idx] == sav->sav_block_id) return sav; sav = sav->sav_next; } // Not found, assume variable at script level was visible. return found_sav; } /* * Return TRUE if the script context is Vim9 script. */ int script_is_vim9() { return SCRIPT_ITEM(current_sctx.sc_sid)->sn_version == SCRIPT_VERSION_VIM9; } /* * Lookup a variable (without s: prefix) in the current script. * "cctx" is NULL at the script level. * Returns OK or FAIL. */ int script_var_exists(char_u *name, size_t len, cctx_T *cctx) { if (current_sctx.sc_sid <= 0) return FAIL; if (script_is_vim9()) { // Check script variables that were visible where the function was // defined. if (find_script_var(name, len, cctx) != NULL) return OK; } else { hashtab_T *ht = &SCRIPT_VARS(current_sctx.sc_sid); dictitem_T *di; int cc; // Check script variables that are currently visible cc = name[len]; name[len] = NUL; di = find_var_in_ht(ht, 0, name, TRUE); name[len] = cc; if (di != NULL) return OK; } return FAIL; } /* * Return TRUE if "name" is a local variable, argument, script variable or * imported. */ static int variable_exists(char_u *name, size_t len, cctx_T *cctx) { return (cctx != NULL && (lookup_local(name, len, NULL, cctx) == OK || arg_exists(name, len, NULL, NULL, NULL, cctx) == OK)) || script_var_exists(name, len, cctx) == OK || find_imported(name, len, cctx) != NULL; } /* * Return TRUE if "name" is a local variable, argument, script variable, * imported or function. */ static int item_exists(char_u *name, size_t len, int cmd UNUSED, cctx_T *cctx) { int is_global; char_u *p; if (variable_exists(name, len, cctx)) return TRUE; // This is similar to what is in lookup_scriptitem(): // Find a function, so that a following "->" works. // Require "(" or "->" to follow, "Cmd" is a user command while "Cmd()" is // a function call. p = skipwhite(name + len); if (name[len] == '(' || (p[0] == '-' && p[1] == '>')) { // Do not check for an internal function, since it might also be a // valid command, such as ":split" versus "split()". // Skip "g:" before a function name. is_global = (name[0] == 'g' && name[1] == ':'); return find_func(is_global ? name + 2 : name, is_global, cctx) != NULL; } return FALSE; } /* * Check if "p[len]" is already defined, either in script "import_sid" or in * compilation context "cctx". "cctx" is NULL at the script level. * Does not check the global namespace. 
* If "is_arg" is TRUE the error message is for an argument name. * Return FAIL and give an error if it defined. */ int check_defined(char_u *p, size_t len, cctx_T *cctx, int is_arg) { int c = p[len]; ufunc_T *ufunc = NULL; // underscore argument is OK if (len == 1 && *p == '_') return OK; if (script_var_exists(p, len, cctx) == OK) { if (is_arg) semsg(_(e_argument_already_declared_in_script_str), p); else semsg(_(e_variable_already_declared_in_script_str), p); return FAIL; } p[len] = NUL; if ((cctx != NULL && (lookup_local(p, len, NULL, cctx) == OK || arg_exists(p, len, NULL, NULL, NULL, cctx) == OK)) || find_imported(p, len, cctx) != NULL || (ufunc = find_func_even_dead(p, FALSE, cctx)) != NULL) { // A local or script-local function can shadow a global function. if (ufunc == NULL || ((ufunc->uf_flags & FC_DEAD) == 0 && (!func_is_global(ufunc) || (p[0] == 'g' && p[1] == ':')))) { if (is_arg) semsg(_(e_argument_name_shadows_existing_variable_str), p); else semsg(_(e_name_already_defined_str), p); p[len] = c; return FAIL; } } p[len] = c; return OK; } /* * Return TRUE if "actual" could be "expected" and a runtime typecheck is to be * used. Return FALSE if the types will never match. */ static int use_typecheck(type_T *actual, type_T *expected) { if (actual->tt_type == VAR_ANY || actual->tt_type == VAR_UNKNOWN || (actual->tt_type == VAR_FUNC && (expected->tt_type == VAR_FUNC || expected->tt_type == VAR_PARTIAL) && (actual->tt_member == &t_any || actual->tt_member == &t_unknown || actual->tt_argcount < 0) && (actual->tt_member == &t_unknown || (actual->tt_member == &t_void) == (expected->tt_member == &t_void)))) return TRUE; if ((actual->tt_type == VAR_LIST || actual->tt_type == VAR_DICT) && actual->tt_type == expected->tt_type) // This takes care of a nested list or dict. return use_typecheck(actual->tt_member, expected->tt_member); return FALSE; } /* * Check that * - "actual" matches "expected" type or * - "actual" is a type that can be "expected" type: add a runtime check; or * - return FAIL. * If "actual_is_const" is TRUE then the type won't change at runtime, do not * generate a TYPECHECK. */ static int need_type_where( type_T *actual, type_T *expected, int offset, where_T where, cctx_T *cctx, int silent, int actual_is_const) { int ret; if (expected == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { // Using "0", "1" or the result of an expression with "&&" or "||" as a // boolean is OK but requires a conversion. generate_2BOOL(cctx, FALSE, offset); return OK; } ret = check_type_maybe(expected, actual, FALSE, where); if (ret == OK) return OK; // If the actual type can be the expected type add a runtime check. // If it's a constant a runtime check makes no sense. if (!actual_is_const && ret == MAYBE && use_typecheck(actual, expected)) { generate_TYPECHECK(cctx, expected, offset, where.wt_index); return OK; } if (!silent) type_mismatch_where(expected, actual, where); return FAIL; } int need_type( type_T *actual, type_T *expected, int offset, int arg_idx, cctx_T *cctx, int silent, int actual_is_const) { where_T where = WHERE_INIT; where.wt_index = arg_idx; return need_type_where(actual, expected, offset, where, cctx, silent, actual_is_const); } /* * Reserve space for a local variable. * Return the variable or NULL if it failed. 
*/ lvar_T * reserve_local( cctx_T *cctx, char_u *name, size_t len, int isConst, type_T *type) { lvar_T *lvar; dfunc_T *dfunc; if (arg_exists(name, len, NULL, NULL, NULL, cctx) == OK) { emsg_namelen(_(e_str_is_used_as_argument), name, (int)len); return NULL; } if (GA_GROW_FAILS(&cctx->ctx_locals, 1)) return NULL; lvar = ((lvar_T *)cctx->ctx_locals.ga_data) + cctx->ctx_locals.ga_len++; CLEAR_POINTER(lvar); // Every local variable uses the next entry on the stack. We could re-use // the last ones when leaving a scope, but then variables used in a closure // might get overwritten. To keep things simple do not re-use stack // entries. This is less efficient, but memory is cheap these days. dfunc = ((dfunc_T *)def_functions.ga_data) + cctx->ctx_ufunc->uf_dfunc_idx; lvar->lv_idx = dfunc->df_var_names.ga_len; lvar->lv_name = vim_strnsave(name, len == 0 ? STRLEN(name) : len); lvar->lv_const = isConst; lvar->lv_type = type; // Remember the name for debugging. if (GA_GROW_FAILS(&dfunc->df_var_names, 1)) return NULL; ((char_u **)dfunc->df_var_names.ga_data)[lvar->lv_idx] = vim_strsave(lvar->lv_name); ++dfunc->df_var_names.ga_len; return lvar; } /* * If "check_writable" is ASSIGN_CONST give an error if the variable was * defined with :final or :const, if "check_writable" is ASSIGN_FINAL give an * error if the variable was defined with :const. */ static int check_item_writable(svar_T *sv, int check_writable, char_u *name) { if ((check_writable == ASSIGN_CONST && sv->sv_const != 0) || (check_writable == ASSIGN_FINAL && sv->sv_const == ASSIGN_CONST)) { semsg(_(e_cannot_change_readonly_variable_str), name); return FAIL; } return OK; } /* * Find "name" in script-local items of script "sid". * Pass "check_writable" to check_item_writable(). * Returns the index in "sn_var_vals" if found. * If found but not in "sn_var_vals" returns -1. * If not found or the variable is not writable returns -2. */ int get_script_item_idx(int sid, char_u *name, int check_writable, cctx_T *cctx) { hashtab_T *ht; dictitem_T *di; scriptitem_T *si = SCRIPT_ITEM(sid); svar_T *sv; int idx; if (!SCRIPT_ID_VALID(sid)) return -1; if (sid == current_sctx.sc_sid) { sallvar_T *sav = find_script_var(name, 0, cctx); if (sav == NULL) return -2; idx = sav->sav_var_vals_idx; sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } // First look the name up in the hashtable. ht = &SCRIPT_VARS(sid); di = find_var_in_ht(ht, 0, name, TRUE); if (di == NULL) return -2; // Now find the svar_T index in sn_var_vals. for (idx = 0; idx < si->sn_var_vals.ga_len; ++idx) { sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (sv->sv_tv == &di->di_tv) { if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } } return -1; } /* * Find "name" in imported items of the current script or in "cctx" if not * NULL. */ imported_T * find_imported(char_u *name, size_t len, cctx_T *cctx) { int idx; if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) return NULL; if (cctx != NULL) for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx; if (len == 0 ? 
STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return find_imported_in_script(name, len, current_sctx.sc_sid); } imported_T * find_imported_in_script(char_u *name, size_t len, int sid) { scriptitem_T *si; int idx; if (!SCRIPT_ID_VALID(sid)) return NULL; si = SCRIPT_ITEM(sid); for (idx = 0; idx < si->sn_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)si->sn_imports.ga_data) + idx; if (len == 0 ? STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return NULL; } /* * Free all imported variables. */ static void free_imported(cctx_T *cctx) { int idx; for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx; vim_free(import->imp_name); } ga_clear(&cctx->ctx_imports); } /* * Called when checking for a following operator at "arg". When the rest of * the line is empty or only a comment, peek the next line. If there is a next * line return a pointer to it and set "nextp". * Otherwise skip over white space. */ char_u * may_peek_next_line(cctx_T *cctx, char_u *arg, char_u **nextp) { char_u *p = skipwhite(arg); *nextp = NULL; if (*p == NUL || (VIM_ISWHITE(*arg) && vim9_comment_start(p))) { *nextp = peek_next_line_from_context(cctx); if (*nextp != NULL) return *nextp; } return p; } /* * Return a pointer to the next line that isn't empty or only contains a * comment. Skips over white space. * Returns NULL if there is none. */ char_u * peek_next_line_from_context(cctx_T *cctx) { int lnum = cctx->ctx_lnum; while (++lnum < cctx->ctx_ufunc->uf_lines.ga_len) { char_u *line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[lnum]; char_u *p; // ignore NULLs inserted for continuation lines if (line != NULL) { p = skipwhite(line); if (vim9_bad_comment(p)) return NULL; if (*p != NUL && !vim9_comment_start(p)) return p; } } return NULL; } /* * Get the next line of the function from "cctx". * Skips over empty lines. Skips over comment lines if "skip_comment" is TRUE. * Returns NULL when at the end. */ char_u * next_line_from_context(cctx_T *cctx, int skip_comment) { char_u *line; do { ++cctx->ctx_lnum; if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len) { line = NULL; break; } line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; cctx->ctx_line_start = line; SOURCING_LNUM = cctx->ctx_lnum + 1; } while (line == NULL || *skipwhite(line) == NUL || (skip_comment && vim9_comment_start(skipwhite(line)))); return line; } /* * Skip over white space at "whitep" and assign to "*arg". * If "*arg" is at the end of the line, advance to the next line. * Also when "whitep" points to white space and "*arg" is on a "#". * Return FAIL if beyond the last line, "*arg" is unmodified then. */ int may_get_next_line(char_u *whitep, char_u **arg, cctx_T *cctx) { *arg = skipwhite(whitep); if (vim9_bad_comment(*arg)) return FAIL; if (**arg == NUL || (VIM_ISWHITE(*whitep) && vim9_comment_start(*arg))) { char_u *next = next_line_from_context(cctx, TRUE); if (next == NULL) return FAIL; *arg = skipwhite(next); } return OK; } /* * Idem, and give an error when failed. */ int may_get_next_line_error(char_u *whitep, char_u **arg, cctx_T *cctx) { if (may_get_next_line(whitep, arg, cctx) == FAIL) { SOURCING_LNUM = cctx->ctx_lnum + 1; emsg(_(e_line_incomplete)); return FAIL; } return OK; } /* * Get a line from the compilation context, compatible with exarg_T getline(). 
* Return a pointer to the line in allocated memory. * Return NULL for end-of-file or some error. */ static char_u * exarg_getline( int c UNUSED, void *cookie, int indent UNUSED, getline_opt_T options UNUSED) { cctx_T *cctx = (cctx_T *)cookie; char_u *p; for (;;) { if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len - 1) return NULL; ++cctx->ctx_lnum; p = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; // Comment lines result in NULL pointers, skip them. if (p != NULL) return vim_strsave(p); } } void fill_exarg_from_cctx(exarg_T *eap, cctx_T *cctx) { eap->getline = exarg_getline; eap->cookie = cctx; } /* * Return TRUE if "ufunc" should be compiled, taking into account whether * "profile" indicates profiling is to be done. */ int func_needs_compiling(ufunc_T *ufunc, compiletype_T compile_type) { switch (ufunc->uf_def_status) { case UF_TO_BE_COMPILED: return TRUE; case UF_COMPILED: { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE return dfunc->df_instr_prof == NULL; #endif case CT_NONE: return dfunc->df_instr == NULL; case CT_DEBUG: return dfunc->df_instr_debug == NULL; } } case UF_NOT_COMPILED: case UF_COMPILE_ERROR: case UF_COMPILING: break; } return FALSE; } /* * Compile a nested :def command. */ static char_u * compile_nested_function(exarg_T *eap, cctx_T *cctx) { int is_global = *eap->arg == 'g' && eap->arg[1] == ':'; char_u *name_start = eap->arg; char_u *name_end = to_name_end(eap->arg, TRUE); char_u *lambda_name; ufunc_T *ufunc; int r = FAIL; compiletype_T compile_type; if (eap->forceit) { emsg(_(e_cannot_use_bang_with_nested_def)); return NULL; } if (*name_start == '/') { name_end = skip_regexp(name_start + 1, '/', TRUE); if (*name_end == '/') ++name_end; set_nextcmd(eap, name_end); } if (name_end == name_start || *skipwhite(name_end) != '(') { if (!ends_excmd2(name_start, name_end)) { semsg(_(e_invalid_command_str), eap->cmd); return NULL; } // "def" or "def Name": list functions if (generate_DEF(cctx, name_start, name_end - name_start) == FAIL) return NULL; return eap->nextcmd == NULL ? (char_u *)"" : eap->nextcmd; } // Only g:Func() can use a namespace. if (name_start[1] == ':' && !is_global) { semsg(_(e_namespace_not_supported_str), name_start); return NULL; } if (check_defined(name_start, name_end - name_start, cctx, FALSE) == FAIL) return NULL; eap->arg = name_end; fill_exarg_from_cctx(eap, cctx); eap->forceit = FALSE; // We use the special <Lamba>99 name, but it's not really a lambda. lambda_name = vim_strsave(get_lambda_name()); if (lambda_name == NULL) return NULL; ufunc = define_function(eap, lambda_name); if (ufunc == NULL) { r = eap->skip ? OK : FAIL; goto theend; } // copy over the block scope IDs before compiling if (!is_global && cctx->ctx_ufunc->uf_block_depth > 0) { int block_depth = cctx->ctx_ufunc->uf_block_depth; ufunc->uf_block_ids = ALLOC_MULT(int, block_depth); if (ufunc->uf_block_ids != NULL) { mch_memmove(ufunc->uf_block_ids, cctx->ctx_ufunc->uf_block_ids, sizeof(int) * block_depth); ufunc->uf_block_depth = block_depth; } } compile_type = COMPILE_TYPE(ufunc); #ifdef FEAT_PROFILE // If the outer function is profiled, also compile the nested function for // profiling. 
if (cctx->ctx_compile_type == CT_PROFILE) compile_type = CT_PROFILE; #endif if (func_needs_compiling(ufunc, compile_type) && compile_def_function(ufunc, TRUE, compile_type, cctx) == FAIL) { func_ptr_unref(ufunc); goto theend; } #ifdef FEAT_PROFILE // When the outer function is compiled for profiling, the nested function // may be called without profiling. Compile it here in the right context. if (compile_type == CT_PROFILE && func_needs_compiling(ufunc, CT_NONE)) compile_def_function(ufunc, FALSE, CT_NONE, cctx); #endif if (is_global) { char_u *func_name = vim_strnsave(name_start + 2, name_end - name_start - 2); if (func_name == NULL) r = FAIL; else { r = generate_NEWFUNC(cctx, lambda_name, func_name); lambda_name = NULL; } } else { // Define a local variable for the function reference. lvar_T *lvar = reserve_local(cctx, name_start, name_end - name_start, TRUE, ufunc->uf_func_type); if (lvar == NULL) goto theend; if (generate_FUNCREF(cctx, ufunc) == FAIL) goto theend; r = generate_STORE(cctx, ISN_STORE, lvar->lv_idx, NULL); } theend: vim_free(lambda_name); return r == FAIL ? NULL : (char_u *)""; } /* * Return the length of an assignment operator, or zero if there isn't one. */ int assignment_len(char_u *p, int *heredoc) { if (*p == '=') { if (p[1] == '<' && p[2] == '<') { *heredoc = TRUE; return 3; } return 1; } if (vim_strchr((char_u *)"+-*/%", *p) != NULL && p[1] == '=') return 2; if (STRNCMP(p, "..=", 3) == 0) return 3; return 0; } /* * Generate the load instruction for "name". */ static void generate_loadvar( cctx_T *cctx, assign_dest_T dest, char_u *name, lvar_T *lvar, type_T *type) { switch (dest) { case dest_option: case dest_func_option: generate_LOAD(cctx, ISN_LOADOPT, 0, name, type); break; case dest_global: if (vim_strchr(name, AUTOLOAD_CHAR) == NULL) generate_LOAD(cctx, ISN_LOADG, 0, name + 2, type); else generate_LOAD(cctx, ISN_LOADAUTO, 0, name, type); break; case dest_buffer: generate_LOAD(cctx, ISN_LOADB, 0, name + 2, type); break; case dest_window: generate_LOAD(cctx, ISN_LOADW, 0, name + 2, type); break; case dest_tab: generate_LOAD(cctx, ISN_LOADT, 0, name + 2, type); break; case dest_script: compile_load_scriptvar(cctx, name + (name[1] == ':' ? 2 : 0), NULL, NULL, TRUE); break; case dest_env: // Include $ in the name here generate_LOAD(cctx, ISN_LOADENV, 0, name, type); break; case dest_reg: generate_LOAD(cctx, ISN_LOADREG, name[1], NULL, &t_string); break; case dest_vimvar: generate_LOADV(cctx, name + 2, TRUE); break; case dest_local: if (lvar->lv_from_outer > 0) generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type); else generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type); break; case dest_expr: // list or dict value should already be on the stack. break; } } /* * Skip over "[expr]" or ".member". * Does not check for any errors. 
*/ static char_u * skip_index(char_u *start) { char_u *p = start; if (*p == '[') { p = skipwhite(p + 1); (void)skip_expr(&p, NULL); p = skipwhite(p); if (*p == ']') return p + 1; return p; } // if (*p == '.') return to_name_end(p + 1, TRUE); } void vim9_declare_error(char_u *name) { char *scope = ""; switch (*name) { case 'g': scope = _("global"); break; case 'b': scope = _("buffer"); break; case 'w': scope = _("window"); break; case 't': scope = _("tab"); break; case 'v': scope = "v:"; break; case '$': semsg(_(e_cannot_declare_an_environment_variable), name); return; case '&': semsg(_(e_cannot_declare_an_option), name); return; case '@': semsg(_(e_cannot_declare_a_register_str), name); return; default: return; } semsg(_(e_cannot_declare_a_scope_variable), scope, name); } /* * For one assignment figure out the type of destination. Return it in "dest". * When not recognized "dest" is not set. * For an option "option_scope" is set. * For a v:var "vimvaridx" is set. * "type" is set to the destination type if known, unchanted otherwise. * Return FAIL if an error message was given. */ int get_var_dest( char_u *name, assign_dest_T *dest, int cmdidx, int *option_scope, int *vimvaridx, type_T **type, cctx_T *cctx) { char_u *p; if (*name == '&') { int cc; long numval; getoption_T opt_type; int opt_p_flags; *dest = dest_option; if (cmdidx == CMD_final || cmdidx == CMD_const) { emsg(_(e_const_option)); return FAIL; } p = name; p = find_option_end(&p, option_scope); if (p == NULL) { // cannot happen? emsg(_(e_unexpected_characters_in_assignment)); return FAIL; } cc = *p; *p = NUL; opt_type = get_option_value(skip_option_env_lead(name), &numval, NULL, &opt_p_flags, *option_scope); *p = cc; switch (opt_type) { case gov_unknown: semsg(_(e_unknown_option_str), name); return FAIL; case gov_string: case gov_hidden_string: if (opt_p_flags & P_FUNC) { // might be a Funcref, check the type later *type = &t_any; *dest = dest_func_option; } else { *type = &t_string; } break; case gov_bool: case gov_hidden_bool: *type = &t_bool; break; case gov_number: case gov_hidden_number: *type = &t_number; break; } } else if (*name == '$') { *dest = dest_env; *type = &t_string; } else if (*name == '@') { if (name[1] != '@' && (!valid_yank_reg(name[1], FALSE) || name[1] == '.')) { emsg_invreg(name[1]); return FAIL; } *dest = dest_reg; *type = name[1] == '#' ? &t_number_or_string : &t_string; } else if (STRNCMP(name, "g:", 2) == 0) { *dest = dest_global; } else if (STRNCMP(name, "b:", 2) == 0) { *dest = dest_buffer; } else if (STRNCMP(name, "w:", 2) == 0) { *dest = dest_window; } else if (STRNCMP(name, "t:", 2) == 0) { *dest = dest_tab; } else if (STRNCMP(name, "v:", 2) == 0) { typval_T *vtv; int di_flags; *vimvaridx = find_vim_var(name + 2, &di_flags); if (*vimvaridx < 0) { semsg(_(e_variable_not_found_str), name); return FAIL; } // We use the current value of "sandbox" here, is that OK? if (var_check_ro(di_flags, name, FALSE)) return FAIL; *dest = dest_vimvar; vtv = get_vim_var_tv(*vimvaridx); *type = typval2type_vimvar(vtv, cctx->ctx_type_list); } return OK; } static int is_decl_command(int cmdidx) { return cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_final || cmdidx == CMD_const; } /* * Figure out the LHS type and other properties for an assignment or one item * of ":unlet" with an index. * Returns OK or FAIL. 
*/ int compile_lhs( char_u *var_start, lhs_T *lhs, int cmdidx, int heredoc, int oplen, cctx_T *cctx) { char_u *var_end; int is_decl = is_decl_command(cmdidx); CLEAR_POINTER(lhs); lhs->lhs_dest = dest_local; lhs->lhs_vimvaridx = -1; lhs->lhs_scriptvar_idx = -1; // "dest_end" is the end of the destination, including "[expr]" or // ".name". // "var_end" is the end of the variable/option/etc. name. lhs->lhs_dest_end = skip_var_one(var_start, FALSE); if (*var_start == '@') var_end = var_start + 2; else { // skip over the leading "&", "&l:", "&g:" and "$" var_end = skip_option_env_lead(var_start); var_end = to_name_end(var_end, TRUE); } // "a: type" is declaring variable "a" with a type, not dict "a:". if (is_decl && lhs->lhs_dest_end == var_start + 2 && lhs->lhs_dest_end[-1] == ':') --lhs->lhs_dest_end; if (is_decl && var_end == var_start + 2 && var_end[-1] == ':') --var_end; lhs->lhs_end = lhs->lhs_dest_end; // compute the length of the destination without "[expr]" or ".name" lhs->lhs_varlen = var_end - var_start; lhs->lhs_varlen_total = lhs->lhs_varlen; lhs->lhs_name = vim_strnsave(var_start, lhs->lhs_varlen); if (lhs->lhs_name == NULL) return FAIL; if (lhs->lhs_dest_end > var_start + lhs->lhs_varlen) // Something follows after the variable: "var[idx]" or "var.key". lhs->lhs_has_index = TRUE; if (heredoc) lhs->lhs_type = &t_list_string; else lhs->lhs_type = &t_any; if (cctx->ctx_skip != SKIP_YES) { int declare_error = FALSE; if (get_var_dest(lhs->lhs_name, &lhs->lhs_dest, cmdidx, &lhs->lhs_opt_flags, &lhs->lhs_vimvaridx, &lhs->lhs_type, cctx) == FAIL) return FAIL; if (lhs->lhs_dest != dest_local && cmdidx != CMD_const && cmdidx != CMD_final) { // Specific kind of variable recognized. declare_error = is_decl; } else { // No specific kind of variable recognized, just a name. if (check_reserved_name(lhs->lhs_name) == FAIL) return FAIL; if (lookup_local(var_start, lhs->lhs_varlen, &lhs->lhs_local_lvar, cctx) == OK) lhs->lhs_lvar = &lhs->lhs_local_lvar; else { CLEAR_FIELD(lhs->lhs_arg_lvar); if (arg_exists(var_start, lhs->lhs_varlen, &lhs->lhs_arg_lvar.lv_idx, &lhs->lhs_arg_lvar.lv_type, &lhs->lhs_arg_lvar.lv_from_outer, cctx) == OK) { if (is_decl) { semsg(_(e_str_is_used_as_argument), lhs->lhs_name); return FAIL; } lhs->lhs_lvar = &lhs->lhs_arg_lvar; } } if (lhs->lhs_lvar != NULL) { if (is_decl) { semsg(_(e_variable_already_declared), lhs->lhs_name); return FAIL; } } else { int script_namespace = lhs->lhs_varlen > 1 && STRNCMP(var_start, "s:", 2) == 0; int script_var = (script_namespace ? script_var_exists(var_start + 2, lhs->lhs_varlen - 2, cctx) : script_var_exists(var_start, lhs->lhs_varlen, cctx)) == OK; imported_T *import = find_imported(var_start, lhs->lhs_varlen, cctx); if (script_namespace || script_var || import != NULL) { char_u *rawname = lhs->lhs_name + (lhs->lhs_name[1] == ':' ? 2 : 0); if (is_decl) { if (script_namespace) semsg(_(e_cannot_declare_script_variable_in_function), lhs->lhs_name); else semsg(_(e_variable_already_declared_in_script_str), lhs->lhs_name); return FAIL; } else if (cctx->ctx_ufunc->uf_script_ctx_version == SCRIPT_VERSION_VIM9 && script_namespace && !script_var && import == NULL) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } lhs->lhs_dest = dest_script; // existing script-local variables should have a type lhs->lhs_scriptvar_sid = current_sctx.sc_sid; if (import != NULL) lhs->lhs_scriptvar_sid = import->imp_sid; if (SCRIPT_ID_VALID(lhs->lhs_scriptvar_sid)) { // Check writable only when no index follows. 
lhs->lhs_scriptvar_idx = get_script_item_idx( lhs->lhs_scriptvar_sid, rawname, lhs->lhs_has_index ? ASSIGN_FINAL : ASSIGN_CONST, cctx); if (lhs->lhs_scriptvar_idx >= 0) { scriptitem_T *si = SCRIPT_ITEM( lhs->lhs_scriptvar_sid); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + lhs->lhs_scriptvar_idx; lhs->lhs_type = sv->sv_type; } } } else if (check_defined(var_start, lhs->lhs_varlen, cctx, FALSE) == FAIL) return FAIL; } } if (declare_error) { vim9_declare_error(lhs->lhs_name); return FAIL; } } // handle "a:name" as a name, not index "name" in "a" if (lhs->lhs_varlen > 1 || var_start[lhs->lhs_varlen] != ':') var_end = lhs->lhs_dest_end; if (lhs->lhs_dest != dest_option && lhs->lhs_dest != dest_func_option) { if (is_decl && *var_end == ':') { char_u *p; // parse optional type: "let var: type = expr" if (!VIM_ISWHITE(var_end[1])) { semsg(_(e_white_space_required_after_str_str), ":", var_end); return FAIL; } p = skipwhite(var_end + 1); lhs->lhs_type = parse_type(&p, cctx->ctx_type_list, TRUE); if (lhs->lhs_type == NULL) return FAIL; lhs->lhs_has_type = TRUE; lhs->lhs_end = p; } else if (lhs->lhs_lvar != NULL) lhs->lhs_type = lhs->lhs_lvar->lv_type; } if (oplen == 3 && !heredoc && lhs->lhs_dest != dest_global && !lhs->lhs_has_index && lhs->lhs_type->tt_type != VAR_STRING && lhs->lhs_type->tt_type != VAR_ANY) { emsg(_(e_can_only_concatenate_to_string)); return FAIL; } if (lhs->lhs_lvar == NULL && lhs->lhs_dest == dest_local && cctx->ctx_skip != SKIP_YES) { if (oplen > 1 && !heredoc) { // +=, /=, etc. require an existing variable semsg(_(e_cannot_use_operator_on_new_variable), lhs->lhs_name); return FAIL; } if (!is_decl) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } // Check the name is valid for a funcref. if ((lhs->lhs_type->tt_type == VAR_FUNC || lhs->lhs_type->tt_type == VAR_PARTIAL) && var_wrong_func_name(lhs->lhs_name, TRUE)) return FAIL; // New local variable. lhs->lhs_lvar = reserve_local(cctx, var_start, lhs->lhs_varlen, cmdidx == CMD_final || cmdidx == CMD_const, lhs->lhs_type); if (lhs->lhs_lvar == NULL) return FAIL; lhs->lhs_new_local = TRUE; } lhs->lhs_member_type = lhs->lhs_type; if (lhs->lhs_has_index) { char_u *after = var_start + lhs->lhs_varlen; char_u *p; // Something follows after the variable: "var[idx]" or "var.key". if (is_decl) { emsg(_(e_cannot_use_index_when_declaring_variable)); return FAIL; } // Now: var_start[lhs->lhs_varlen] is '[' or '.' // Only the last index is used below, if there are others // before it generate code for the expression. Thus for // "ll[1][2]" the expression is "ll[1]" and "[2]" is the index. for (;;) { p = skip_index(after); if (*p != '[' && *p != '.') { lhs->lhs_varlen_total = p - var_start; break; } after = p; } if (after > var_start + lhs->lhs_varlen) { lhs->lhs_varlen = after - var_start; lhs->lhs_dest = dest_expr; // We don't know the type before evaluating the expression, // use "any" until then. lhs->lhs_type = &t_any; } if (lhs->lhs_type->tt_member == NULL) lhs->lhs_member_type = &t_any; else lhs->lhs_member_type = lhs->lhs_type->tt_member; } return OK; } /* * Figure out the LHS and check a few errors. 
*/ int compile_assign_lhs( char_u *var_start, lhs_T *lhs, int cmdidx, int is_decl, int heredoc, int oplen, cctx_T *cctx) { if (compile_lhs(var_start, lhs, cmdidx, heredoc, oplen, cctx) == FAIL) return FAIL; if (!lhs->lhs_has_index && lhs->lhs_lvar == &lhs->lhs_arg_lvar) { semsg(_(e_cannot_assign_to_argument), lhs->lhs_name); return FAIL; } if (!is_decl && lhs->lhs_lvar != NULL && lhs->lhs_lvar->lv_const && !lhs->lhs_has_index) { semsg(_(e_cannot_assign_to_constant), lhs->lhs_name); return FAIL; } return OK; } /* * Return TRUE if "lhs" has a range index: "[expr : expr]". */ static int has_list_index(char_u *idx_start, cctx_T *cctx) { char_u *p = idx_start; int save_skip; if (*p != '[') return FALSE; p = skipwhite(p + 1); if (*p == ':') return TRUE; save_skip = cctx->ctx_skip; cctx->ctx_skip = SKIP_YES; (void)compile_expr0(&p, cctx); cctx->ctx_skip = save_skip; return *skipwhite(p) == ':'; } /* * For an assignment with an index, compile the "idx" in "var[idx]" or "key" in * "var.key". */ static int compile_assign_index( char_u *var_start, lhs_T *lhs, int *range, cctx_T *cctx) { size_t varlen = lhs->lhs_varlen; char_u *p; int r = OK; int need_white_before = TRUE; int empty_second; p = var_start + varlen; if (*p == '[') { p = skipwhite(p + 1); if (*p == ':') { // empty first index, push zero r = generate_PUSHNR(cctx, 0); need_white_before = FALSE; } else r = compile_expr0(&p, cctx); if (r == OK && *skipwhite(p) == ':') { // unlet var[idx : idx] // blob[idx : idx] = value *range = TRUE; p = skipwhite(p); empty_second = *skipwhite(p + 1) == ']'; if ((need_white_before && !IS_WHITE_OR_NUL(p[-1])) || (!empty_second && !IS_WHITE_OR_NUL(p[1]))) { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", p); return FAIL; } p = skipwhite(p + 1); if (*p == ']') // empty second index, push "none" r = generate_PUSHSPEC(cctx, VVAL_NONE); else r = compile_expr0(&p, cctx); } if (r == OK && *skipwhite(p) != ']') { // this should not happen emsg(_(e_missing_closing_square_brace)); r = FAIL; } } else // if (*p == '.') { char_u *key_end = to_name_end(p + 1, TRUE); char_u *key = vim_strnsave(p + 1, key_end - p - 1); r = generate_PUSHS(cctx, &key); } return r; } /* * For a LHS with an index, load the variable to be indexed. */ static int compile_load_lhs( lhs_T *lhs, char_u *var_start, type_T *rhs_type, cctx_T *cctx) { if (lhs->lhs_dest == dest_expr) { size_t varlen = lhs->lhs_varlen; int c = var_start[varlen]; int lines_len = cctx->ctx_ufunc->uf_lines.ga_len; char_u *p = var_start; garray_T *stack = &cctx->ctx_type_stack; int res; // Evaluate "ll[expr]" of "ll[expr][idx]". End the line with a NUL and // limit the lines array length to avoid skipping to a following line. var_start[varlen] = NUL; cctx->ctx_ufunc->uf_lines.ga_len = cctx->ctx_lnum + 1; res = compile_expr0(&p, cctx); var_start[varlen] = c; cctx->ctx_ufunc->uf_lines.ga_len = lines_len; if (res == FAIL || p != var_start + varlen) { // this should not happen if (res != FAIL) emsg(_(e_missing_closing_square_brace)); return FAIL; } lhs->lhs_type = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; // now we can properly check the type if (rhs_type != NULL && lhs->lhs_type->tt_member != NULL && rhs_type != &t_void && need_type(rhs_type, lhs->lhs_type->tt_member, -2, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } else generate_loadvar(cctx, lhs->lhs_dest, lhs->lhs_name, lhs->lhs_lvar, lhs->lhs_type); return OK; } /* * Produce code for loading "lhs" and also take care of an index. * Return OK/FAIL. 
*/ int compile_load_lhs_with_index(lhs_T *lhs, char_u *var_start, cctx_T *cctx) { compile_load_lhs(lhs, var_start, NULL, cctx); if (lhs->lhs_has_index) { int range = FALSE; // Get member from list or dict. First compile the // index value. if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (range) { semsg(_(e_cannot_use_range_with_assignment_operator_str), var_start); return FAIL; } // Get the member. if (compile_member(FALSE, NULL, cctx) == FAIL) return FAIL; } return OK; } /* * Assignment to a list or dict member, or ":unlet" for the item, using the * information in "lhs". * Returns OK or FAIL. */ int compile_assign_unlet( char_u *var_start, lhs_T *lhs, int is_assign, type_T *rhs_type, cctx_T *cctx) { vartype_T dest_type; garray_T *stack = &cctx->ctx_type_stack; int range = FALSE; if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (is_assign && range && lhs->lhs_type->tt_type != VAR_LIST && lhs->lhs_type != &t_blob && lhs->lhs_type != &t_any) { semsg(_(e_cannot_use_range_with_assignment_str), var_start); return FAIL; } if (lhs->lhs_type == &t_any) { // Index on variable of unknown type: check at runtime. dest_type = VAR_ANY; } else { dest_type = lhs->lhs_type->tt_type; if (dest_type == VAR_DICT && range) { emsg(e_cannot_use_range_with_dictionary); return FAIL; } if (dest_type == VAR_DICT && may_generate_2STRING(-1, FALSE, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_BLOB) { type_T *type; if (range) { type = ((type_T **)stack->ga_data)[stack->ga_len - 2]; if (need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; if ((dest_type != VAR_BLOB && type != &t_special) && need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } } // Load the dict or list. On the stack we then have: // - value (for assignment, not for :unlet) // - index // - for [a : b] second index // - variable if (compile_load_lhs(lhs, var_start, rhs_type, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_DICT || dest_type == VAR_BLOB || dest_type == VAR_ANY) { if (is_assign) { if (range) { if (generate_instr_drop(cctx, ISN_STORERANGE, 4) == NULL) return FAIL; } else { isn_T *isn = generate_instr_drop(cctx, ISN_STOREINDEX, 3); if (isn == NULL) return FAIL; isn->isn_arg.vartype = dest_type; } } else if (range) { if (generate_instr_drop(cctx, ISN_UNLETRANGE, 3) == NULL) return FAIL; } else { if (generate_instr_drop(cctx, ISN_UNLETINDEX, 2) == NULL) return FAIL; } } else { emsg(_(e_indexable_type_required)); return FAIL; } return OK; } /* * Compile declaration and assignment: * "let name" * "var name = expr" * "final name = expr" * "const name = expr" * "name = expr" * "arg" points to "name". * "++arg" and "--arg" * Return NULL for an error. * Return "arg" if it does not look like a variable list. */ static char_u * compile_assignment(char_u *arg, exarg_T *eap, cmdidx_T cmdidx, cctx_T *cctx) { char_u *var_start; char_u *p; char_u *end = arg; char_u *ret = NULL; int var_count = 0; int var_idx; int semicolon = 0; int did_generate_slice = FALSE; garray_T *instr = &cctx->ctx_instr; garray_T *stack = &cctx->ctx_type_stack; char_u *op; int oplen = 0; int heredoc = FALSE; int incdec = FALSE; type_T *rhs_type = &t_any; char_u *sp; int is_decl = is_decl_command(cmdidx); lhs_T lhs; long start_lnum = SOURCING_LNUM; // Skip over the "var" or "[var, var]" to get to any "=". 
p = skip_var_list(arg, TRUE, &var_count, &semicolon, TRUE); if (p == NULL) return *arg == '[' ? arg : NULL; lhs.lhs_name = NULL; sp = p; p = skipwhite(p); op = p; oplen = assignment_len(p, &heredoc); if (var_count > 0 && oplen == 0) // can be something like "[1, 2]->func()" return arg; if (oplen > 0 && (!VIM_ISWHITE(*sp) || !IS_WHITE_OR_NUL(op[oplen]))) { error_white_both(op, oplen); return NULL; } if (eap->cmdidx == CMD_increment || eap->cmdidx == CMD_decrement) { if (VIM_ISWHITE(eap->cmd[2])) { semsg(_(e_no_white_space_allowed_after_str_str), eap->cmdidx == CMD_increment ? "++" : "--", eap->cmd); return NULL; } op = (char_u *)(eap->cmdidx == CMD_increment ? "+=" : "-="); oplen = 2; incdec = TRUE; } if (heredoc) { list_T *l; listitem_T *li; // [let] varname =<< [trim] {end} eap->getline = exarg_getline; eap->cookie = cctx; l = heredoc_get(eap, op + 3, FALSE); if (l == NULL) return NULL; if (cctx->ctx_skip != SKIP_YES) { // Push each line and the create the list. FOR_ALL_LIST_ITEMS(l, li) { generate_PUSHS(cctx, &li->li_tv.vval.v_string); li->li_tv.vval.v_string = NULL; } generate_NEWLIST(cctx, l->lv_len); } list_free(l); p += STRLEN(p); end = p; } else if (var_count > 0) { char_u *wp; // for "[var, var] = expr" evaluate the expression here, loop over the // list of variables below. // A line break may follow the "=". wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) return FAIL; if (compile_expr0(&p, cctx) == FAIL) return NULL; end = p; if (cctx->ctx_skip != SKIP_YES) { type_T *stacktype; int needed_list_len; int did_check = FALSE; stacktype = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; if (stacktype->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } if (need_type(stacktype, &t_list_any, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; // If a constant list was used we can check the length right here. needed_list_len = semicolon ? var_count - 1 : var_count; if (instr->ga_len > 0) { isn_T *isn = ((isn_T *)instr->ga_data) + instr->ga_len - 1; if (isn->isn_type == ISN_NEWLIST) { did_check = TRUE; if (semicolon ? isn->isn_arg.number < needed_list_len : isn->isn_arg.number != needed_list_len) { semsg(_(e_expected_nr_items_but_got_nr), needed_list_len, isn->isn_arg.number); goto theend; } } } if (!did_check) generate_CHECKLEN(cctx, needed_list_len, semicolon); if (stacktype->tt_member != NULL) rhs_type = stacktype->tt_member; } } /* * Loop over variables in "[var, var] = expr". * For "var = expr" and "let var: type" this is done only once. */ if (var_count > 0) var_start = skipwhite(arg + 1); // skip over the "[" else var_start = arg; for (var_idx = 0; var_idx == 0 || var_idx < var_count; var_idx++) { int instr_count = -1; int save_lnum; int skip_store = FALSE; if (var_start[0] == '_' && !eval_isnamec(var_start[1])) { // Ignore underscore in "[a, _, b] = list". if (var_count > 0) { var_start = skipwhite(var_start + 2); continue; } emsg(_(e_cannot_use_underscore_here)); goto theend; } vim_free(lhs.lhs_name); /* * Figure out the LHS type and other properties. 
*/ if (compile_assign_lhs(var_start, &lhs, cmdidx, is_decl, heredoc, oplen, cctx) == FAIL) goto theend; if (heredoc) { SOURCING_LNUM = start_lnum; if (lhs.lhs_has_type && need_type(&t_list_string, lhs.lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } else { if (cctx->ctx_skip == SKIP_YES) { if (oplen > 0 && var_count == 0) { // skip over the "=" and the expression p = skipwhite(op + oplen); (void)compile_expr0(&p, cctx); } } else if (oplen > 0) { int is_const = FALSE; char_u *wp; // for "+=", "*=", "..=" etc. first load the current value if (*op != '=' && compile_load_lhs_with_index(&lhs, var_start, cctx) == FAIL) goto theend; // For "var = expr" evaluate the expression. if (var_count == 0) { int r; // Compile the expression. instr_count = instr->ga_len; if (incdec) { r = generate_PUSHNR(cctx, 1); } else { // Temporarily hide the new local variable here, it is // not available to this expression. if (lhs.lhs_new_local) --cctx->ctx_locals.ga_len; wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) { if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; goto theend; } r = compile_expr0_ext(&p, cctx, &is_const); if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; if (r == FAIL) goto theend; } } else if (semicolon && var_idx == var_count - 1) { // For "[var; var] = expr" get the rest of the list did_generate_slice = TRUE; if (generate_SLICE(cctx, var_count - 1) == FAIL) goto theend; } else { // For "[var, var] = expr" get the "var_idx" item from the // list. if (generate_GETITEM(cctx, var_idx, *op != '=') == FAIL) goto theend; } rhs_type = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; if (lhs.lhs_lvar != NULL && (is_decl || !lhs.lhs_has_type)) { if ((rhs_type->tt_type == VAR_FUNC || rhs_type->tt_type == VAR_PARTIAL) && !lhs.lhs_has_index && var_wrong_func_name(lhs.lhs_name, TRUE)) goto theend; if (lhs.lhs_new_local && !lhs.lhs_has_type) { if (rhs_type->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } else { // An empty list or dict has a &t_unknown member, // for a variable that implies &t_any. if (rhs_type == &t_list_empty) lhs.lhs_lvar->lv_type = &t_list_any; else if (rhs_type == &t_dict_empty) lhs.lhs_lvar->lv_type = &t_dict_any; else if (rhs_type == &t_unknown) lhs.lhs_lvar->lv_type = &t_any; else lhs.lhs_lvar->lv_type = rhs_type; } } else if (*op == '=') { type_T *use_type = lhs.lhs_lvar->lv_type; where_T where = WHERE_INIT; // Without operator check type here, otherwise below. // Use the line number of the assignment. SOURCING_LNUM = start_lnum; where.wt_index = var_count > 0 ? var_idx + 1 : 0; where.wt_variable = var_count > 0; // If assigning to a list or dict member, use the // member type. Not for "list[:] =". if (lhs.lhs_has_index && !has_list_index(var_start + lhs.lhs_varlen, cctx)) use_type = lhs.lhs_member_type; if (need_type_where(rhs_type, use_type, -1, where, cctx, FALSE, is_const) == FAIL) goto theend; } } else { type_T *lhs_type = lhs.lhs_member_type; // Special case: assigning to @# can use a number or a // string. // Also: can assign a number to a float. 
if ((lhs_type == &t_number_or_string || lhs_type == &t_float) && rhs_type->tt_type == VAR_NUMBER) lhs_type = &t_number; if (*p != '=' && need_type(rhs_type, lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } } else if (cmdidx == CMD_final) { emsg(_(e_final_requires_a_value)); goto theend; } else if (cmdidx == CMD_const) { emsg(_(e_const_requires_a_value)); goto theend; } else if (!lhs.lhs_has_type || lhs.lhs_dest == dest_option || lhs.lhs_dest == dest_func_option) { emsg(_(e_type_or_initialization_required)); goto theend; } else { // variables are always initialized if (GA_GROW_FAILS(instr, 1)) goto theend; switch (lhs.lhs_member_type->tt_type) { case VAR_BOOL: generate_PUSHBOOL(cctx, VVAL_FALSE); break; case VAR_FLOAT: #ifdef FEAT_FLOAT generate_PUSHF(cctx, 0.0); #endif break; case VAR_STRING: generate_PUSHS(cctx, NULL); break; case VAR_BLOB: generate_PUSHBLOB(cctx, blob_alloc()); break; case VAR_FUNC: generate_PUSHFUNC(cctx, NULL, &t_func_void); break; case VAR_LIST: generate_NEWLIST(cctx, 0); break; case VAR_DICT: generate_NEWDICT(cctx, 0); break; case VAR_JOB: generate_PUSHJOB(cctx, NULL); break; case VAR_CHANNEL: generate_PUSHCHANNEL(cctx, NULL); break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_PARTIAL: case VAR_VOID: case VAR_INSTR: case VAR_SPECIAL: // cannot happen // This is skipped for local variables, they are // always initialized to zero. if (lhs.lhs_dest == dest_local) skip_store = TRUE; else generate_PUSHNR(cctx, 0); break; } } if (var_count == 0) end = p; } // no need to parse more when skipping if (cctx->ctx_skip == SKIP_YES) break; if (oplen > 0 && *op != '=') { type_T *expected; type_T *stacktype = NULL; if (*op == '.') { if (may_generate_2STRING(-1, FALSE, cctx) == FAIL) goto theend; } else { expected = lhs.lhs_member_type; stacktype = ((type_T **)stack->ga_data)[stack->ga_len - 1]; if ( #ifdef FEAT_FLOAT // If variable is float operation with number is OK. !(expected == &t_float && (stacktype == &t_number || stacktype == &t_number_bool)) && #endif need_type(stacktype, expected, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } if (*op == '.') { if (generate_instr_drop(cctx, ISN_CONCAT, 1) == NULL) goto theend; } else if (*op == '+') { if (generate_add_instr(cctx, operator_type(lhs.lhs_member_type, stacktype), lhs.lhs_member_type, stacktype, EXPR_APPEND) == FAIL) goto theend; } else if (generate_two_op(cctx, op) == FAIL) goto theend; } // Use the line number of the assignment for store instruction. save_lnum = cctx->ctx_lnum; cctx->ctx_lnum = start_lnum - 1; if (lhs.lhs_has_index) { // Use the info in "lhs" to store the value at the index in the // list or dict. if (compile_assign_unlet(var_start, &lhs, TRUE, rhs_type, cctx) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } else { if (is_decl && cmdidx == CMD_const && (lhs.lhs_dest == dest_script || lhs.lhs_dest == dest_global || lhs.lhs_dest == dest_local)) // ":const var": lock the value, but not referenced variables generate_LOCKCONST(cctx); if (is_decl && (lhs.lhs_type->tt_type == VAR_DICT || lhs.lhs_type->tt_type == VAR_LIST) && lhs.lhs_type->tt_member != NULL && !(lhs.lhs_type->tt_member == &t_any && oplen > 0 && rhs_type != NULL && rhs_type->tt_type == lhs.lhs_type->tt_type && rhs_type->tt_member != &t_unknown) && lhs.lhs_type->tt_member != &t_unknown) // Set the type in the list or dict, so that it can be checked, // also in legacy script. Not for "list<any> = val", then the // type of "val" is used. 
generate_SETTYPE(cctx, lhs.lhs_type); if (!skip_store && generate_store_lhs(cctx, &lhs, instr_count, is_decl) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } cctx->ctx_lnum = save_lnum; if (var_idx + 1 < var_count) var_start = skipwhite(lhs.lhs_end + 1); } // For "[var, var] = expr" drop the "expr" value. // Also for "[var, var; _] = expr". if (var_count > 0 && (!semicolon || !did_generate_slice)) { if (generate_instr_drop(cctx, ISN_DROP, 1) == NULL) goto theend; } ret = skipwhite(end); theend: vim_free(lhs.lhs_name); return ret; } /* * Check for an assignment at "eap->cmd", compile it if found. * Return NOTDONE if there is none, FAIL for failure, OK if done. */ static int may_compile_assignment(exarg_T *eap, char_u **line, cctx_T *cctx) { char_u *pskip; char_u *p; // Assuming the command starts with a variable or function name, // find what follows. // Skip over "var.member", "var[idx]" and the like. // Also "&opt = val", "$ENV = val" and "@r = val". pskip = (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@') ? eap->cmd + 1 : eap->cmd; p = to_name_end(pskip, TRUE); if (p > eap->cmd && *p != NUL) { char_u *var_end; int oplen; int heredoc; if (eap->cmd[0] == '@') var_end = eap->cmd + 2; else var_end = find_name_end(pskip, NULL, NULL, FNE_CHECK_START | FNE_INCL_BR); oplen = assignment_len(skipwhite(var_end), &heredoc); if (oplen > 0) { size_t len = p - eap->cmd; // Recognize an assignment if we recognize the variable // name: // "g:var = expr" // "local = expr" where "local" is a local var. // "script = expr" where "script" is a script-local var. // "import = expr" where "import" is an imported var // "&opt = expr" // "$ENV = expr" // "@r = expr" if (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@' || ((len) > 2 && eap->cmd[1] == ':') || variable_exists(eap->cmd, len, cctx)) { *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL || *line == eap->cmd) return FAIL; return OK; } } } if (*eap->cmd == '[') { // [var, var] = expr *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL) return FAIL; if (*line != eap->cmd) return OK; } return NOTDONE; } /* * Add a function to the list of :def functions. * This sets "ufunc->uf_dfunc_idx" but the function isn't compiled yet. */ static int add_def_function(ufunc_T *ufunc) { dfunc_T *dfunc; if (def_functions.ga_len == 0) { // The first position is not used, so that a zero uf_dfunc_idx means it // wasn't set. if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; ++def_functions.ga_len; } // Add the function to "def_functions". if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; dfunc = ((dfunc_T *)def_functions.ga_data) + def_functions.ga_len; CLEAR_POINTER(dfunc); dfunc->df_idx = def_functions.ga_len; ufunc->uf_dfunc_idx = dfunc->df_idx; dfunc->df_ufunc = ufunc; dfunc->df_name = vim_strsave(ufunc->uf_name); ga_init2(&dfunc->df_var_names, sizeof(char_u *), 10); ++dfunc->df_refcount; ++def_functions.ga_len; return OK; } /* * After ex_function() has collected all the function lines: parse and compile * the lines into instructions. * Adds the function to "def_functions". * When "check_return_type" is set then set ufunc->uf_ret_type to the type of * the return statement (used for lambda). When uf_ret_type is already set * then check that it matches. * When "profiling" is true add ISN_PROF_START instructions. * "outer_cctx" is set for a nested function. * This can be used recursively through compile_lambda(), which may reallocate * "def_functions". * Returns OK or FAIL. 
*/ int compile_def_function( ufunc_T *ufunc, int check_return_type, compiletype_T compile_type, cctx_T *outer_cctx) { char_u *line = NULL; char_u *line_to_free = NULL; char_u *p; char *errormsg = NULL; // error message cctx_T cctx; garray_T *instr; int did_emsg_before = did_emsg; int did_emsg_silent_before = did_emsg_silent; int ret = FAIL; sctx_T save_current_sctx = current_sctx; int save_estack_compiling = estack_compiling; int save_cmod_flags = cmdmod.cmod_flags; int do_estack_push; int new_def_function = FALSE; #ifdef FEAT_PROFILE int prof_lnum = -1; #endif int debug_lnum = -1; // When using a function that was compiled before: Free old instructions. // The index is reused. Otherwise add a new entry in "def_functions". if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; isn_T *instr_dest = NULL; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE instr_dest = dfunc->df_instr_prof; break; #endif case CT_NONE: instr_dest = dfunc->df_instr; break; case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break; } if (instr_dest != NULL) // Was compiled in this mode before: Free old instructions. delete_def_function_contents(dfunc, FALSE); ga_clear_strings(&dfunc->df_var_names); } else { if (add_def_function(ufunc) == FAIL) return FAIL; new_def_function = TRUE; } ufunc->uf_def_status = UF_COMPILING; CLEAR_FIELD(cctx); cctx.ctx_compile_type = compile_type; cctx.ctx_ufunc = ufunc; cctx.ctx_lnum = -1; cctx.ctx_outer = outer_cctx; ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10); ga_init2(&cctx.ctx_type_stack, sizeof(type_T *), 50); ga_init2(&cctx.ctx_imports, sizeof(imported_T), 10); cctx.ctx_type_list = &ufunc->uf_type_list; ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50); instr = &cctx.ctx_instr; // Set the context to the function, it may be compiled when called from // another script. Set the script version to the most modern one. // The line number will be set in next_line_from_context(). current_sctx = ufunc->uf_script_ctx; current_sctx.sc_version = SCRIPT_VERSION_VIM9; // Don't use the flag from ":legacy" here. cmdmod.cmod_flags &= ~CMOD_LEGACY; // Make sure error messages are OK. do_estack_push = !estack_top_is_ufunc(ufunc, 1); if (do_estack_push) estack_push_ufunc(ufunc, 1); estack_compiling = TRUE; if (ufunc->uf_def_args.ga_len > 0) { int count = ufunc->uf_def_args.ga_len; int first_def_arg = ufunc->uf_args.ga_len - count; int i; char_u *arg; int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0); int did_set_arg_type = FALSE; // Produce instructions for the default values of optional arguments. SOURCING_LNUM = 0; // line number unknown for (i = 0; i < count; ++i) { garray_T *stack = &cctx.ctx_type_stack; type_T *val_type; int arg_idx = first_def_arg + i; where_T where = WHERE_INIT; int r; int jump_instr_idx = instr->ga_len; isn_T *isn; // Use a JUMP_IF_ARG_SET instruction to skip if the value was given. if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL) goto erret; // Make sure later arguments are not found. ufunc->uf_args_visible = arg_idx; arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i]; r = compile_expr0(&arg, &cctx); if (r == FAIL) goto erret; // If no type specified use the type of the default value. // Otherwise check that the default value type matches the // specified type. 
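	    // (Example: for "def F(n = 10)" the argument type becomes
	    //  "number"; for "def F(n: string = 10)" the check below
	    //  reports a type mismatch.)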
val_type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; where.wt_index = arg_idx + 1; if (ufunc->uf_arg_types[arg_idx] == &t_unknown) { did_set_arg_type = TRUE; ufunc->uf_arg_types[arg_idx] = val_type; } else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx], -1, where, &cctx, FALSE, FALSE) == FAIL) goto erret; if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL) goto erret; // set instruction index in JUMP_IF_ARG_SET to here isn = ((isn_T *)instr->ga_data) + jump_instr_idx; isn->isn_arg.jumparg.jump_where = instr->ga_len; } if (did_set_arg_type) set_function_type(ufunc); } ufunc->uf_args_visible = ufunc->uf_args.ga_len; /* * Loop over all the lines of the function and generate instructions. */ for (;;) { exarg_T ea; int starts_with_colon = FALSE; char_u *cmd; cmdmod_T local_cmdmod; // Bail out on the first error to avoid a flood of errors and report // the right line number when inside try/catch. if (did_emsg_before != did_emsg) goto erret; if (line != NULL && *line == '|') // the line continues after a '|' ++line; else if (line != NULL && *skipwhite(line) != NUL && !(*line == '#' && (line == cctx.ctx_line_start || VIM_ISWHITE(line[-1])))) { semsg(_(e_trailing_arg), line); goto erret; } else if (line != NULL && vim9_bad_comment(skipwhite(line))) goto erret; else { line = next_line_from_context(&cctx, FALSE); if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len) { // beyond the last line #ifdef FEAT_PROFILE if (cctx.ctx_skip != SKIP_YES) may_generate_prof_end(&cctx, prof_lnum); #endif break; } // Make a copy, splitting off nextcmd and removing trailing spaces // may change it. if (line != NULL) { line = vim_strsave(line); vim_free(line_to_free); line_to_free = line; } } CLEAR_FIELD(ea); ea.cmdlinep = &line; ea.cmd = skipwhite(line); if (*ea.cmd == '#') { // "#" starts a comment line = (char_u *)""; continue; } #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum && cctx.ctx_skip != SKIP_YES) { may_generate_prof_end(&cctx, prof_lnum); prof_lnum = cctx.ctx_lnum; generate_instr(&cctx, ISN_PROF_START); } #endif if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum && cctx.ctx_skip != SKIP_YES) { debug_lnum = cctx.ctx_lnum; generate_instr_debug(&cctx); } cctx.ctx_prev_lnum = cctx.ctx_lnum + 1; // Some things can be recognized by the first character. switch (*ea.cmd) { case '}': { // "}" ends a block scope scopetype_T stype = cctx.ctx_scope == NULL ? NO_SCOPE : cctx.ctx_scope->se_type; if (stype == BLOCK_SCOPE) { compile_endblock(&cctx); line = ea.cmd; } else { emsg(_(e_using_rcurly_outside_if_block_scope)); goto erret; } if (line != NULL) line = skipwhite(ea.cmd + 1); continue; } case '{': // "{" starts a block scope // "{'a': 1}->func() is something else if (ends_excmd(*skipwhite(ea.cmd + 1))) { line = compile_block(ea.cmd, &cctx); continue; } break; } /* * COMMAND MODIFIERS */ cctx.ctx_has_cmdmod = FALSE; if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE) == FAIL) { if (errormsg != NULL) goto erret; // empty line or comment line = (char_u *)""; continue; } generate_cmdmods(&cctx, &local_cmdmod); undo_cmdmod(&local_cmdmod); // Check if there was a colon after the last command modifier or before // the current position. 
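	// (Example: in "silent :%s/a/b/" the ':' after the modifier is found
	//  by the backward scan and "starts_with_colon" becomes TRUE, which
	//  later allows the range.)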
for (p = ea.cmd; p >= line; --p) { if (*p == ':') starts_with_colon = TRUE; if (p < ea.cmd && !VIM_ISWHITE(*p)) break; } // Skip ":call" to get to the function name, unless using :legacy p = ea.cmd; if (!(local_cmdmod.cmod_flags & CMOD_LEGACY)) { if (checkforcmd(&ea.cmd, "call", 3)) { if (*ea.cmd == '(') // not for "call()" ea.cmd = p; else ea.cmd = skipwhite(ea.cmd); } if (!starts_with_colon) { int assign; // Check for assignment after command modifiers. assign = may_compile_assignment(&ea, &line, &cctx); if (assign == OK) goto nextline; if (assign == FAIL) goto erret; } } /* * COMMAND after range * 'text'->func() should not be confused with 'a mark * "++nr" and "--nr" are eval commands * in "$ENV->func()" the "$" is not a range */ cmd = ea.cmd; if ((*cmd != '$' || starts_with_colon) && (starts_with_colon || !(*cmd == '\'' || (cmd[0] == cmd[1] && (*cmd == '+' || *cmd == '-'))))) { ea.cmd = skip_range(ea.cmd, TRUE, NULL); if (ea.cmd > cmd) { if (!starts_with_colon && !(local_cmdmod.cmod_flags & CMOD_LEGACY)) { semsg(_(e_colon_required_before_range_str), cmd); goto erret; } ea.addr_count = 1; if (ends_excmd2(line, ea.cmd)) { // A range without a command: jump to the line. generate_EXEC(&cctx, ISN_EXECRANGE, vim_strnsave(cmd, ea.cmd - cmd)); line = ea.cmd; goto nextline; } } } p = find_ex_command(&ea, NULL, starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY) ? NULL : item_exists, &cctx); if (p == NULL) { if (cctx.ctx_skip != SKIP_YES) emsg(_(e_ambiguous_use_of_user_defined_command)); goto erret; } // When using ":legacy cmd" always use compile_exec(). if (local_cmdmod.cmod_flags & CMOD_LEGACY) { char_u *start = ea.cmd; switch (ea.cmdidx) { case CMD_if: case CMD_elseif: case CMD_else: case CMD_endif: case CMD_for: case CMD_endfor: case CMD_continue: case CMD_break: case CMD_while: case CMD_endwhile: case CMD_try: case CMD_catch: case CMD_finally: case CMD_endtry: semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd); goto erret; default: break; } // ":legacy return expr" needs to be handled differently. if (checkforcmd(&start, "return", 4)) ea.cmdidx = CMD_return; else ea.cmdidx = CMD_legacy; } if (p == ea.cmd && ea.cmdidx != CMD_SIZE) { if (cctx.ctx_skip == SKIP_YES && ea.cmdidx != CMD_eval) { line += STRLEN(line); goto nextline; } else if (ea.cmdidx != CMD_eval) { // CMD_var cannot happen, compile_assignment() above would be // used. Most likely an assignment to a non-existing variable. 
semsg(_(e_command_not_recognized_str), ea.cmd); goto erret; } } if (cctx.ctx_had_return && ea.cmdidx != CMD_elseif && ea.cmdidx != CMD_else && ea.cmdidx != CMD_endif && ea.cmdidx != CMD_endfor && ea.cmdidx != CMD_endwhile && ea.cmdidx != CMD_catch && ea.cmdidx != CMD_finally && ea.cmdidx != CMD_endtry) { emsg(_(e_unreachable_code_after_return)); goto erret; } p = skipwhite(p); if (ea.cmdidx != CMD_SIZE && ea.cmdidx != CMD_write && ea.cmdidx != CMD_read) { if (ea.cmdidx >= 0) ea.argt = excmd_get_argt(ea.cmdidx); if ((ea.argt & EX_BANG) && *p == '!') { ea.forceit = TRUE; p = skipwhite(p + 1); } } switch (ea.cmdidx) { case CMD_def: case CMD_function: ea.arg = p; line = compile_nested_function(&ea, &cctx); break; case CMD_return: line = compile_return(p, check_return_type, local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx); cctx.ctx_had_return = TRUE; break; case CMD_let: emsg(_(e_cannot_use_let_in_vim9_script)); break; case CMD_var: case CMD_final: case CMD_const: case CMD_increment: case CMD_decrement: line = compile_assignment(p, &ea, ea.cmdidx, &cctx); if (line == p) line = NULL; break; case CMD_unlet: case CMD_unlockvar: case CMD_lockvar: line = compile_unletlock(p, &ea, &cctx); break; case CMD_import: emsg(_(e_import_can_only_be_used_in_script)); line = NULL; break; case CMD_if: line = compile_if(p, &cctx); break; case CMD_elseif: line = compile_elseif(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_else: line = compile_else(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endif: line = compile_endif(p, &cctx); break; case CMD_while: line = compile_while(p, &cctx); break; case CMD_endwhile: line = compile_endwhile(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_for: line = compile_for(p, &cctx); break; case CMD_endfor: line = compile_endfor(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_continue: line = compile_continue(p, &cctx); break; case CMD_break: line = compile_break(p, &cctx); break; case CMD_try: line = compile_try(p, &cctx); break; case CMD_catch: line = compile_catch(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_finally: line = compile_finally(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endtry: line = compile_endtry(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_throw: line = compile_throw(p, &cctx); break; case CMD_eval: line = compile_eval(p, &cctx); break; case CMD_echo: case CMD_echon: case CMD_execute: case CMD_echomsg: case CMD_echoerr: case CMD_echoconsole: line = compile_mult_expr(p, ea.cmdidx, &cctx); break; case CMD_put: ea.cmd = cmd; line = compile_put(p, &ea, &cctx); break; case CMD_substitute: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; if (cctx.ctx_skip == SKIP_YES) line = (char_u *)""; else { ea.arg = p; line = compile_substitute(line, &ea, &cctx); } break; case CMD_redir: ea.arg = p; line = compile_redir(line, &ea, &cctx); break; case CMD_cexpr: case CMD_lexpr: case CMD_caddexpr: case CMD_laddexpr: case CMD_cgetexpr: case CMD_lgetexpr: #ifdef FEAT_QUICKFIX ea.arg = p; line = compile_cexpr(line, &ea, &cctx); #else ex_ni(&ea); line = NULL; #endif break; case CMD_append: case CMD_change: case CMD_insert: case CMD_k: case CMD_t: case CMD_xit: not_in_vim9(&ea); goto erret; case CMD_SIZE: if (cctx.ctx_skip != SKIP_YES) { semsg(_(e_invalid_command_str), ea.cmd); goto erret; } // We don't check for a next command here. 
line = (char_u *)""; break; case CMD_lua: case CMD_mzscheme: case CMD_perl: case CMD_py3: case CMD_python3: case CMD_python: case CMD_pythonx: case CMD_ruby: case CMD_tcl: ea.arg = p; if (vim_strchr(line, '\n') == NULL) line = compile_exec(line, &ea, &cctx); else // heredoc lines have been concatenated with NL // characters in get_function_body() line = compile_script(line, &cctx); break; case CMD_global: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; // FALLTHROUGH default: // Not recognized, execute with do_cmdline_cmd(). ea.arg = p; line = compile_exec(line, &ea, &cctx); break; } nextline: if (line == NULL) goto erret; line = skipwhite(line); // Undo any command modifiers. generate_undo_cmdmods(&cctx); if (cctx.ctx_type_stack.ga_len < 0) { iemsg("Type stack underflow"); goto erret; } } if (cctx.ctx_scope != NULL) { if (cctx.ctx_scope->se_type == IF_SCOPE) emsg(_(e_endif)); else if (cctx.ctx_scope->se_type == WHILE_SCOPE) emsg(_(e_endwhile)); else if (cctx.ctx_scope->se_type == FOR_SCOPE) emsg(_(e_endfor)); else emsg(_(e_missing_rcurly)); goto erret; } if (!cctx.ctx_had_return) { if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN) ufunc->uf_ret_type = &t_void; else if (ufunc->uf_ret_type->tt_type != VAR_VOID) { emsg(_(e_missing_return_statement)); goto erret; } // Return void if there is no return at the end. generate_instr(&cctx, ISN_RETURN_VOID); } // When compiled with ":silent!" and there was an error don't consider the // function compiled. if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; dfunc->df_deleted = FALSE; dfunc->df_script_seq = current_sctx.sc_seq; #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE) { dfunc->df_instr_prof = instr->ga_data; dfunc->df_instr_prof_count = instr->ga_len; } else #endif if (cctx.ctx_compile_type == CT_DEBUG) { dfunc->df_instr_debug = instr->ga_data; dfunc->df_instr_debug_count = instr->ga_len; } else { dfunc->df_instr = instr->ga_data; dfunc->df_instr_count = instr->ga_len; } dfunc->df_varcount = dfunc->df_var_names.ga_len; dfunc->df_has_closure = cctx.ctx_has_closure; if (cctx.ctx_outer_used) ufunc->uf_flags |= FC_CLOSURE; ufunc->uf_def_status = UF_COMPILED; } ret = OK; erret: if (ufunc->uf_def_status == UF_COMPILING) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; // Compiling aborted, free the generated instructions. clear_instr_ga(instr); VIM_CLEAR(dfunc->df_name); ga_clear_strings(&dfunc->df_var_names); // If using the last entry in the table and it was added above, we // might as well remove it. 
if (!dfunc->df_deleted && new_def_function && ufunc->uf_dfunc_idx == def_functions.ga_len - 1) { --def_functions.ga_len; ufunc->uf_dfunc_idx = 0; } ufunc->uf_def_status = UF_COMPILE_ERROR; while (cctx.ctx_scope != NULL) drop_scope(&cctx); if (errormsg != NULL) emsg(errormsg); else if (did_emsg == did_emsg_before) emsg(_(e_compiling_def_function_failed)); } if (cctx.ctx_redir_lhs.lhs_name != NULL) { if (ret == OK) { emsg(_(e_missing_redir_end)); ret = FAIL; } vim_free(cctx.ctx_redir_lhs.lhs_name); vim_free(cctx.ctx_redir_lhs.lhs_whole); } current_sctx = save_current_sctx; estack_compiling = save_estack_compiling; cmdmod.cmod_flags = save_cmod_flags; if (do_estack_push) estack_pop(); vim_free(line_to_free); free_imported(&cctx); free_locals(&cctx); ga_clear(&cctx.ctx_type_stack); return ret; } void set_function_type(ufunc_T *ufunc) { int varargs = ufunc->uf_va_name != NULL; int argcount = ufunc->uf_args.ga_len; // Create a type for the function, with the return type and any // argument types. // A vararg is included in uf_args.ga_len but not in uf_arg_types. // The type is included in "tt_args". if (argcount > 0 || varargs) { if (ufunc->uf_type_list.ga_itemsize == 0) ga_init2(&ufunc->uf_type_list, sizeof(type_T *), 10); ufunc->uf_func_type = alloc_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); // Add argument types to the function type. if (func_type_add_arg_types(ufunc->uf_func_type, argcount + varargs, &ufunc->uf_type_list) == FAIL) return; ufunc->uf_func_type->tt_argcount = argcount + varargs; ufunc->uf_func_type->tt_min_argcount = argcount - ufunc->uf_def_args.ga_len; if (ufunc->uf_arg_types == NULL) { int i; // lambda does not have argument types. for (i = 0; i < argcount; ++i) ufunc->uf_func_type->tt_args[i] = &t_any; } else mch_memmove(ufunc->uf_func_type->tt_args, ufunc->uf_arg_types, sizeof(type_T *) * argcount); if (varargs) { ufunc->uf_func_type->tt_args[argcount] = ufunc->uf_va_type == NULL ? &t_list_any : ufunc->uf_va_type; ufunc->uf_func_type->tt_flags = TTFLAG_VARARGS; } } else // No arguments, can use a predefined type. ufunc->uf_func_type = get_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); } /* * Free all instructions for "dfunc" except df_name. */ static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted) { int idx; ga_clear(&dfunc->df_def_args_isn); ga_clear_strings(&dfunc->df_var_names); if (dfunc->df_instr != NULL) { for (idx = 0; idx < dfunc->df_instr_count; ++idx) delete_instr(dfunc->df_instr + idx); VIM_CLEAR(dfunc->df_instr); dfunc->df_instr = NULL; } if (dfunc->df_instr_debug != NULL) { for (idx = 0; idx < dfunc->df_instr_debug_count; ++idx) delete_instr(dfunc->df_instr_debug + idx); VIM_CLEAR(dfunc->df_instr_debug); dfunc->df_instr_debug = NULL; } #ifdef FEAT_PROFILE if (dfunc->df_instr_prof != NULL) { for (idx = 0; idx < dfunc->df_instr_prof_count; ++idx) delete_instr(dfunc->df_instr_prof + idx); VIM_CLEAR(dfunc->df_instr_prof); dfunc->df_instr_prof = NULL; } #endif if (mark_deleted) dfunc->df_deleted = TRUE; if (dfunc->df_ufunc != NULL) dfunc->df_ufunc->uf_def_status = UF_NOT_COMPILED; } /* * When a user function is deleted, clear the contents of any associated def * function, unless another user function still uses it. * The position in def_functions can be re-used. 
*/ void unlink_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; if (--dfunc->df_refcount <= 0) delete_def_function_contents(dfunc, TRUE); ufunc->uf_def_status = UF_NOT_COMPILED; ufunc->uf_dfunc_idx = 0; if (dfunc->df_ufunc == ufunc) dfunc->df_ufunc = NULL; } } /* * Used when a user function refers to an existing dfunc. */ void link_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; ++dfunc->df_refcount; } } #if defined(EXITFREE) || defined(PROTO) /* * Free all functions defined with ":def". */ void free_def_functions(void) { int idx; for (idx = 0; idx < def_functions.ga_len; ++idx) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + idx; delete_def_function_contents(dfunc, TRUE); vim_free(dfunc->df_name); } ga_clear(&def_functions); } #endif #endif // FEAT_EVAL
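The table handling just above (def_functions entries that are only appended, found by a stable index, and whose contents are freed by unlink_def_function() when the reference count drops to zero) can be illustrated with a minimal standalone sketch. This sketch is not part of the Vim source; every name in it (entry_T, table_T, table_add, table_unlink) is made up for illustration.

#include <stdlib.h>

/* One compiled-function slot.  Slots are never removed, so an index into
 * the table stays valid for the lifetime of the table; only the contents
 * are freed. */
typedef struct {
    void    *contents;	// e.g. generated instructions
    int	    refcount;
    int	    deleted;
} entry_T;

typedef struct {
    entry_T *data;
    int	    len;
    int	    cap;
} table_T;

/* Append a slot and return its (stable) index, or -1 on allocation
 * failure. */
    static int
table_add(table_T *t)
{
    if (t->len == t->cap)
    {
	int	 newcap = t->cap == 0 ? 8 : t->cap * 2;
	entry_T	*p = realloc(t->data, newcap * sizeof(entry_T));

	if (p == NULL)
	    return -1;
	t->data = p;
	t->cap = newcap;
    }
    t->data[t->len].contents = NULL;
    t->data[t->len].refcount = 1;
    t->data[t->len].deleted = 0;
    return t->len++;
}

/* Drop one reference, like unlink_def_function(): when the count reaches
 * zero, free the contents but keep the slot and mark it deleted. */
    static void
table_unlink(table_T *t, int idx)
{
    if (--t->data[idx].refcount <= 0)
    {
	free(t->data[idx].contents);
	t->data[idx].contents = NULL;
	t->data[idx].deleted = 1;
    }
}

Keeping deleted slots in place is what allows uf_dfunc_idx to stay a plain index instead of a pointer that could dangle after other functions are compiled.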
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * vim9compile.c: compiling a :def function */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) // When not generating protos this is included in proto.h #ifdef PROTO # include "vim9.h" #endif // Functions defined with :def are stored in this growarray. // They are never removed, so that they can be found by index. // Deleted functions have the df_deleted flag set. garray_T def_functions = {0, 0, sizeof(dfunc_T), 50, NULL}; static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted); /* * Lookup variable "name" in the local scope and return it in "lvar". * "lvar->lv_from_outer" is incremented accordingly. * If "lvar" is NULL only check if the variable can be found. * Return FAIL if not found. */ int lookup_local(char_u *name, size_t len, lvar_T *lvar, cctx_T *cctx) { int idx; lvar_T *lvp; if (len == 0) return FAIL; // Find local in current function scope. for (idx = 0; idx < cctx->ctx_locals.ga_len; ++idx) { lvp = ((lvar_T *)cctx->ctx_locals.ga_data) + idx; if (STRNCMP(name, lvp->lv_name, len) == 0 && STRLEN(lvp->lv_name) == len) { if (lvar != NULL) { *lvar = *lvp; lvar->lv_from_outer = 0; } return OK; } } // Find local in outer function scope. if (cctx->ctx_outer != NULL) { if (lookup_local(name, len, lvar, cctx->ctx_outer) == OK) { if (lvar != NULL) { cctx->ctx_outer_used = TRUE; ++lvar->lv_from_outer; } return OK; } } return FAIL; } /* * Lookup an argument in the current function and an enclosing function. * Returns the argument index in "idxp" * Returns the argument type in "type" * Sets "gen_load_outer" to TRUE if found in outer scope. * Returns OK when found, FAIL otherwise. */ int arg_exists( char_u *name, size_t len, int *idxp, type_T **type, int *gen_load_outer, cctx_T *cctx) { int idx; char_u *va_name; if (len == 0) return FAIL; for (idx = 0; idx < cctx->ctx_ufunc->uf_args_visible; ++idx) { char_u *arg = FUNCARG(cctx->ctx_ufunc, idx); if (STRNCMP(name, arg, len) == 0 && arg[len] == NUL) { if (idxp != NULL) { // Arguments are located above the frame pointer. One further // if there is a vararg argument *idxp = idx - (cctx->ctx_ufunc->uf_args.ga_len + STACK_FRAME_SIZE) + (cctx->ctx_ufunc->uf_va_name != NULL ? -1 : 0); if (cctx->ctx_ufunc->uf_arg_types != NULL) *type = cctx->ctx_ufunc->uf_arg_types[idx]; else *type = &t_any; } return OK; } } va_name = cctx->ctx_ufunc->uf_va_name; if (va_name != NULL && STRNCMP(name, va_name, len) == 0 && va_name[len] == NUL) { if (idxp != NULL) { // varargs is always the last argument *idxp = -STACK_FRAME_SIZE - 1; *type = cctx->ctx_ufunc->uf_va_type; } return OK; } if (cctx->ctx_outer != NULL) { // Lookup the name for an argument of the outer function. if (arg_exists(name, len, idxp, type, gen_load_outer, cctx->ctx_outer) == OK) { if (gen_load_outer != NULL) ++*gen_load_outer; return OK; } } return FAIL; } /* * Lookup a script-local variable in the current script, possibly defined in a * block that contains the function "cctx->ctx_ufunc". * "cctx" is NULL at the script level. * If "len" is <= 0 "name" must be NUL terminated. * Return NULL when not found. 
*/ static sallvar_T * find_script_var(char_u *name, size_t len, cctx_T *cctx) { scriptitem_T *si = SCRIPT_ITEM(current_sctx.sc_sid); hashitem_T *hi; int cc; sallvar_T *sav; sallvar_T *found_sav; ufunc_T *ufunc; // Find the list of all script variables with the right name. if (len > 0) { cc = name[len]; name[len] = NUL; } hi = hash_find(&si->sn_all_vars.dv_hashtab, name); if (len > 0) name[len] = cc; if (HASHITEM_EMPTY(hi)) return NULL; sav = HI2SAV(hi); if (sav->sav_block_id == 0) // variable defined in the top script scope is always visible return sav; if (cctx == NULL) { // Not in a function scope, find variable with block id equal to or // smaller than the current block id. while (sav != NULL) { if (sav->sav_block_id <= si->sn_current_block_id) break; sav = sav->sav_next; } return sav; } // Go over the variables with this name and find one that was visible // from the function. ufunc = cctx->ctx_ufunc; found_sav = sav; while (sav != NULL) { int idx; // Go over the blocks that this function was defined in. If the // variable block ID matches it was visible to the function. for (idx = 0; idx < ufunc->uf_block_depth; ++idx) if (ufunc->uf_block_ids[idx] == sav->sav_block_id) return sav; sav = sav->sav_next; } // Not found, assume variable at script level was visible. return found_sav; } /* * Return TRUE if the script context is Vim9 script. */ int script_is_vim9() { return SCRIPT_ITEM(current_sctx.sc_sid)->sn_version == SCRIPT_VERSION_VIM9; } /* * Lookup a variable (without s: prefix) in the current script. * "cctx" is NULL at the script level. * Returns OK or FAIL. */ int script_var_exists(char_u *name, size_t len, cctx_T *cctx) { if (current_sctx.sc_sid <= 0) return FAIL; if (script_is_vim9()) { // Check script variables that were visible where the function was // defined. if (find_script_var(name, len, cctx) != NULL) return OK; } else { hashtab_T *ht = &SCRIPT_VARS(current_sctx.sc_sid); dictitem_T *di; int cc; // Check script variables that are currently visible cc = name[len]; name[len] = NUL; di = find_var_in_ht(ht, 0, name, TRUE); name[len] = cc; if (di != NULL) return OK; } return FAIL; } /* * Return TRUE if "name" is a local variable, argument, script variable or * imported. */ static int variable_exists(char_u *name, size_t len, cctx_T *cctx) { return (cctx != NULL && (lookup_local(name, len, NULL, cctx) == OK || arg_exists(name, len, NULL, NULL, NULL, cctx) == OK)) || script_var_exists(name, len, cctx) == OK || find_imported(name, len, cctx) != NULL; } /* * Return TRUE if "name" is a local variable, argument, script variable, * imported or function. */ static int item_exists(char_u *name, size_t len, int cmd UNUSED, cctx_T *cctx) { int is_global; char_u *p; if (variable_exists(name, len, cctx)) return TRUE; // This is similar to what is in lookup_scriptitem(): // Find a function, so that a following "->" works. // Require "(" or "->" to follow, "Cmd" is a user command while "Cmd()" is // a function call. p = skipwhite(name + len); if (name[len] == '(' || (p[0] == '-' && p[1] == '>')) { // Do not check for an internal function, since it might also be a // valid command, such as ":split" versus "split()". // Skip "g:" before a function name. is_global = (name[0] == 'g' && name[1] == ':'); return find_func(is_global ? name + 2 : name, is_global, cctx) != NULL; } return FALSE; } /* * Check if "p[len]" is already defined, either in script "import_sid" or in * compilation context "cctx". "cctx" is NULL at the script level. * Does not check the global namespace. 
* If "is_arg" is TRUE the error message is for an argument name. * Return FAIL and give an error if it defined. */ int check_defined(char_u *p, size_t len, cctx_T *cctx, int is_arg) { int c = p[len]; ufunc_T *ufunc = NULL; // underscore argument is OK if (len == 1 && *p == '_') return OK; if (script_var_exists(p, len, cctx) == OK) { if (is_arg) semsg(_(e_argument_already_declared_in_script_str), p); else semsg(_(e_variable_already_declared_in_script_str), p); return FAIL; } p[len] = NUL; if ((cctx != NULL && (lookup_local(p, len, NULL, cctx) == OK || arg_exists(p, len, NULL, NULL, NULL, cctx) == OK)) || find_imported(p, len, cctx) != NULL || (ufunc = find_func_even_dead(p, FALSE, cctx)) != NULL) { // A local or script-local function can shadow a global function. if (ufunc == NULL || ((ufunc->uf_flags & FC_DEAD) == 0 && (!func_is_global(ufunc) || (p[0] == 'g' && p[1] == ':')))) { if (is_arg) semsg(_(e_argument_name_shadows_existing_variable_str), p); else semsg(_(e_name_already_defined_str), p); p[len] = c; return FAIL; } } p[len] = c; return OK; } /* * Return TRUE if "actual" could be "expected" and a runtime typecheck is to be * used. Return FALSE if the types will never match. */ static int use_typecheck(type_T *actual, type_T *expected) { if (actual->tt_type == VAR_ANY || actual->tt_type == VAR_UNKNOWN || (actual->tt_type == VAR_FUNC && (expected->tt_type == VAR_FUNC || expected->tt_type == VAR_PARTIAL) && (actual->tt_member == &t_any || actual->tt_member == &t_unknown || actual->tt_argcount < 0) && (actual->tt_member == &t_unknown || (actual->tt_member == &t_void) == (expected->tt_member == &t_void)))) return TRUE; if ((actual->tt_type == VAR_LIST || actual->tt_type == VAR_DICT) && actual->tt_type == expected->tt_type) // This takes care of a nested list or dict. return use_typecheck(actual->tt_member, expected->tt_member); return FALSE; } /* * Check that * - "actual" matches "expected" type or * - "actual" is a type that can be "expected" type: add a runtime check; or * - return FAIL. * If "actual_is_const" is TRUE then the type won't change at runtime, do not * generate a TYPECHECK. */ static int need_type_where( type_T *actual, type_T *expected, int offset, where_T where, cctx_T *cctx, int silent, int actual_is_const) { int ret; if (expected == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { // Using "0", "1" or the result of an expression with "&&" or "||" as a // boolean is OK but requires a conversion. generate_2BOOL(cctx, FALSE, offset); return OK; } ret = check_type_maybe(expected, actual, FALSE, where); if (ret == OK) return OK; // If the actual type can be the expected type add a runtime check. // If it's a constant a runtime check makes no sense. if (!actual_is_const && ret == MAYBE && use_typecheck(actual, expected)) { generate_TYPECHECK(cctx, expected, offset, where.wt_index); return OK; } if (!silent) type_mismatch_where(expected, actual, where); return FAIL; } int need_type( type_T *actual, type_T *expected, int offset, int arg_idx, cctx_T *cctx, int silent, int actual_is_const) { where_T where = WHERE_INIT; where.wt_index = arg_idx; return need_type_where(actual, expected, offset, where, cctx, silent, actual_is_const); } /* * Reserve space for a local variable. * Return the variable or NULL if it failed. 
*/ lvar_T * reserve_local( cctx_T *cctx, char_u *name, size_t len, int isConst, type_T *type) { lvar_T *lvar; dfunc_T *dfunc; if (arg_exists(name, len, NULL, NULL, NULL, cctx) == OK) { emsg_namelen(_(e_str_is_used_as_argument), name, (int)len); return NULL; } if (GA_GROW_FAILS(&cctx->ctx_locals, 1)) return NULL; lvar = ((lvar_T *)cctx->ctx_locals.ga_data) + cctx->ctx_locals.ga_len++; CLEAR_POINTER(lvar); // Every local variable uses the next entry on the stack. We could re-use // the last ones when leaving a scope, but then variables used in a closure // might get overwritten. To keep things simple do not re-use stack // entries. This is less efficient, but memory is cheap these days. dfunc = ((dfunc_T *)def_functions.ga_data) + cctx->ctx_ufunc->uf_dfunc_idx; lvar->lv_idx = dfunc->df_var_names.ga_len; lvar->lv_name = vim_strnsave(name, len == 0 ? STRLEN(name) : len); lvar->lv_const = isConst; lvar->lv_type = type; // Remember the name for debugging. if (GA_GROW_FAILS(&dfunc->df_var_names, 1)) return NULL; ((char_u **)dfunc->df_var_names.ga_data)[lvar->lv_idx] = vim_strsave(lvar->lv_name); ++dfunc->df_var_names.ga_len; return lvar; } /* * If "check_writable" is ASSIGN_CONST give an error if the variable was * defined with :final or :const, if "check_writable" is ASSIGN_FINAL give an * error if the variable was defined with :const. */ static int check_item_writable(svar_T *sv, int check_writable, char_u *name) { if ((check_writable == ASSIGN_CONST && sv->sv_const != 0) || (check_writable == ASSIGN_FINAL && sv->sv_const == ASSIGN_CONST)) { semsg(_(e_cannot_change_readonly_variable_str), name); return FAIL; } return OK; } /* * Find "name" in script-local items of script "sid". * Pass "check_writable" to check_item_writable(). * Returns the index in "sn_var_vals" if found. * If found but not in "sn_var_vals" returns -1. * If not found or the variable is not writable returns -2. */ int get_script_item_idx(int sid, char_u *name, int check_writable, cctx_T *cctx) { hashtab_T *ht; dictitem_T *di; scriptitem_T *si = SCRIPT_ITEM(sid); svar_T *sv; int idx; if (!SCRIPT_ID_VALID(sid)) return -1; if (sid == current_sctx.sc_sid) { sallvar_T *sav = find_script_var(name, 0, cctx); if (sav == NULL) return -2; idx = sav->sav_var_vals_idx; sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } // First look the name up in the hashtable. ht = &SCRIPT_VARS(sid); di = find_var_in_ht(ht, 0, name, TRUE); if (di == NULL) return -2; // Now find the svar_T index in sn_var_vals. for (idx = 0; idx < si->sn_var_vals.ga_len; ++idx) { sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (sv->sv_tv == &di->di_tv) { if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } } return -1; } /* * Find "name" in imported items of the current script or in "cctx" if not * NULL. */ imported_T * find_imported(char_u *name, size_t len, cctx_T *cctx) { int idx; if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) return NULL; if (cctx != NULL) for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx; if (len == 0 ? 
STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return find_imported_in_script(name, len, current_sctx.sc_sid); } imported_T * find_imported_in_script(char_u *name, size_t len, int sid) { scriptitem_T *si; int idx; if (!SCRIPT_ID_VALID(sid)) return NULL; si = SCRIPT_ITEM(sid); for (idx = 0; idx < si->sn_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)si->sn_imports.ga_data) + idx; if (len == 0 ? STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return NULL; } /* * Free all imported variables. */ static void free_imported(cctx_T *cctx) { int idx; for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx; vim_free(import->imp_name); } ga_clear(&cctx->ctx_imports); } /* * Called when checking for a following operator at "arg". When the rest of * the line is empty or only a comment, peek the next line. If there is a next * line return a pointer to it and set "nextp". * Otherwise skip over white space. */ char_u * may_peek_next_line(cctx_T *cctx, char_u *arg, char_u **nextp) { char_u *p = skipwhite(arg); *nextp = NULL; if (*p == NUL || (VIM_ISWHITE(*arg) && vim9_comment_start(p))) { *nextp = peek_next_line_from_context(cctx); if (*nextp != NULL) return *nextp; } return p; } /* * Return a pointer to the next line that isn't empty or only contains a * comment. Skips over white space. * Returns NULL if there is none. */ char_u * peek_next_line_from_context(cctx_T *cctx) { int lnum = cctx->ctx_lnum; while (++lnum < cctx->ctx_ufunc->uf_lines.ga_len) { char_u *line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[lnum]; char_u *p; // ignore NULLs inserted for continuation lines if (line != NULL) { p = skipwhite(line); if (vim9_bad_comment(p)) return NULL; if (*p != NUL && !vim9_comment_start(p)) return p; } } return NULL; } /* * Get the next line of the function from "cctx". * Skips over empty lines. Skips over comment lines if "skip_comment" is TRUE. * Returns NULL when at the end. */ char_u * next_line_from_context(cctx_T *cctx, int skip_comment) { char_u *line; do { ++cctx->ctx_lnum; if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len) { line = NULL; break; } line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; cctx->ctx_line_start = line; SOURCING_LNUM = cctx->ctx_lnum + 1; } while (line == NULL || *skipwhite(line) == NUL || (skip_comment && vim9_comment_start(skipwhite(line)))); return line; } /* * Skip over white space at "whitep" and assign to "*arg". * If "*arg" is at the end of the line, advance to the next line. * Also when "whitep" points to white space and "*arg" is on a "#". * Return FAIL if beyond the last line, "*arg" is unmodified then. */ int may_get_next_line(char_u *whitep, char_u **arg, cctx_T *cctx) { *arg = skipwhite(whitep); if (vim9_bad_comment(*arg)) return FAIL; if (**arg == NUL || (VIM_ISWHITE(*whitep) && vim9_comment_start(*arg))) { char_u *next = next_line_from_context(cctx, TRUE); if (next == NULL) return FAIL; *arg = skipwhite(next); } return OK; } /* * Idem, and give an error when failed. */ int may_get_next_line_error(char_u *whitep, char_u **arg, cctx_T *cctx) { if (may_get_next_line(whitep, arg, cctx) == FAIL) { SOURCING_LNUM = cctx->ctx_lnum + 1; emsg(_(e_line_incomplete)); return FAIL; } return OK; } /* * Get a line from the compilation context, compatible with exarg_T getline(). 
* Return a pointer to the line in allocated memory. * Return NULL for end-of-file or some error. */ static char_u * exarg_getline( int c UNUSED, void *cookie, int indent UNUSED, getline_opt_T options UNUSED) { cctx_T *cctx = (cctx_T *)cookie; char_u *p; for (;;) { if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len - 1) return NULL; ++cctx->ctx_lnum; p = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; // Comment lines result in NULL pointers, skip them. if (p != NULL) return vim_strsave(p); } } void fill_exarg_from_cctx(exarg_T *eap, cctx_T *cctx) { eap->getline = exarg_getline; eap->cookie = cctx; } /* * Return TRUE if "ufunc" should be compiled, taking into account whether * "profile" indicates profiling is to be done. */ int func_needs_compiling(ufunc_T *ufunc, compiletype_T compile_type) { switch (ufunc->uf_def_status) { case UF_TO_BE_COMPILED: return TRUE; case UF_COMPILED: { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE return dfunc->df_instr_prof == NULL; #endif case CT_NONE: return dfunc->df_instr == NULL; case CT_DEBUG: return dfunc->df_instr_debug == NULL; } } case UF_NOT_COMPILED: case UF_COMPILE_ERROR: case UF_COMPILING: break; } return FALSE; } /* * Compile a nested :def command. */ static char_u * compile_nested_function(exarg_T *eap, cctx_T *cctx, char_u **line_to_free) { int is_global = *eap->arg == 'g' && eap->arg[1] == ':'; char_u *name_start = eap->arg; char_u *name_end = to_name_end(eap->arg, TRUE); int off; char_u *func_name; char_u *lambda_name; ufunc_T *ufunc; int r = FAIL; compiletype_T compile_type; if (eap->forceit) { emsg(_(e_cannot_use_bang_with_nested_def)); return NULL; } if (*name_start == '/') { name_end = skip_regexp(name_start + 1, '/', TRUE); if (*name_end == '/') ++name_end; set_nextcmd(eap, name_end); } if (name_end == name_start || *skipwhite(name_end) != '(') { if (!ends_excmd2(name_start, name_end)) { semsg(_(e_invalid_command_str), eap->cmd); return NULL; } // "def" or "def Name": list functions if (generate_DEF(cctx, name_start, name_end - name_start) == FAIL) return NULL; return eap->nextcmd == NULL ? (char_u *)"" : eap->nextcmd; } // Only g:Func() can use a namespace. if (name_start[1] == ':' && !is_global) { semsg(_(e_namespace_not_supported_str), name_start); return NULL; } if (check_defined(name_start, name_end - name_start, cctx, FALSE) == FAIL) return NULL; eap->arg = name_end; fill_exarg_from_cctx(eap, cctx); eap->forceit = FALSE; // We use the special <Lamba>99 name, but it's not really a lambda. lambda_name = vim_strsave(get_lambda_name()); if (lambda_name == NULL) return NULL; // This may free the current line, make a copy of the name. off = is_global ? 2 : 0; func_name = vim_strnsave(name_start + off, name_end - name_start - off); if (func_name == NULL) { r = FAIL; goto theend; } ufunc = define_function(eap, lambda_name, line_to_free); if (ufunc == NULL) { r = eap->skip ? OK : FAIL; goto theend; } // copy over the block scope IDs before compiling if (!is_global && cctx->ctx_ufunc->uf_block_depth > 0) { int block_depth = cctx->ctx_ufunc->uf_block_depth; ufunc->uf_block_ids = ALLOC_MULT(int, block_depth); if (ufunc->uf_block_ids != NULL) { mch_memmove(ufunc->uf_block_ids, cctx->ctx_ufunc->uf_block_ids, sizeof(int) * block_depth); ufunc->uf_block_depth = block_depth; } } compile_type = COMPILE_TYPE(ufunc); #ifdef FEAT_PROFILE // If the outer function is profiled, also compile the nested function for // profiling. 
if (cctx->ctx_compile_type == CT_PROFILE) compile_type = CT_PROFILE; #endif if (func_needs_compiling(ufunc, compile_type) && compile_def_function(ufunc, TRUE, compile_type, cctx) == FAIL) { func_ptr_unref(ufunc); goto theend; } #ifdef FEAT_PROFILE // When the outer function is compiled for profiling, the nested function // may be called without profiling. Compile it here in the right context. if (compile_type == CT_PROFILE && func_needs_compiling(ufunc, CT_NONE)) compile_def_function(ufunc, FALSE, CT_NONE, cctx); #endif if (is_global) { r = generate_NEWFUNC(cctx, lambda_name, func_name); func_name = NULL; lambda_name = NULL; } else { // Define a local variable for the function reference. lvar_T *lvar = reserve_local(cctx, func_name, name_end - name_start, TRUE, ufunc->uf_func_type); if (lvar == NULL) goto theend; if (generate_FUNCREF(cctx, ufunc) == FAIL) goto theend; r = generate_STORE(cctx, ISN_STORE, lvar->lv_idx, NULL); } theend: vim_free(lambda_name); vim_free(func_name); return r == FAIL ? NULL : (char_u *)""; } /* * Return the length of an assignment operator, or zero if there isn't one. */ int assignment_len(char_u *p, int *heredoc) { if (*p == '=') { if (p[1] == '<' && p[2] == '<') { *heredoc = TRUE; return 3; } return 1; } if (vim_strchr((char_u *)"+-*/%", *p) != NULL && p[1] == '=') return 2; if (STRNCMP(p, "..=", 3) == 0) return 3; return 0; } /* * Generate the load instruction for "name". */ static void generate_loadvar( cctx_T *cctx, assign_dest_T dest, char_u *name, lvar_T *lvar, type_T *type) { switch (dest) { case dest_option: case dest_func_option: generate_LOAD(cctx, ISN_LOADOPT, 0, name, type); break; case dest_global: if (vim_strchr(name, AUTOLOAD_CHAR) == NULL) generate_LOAD(cctx, ISN_LOADG, 0, name + 2, type); else generate_LOAD(cctx, ISN_LOADAUTO, 0, name, type); break; case dest_buffer: generate_LOAD(cctx, ISN_LOADB, 0, name + 2, type); break; case dest_window: generate_LOAD(cctx, ISN_LOADW, 0, name + 2, type); break; case dest_tab: generate_LOAD(cctx, ISN_LOADT, 0, name + 2, type); break; case dest_script: compile_load_scriptvar(cctx, name + (name[1] == ':' ? 2 : 0), NULL, NULL, TRUE); break; case dest_env: // Include $ in the name here generate_LOAD(cctx, ISN_LOADENV, 0, name, type); break; case dest_reg: generate_LOAD(cctx, ISN_LOADREG, name[1], NULL, &t_string); break; case dest_vimvar: generate_LOADV(cctx, name + 2, TRUE); break; case dest_local: if (lvar->lv_from_outer > 0) generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type); else generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type); break; case dest_expr: // list or dict value should already be on the stack. break; } } /* * Skip over "[expr]" or ".member". * Does not check for any errors. 
*/ static char_u * skip_index(char_u *start) { char_u *p = start; if (*p == '[') { p = skipwhite(p + 1); (void)skip_expr(&p, NULL); p = skipwhite(p); if (*p == ']') return p + 1; return p; } // if (*p == '.') return to_name_end(p + 1, TRUE); } void vim9_declare_error(char_u *name) { char *scope = ""; switch (*name) { case 'g': scope = _("global"); break; case 'b': scope = _("buffer"); break; case 'w': scope = _("window"); break; case 't': scope = _("tab"); break; case 'v': scope = "v:"; break; case '$': semsg(_(e_cannot_declare_an_environment_variable), name); return; case '&': semsg(_(e_cannot_declare_an_option), name); return; case '@': semsg(_(e_cannot_declare_a_register_str), name); return; default: return; } semsg(_(e_cannot_declare_a_scope_variable), scope, name); } /* * For one assignment figure out the type of destination. Return it in "dest". * When not recognized "dest" is not set. * For an option "option_scope" is set. * For a v:var "vimvaridx" is set. * "type" is set to the destination type if known, unchanted otherwise. * Return FAIL if an error message was given. */ int get_var_dest( char_u *name, assign_dest_T *dest, int cmdidx, int *option_scope, int *vimvaridx, type_T **type, cctx_T *cctx) { char_u *p; if (*name == '&') { int cc; long numval; getoption_T opt_type; int opt_p_flags; *dest = dest_option; if (cmdidx == CMD_final || cmdidx == CMD_const) { emsg(_(e_const_option)); return FAIL; } p = name; p = find_option_end(&p, option_scope); if (p == NULL) { // cannot happen? emsg(_(e_unexpected_characters_in_assignment)); return FAIL; } cc = *p; *p = NUL; opt_type = get_option_value(skip_option_env_lead(name), &numval, NULL, &opt_p_flags, *option_scope); *p = cc; switch (opt_type) { case gov_unknown: semsg(_(e_unknown_option_str), name); return FAIL; case gov_string: case gov_hidden_string: if (opt_p_flags & P_FUNC) { // might be a Funcref, check the type later *type = &t_any; *dest = dest_func_option; } else { *type = &t_string; } break; case gov_bool: case gov_hidden_bool: *type = &t_bool; break; case gov_number: case gov_hidden_number: *type = &t_number; break; } } else if (*name == '$') { *dest = dest_env; *type = &t_string; } else if (*name == '@') { if (name[1] != '@' && (!valid_yank_reg(name[1], FALSE) || name[1] == '.')) { emsg_invreg(name[1]); return FAIL; } *dest = dest_reg; *type = name[1] == '#' ? &t_number_or_string : &t_string; } else if (STRNCMP(name, "g:", 2) == 0) { *dest = dest_global; } else if (STRNCMP(name, "b:", 2) == 0) { *dest = dest_buffer; } else if (STRNCMP(name, "w:", 2) == 0) { *dest = dest_window; } else if (STRNCMP(name, "t:", 2) == 0) { *dest = dest_tab; } else if (STRNCMP(name, "v:", 2) == 0) { typval_T *vtv; int di_flags; *vimvaridx = find_vim_var(name + 2, &di_flags); if (*vimvaridx < 0) { semsg(_(e_variable_not_found_str), name); return FAIL; } // We use the current value of "sandbox" here, is that OK? if (var_check_ro(di_flags, name, FALSE)) return FAIL; *dest = dest_vimvar; vtv = get_vim_var_tv(*vimvaridx); *type = typval2type_vimvar(vtv, cctx->ctx_type_list); } return OK; } static int is_decl_command(int cmdidx) { return cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_final || cmdidx == CMD_const; } /* * Figure out the LHS type and other properties for an assignment or one item * of ":unlet" with an index. * Returns OK or FAIL. 
*/ int compile_lhs( char_u *var_start, lhs_T *lhs, int cmdidx, int heredoc, int oplen, cctx_T *cctx) { char_u *var_end; int is_decl = is_decl_command(cmdidx); CLEAR_POINTER(lhs); lhs->lhs_dest = dest_local; lhs->lhs_vimvaridx = -1; lhs->lhs_scriptvar_idx = -1; // "dest_end" is the end of the destination, including "[expr]" or // ".name". // "var_end" is the end of the variable/option/etc. name. lhs->lhs_dest_end = skip_var_one(var_start, FALSE); if (*var_start == '@') var_end = var_start + 2; else { // skip over the leading "&", "&l:", "&g:" and "$" var_end = skip_option_env_lead(var_start); var_end = to_name_end(var_end, TRUE); } // "a: type" is declaring variable "a" with a type, not dict "a:". if (is_decl && lhs->lhs_dest_end == var_start + 2 && lhs->lhs_dest_end[-1] == ':') --lhs->lhs_dest_end; if (is_decl && var_end == var_start + 2 && var_end[-1] == ':') --var_end; lhs->lhs_end = lhs->lhs_dest_end; // compute the length of the destination without "[expr]" or ".name" lhs->lhs_varlen = var_end - var_start; lhs->lhs_varlen_total = lhs->lhs_varlen; lhs->lhs_name = vim_strnsave(var_start, lhs->lhs_varlen); if (lhs->lhs_name == NULL) return FAIL; if (lhs->lhs_dest_end > var_start + lhs->lhs_varlen) // Something follows after the variable: "var[idx]" or "var.key". lhs->lhs_has_index = TRUE; if (heredoc) lhs->lhs_type = &t_list_string; else lhs->lhs_type = &t_any; if (cctx->ctx_skip != SKIP_YES) { int declare_error = FALSE; if (get_var_dest(lhs->lhs_name, &lhs->lhs_dest, cmdidx, &lhs->lhs_opt_flags, &lhs->lhs_vimvaridx, &lhs->lhs_type, cctx) == FAIL) return FAIL; if (lhs->lhs_dest != dest_local && cmdidx != CMD_const && cmdidx != CMD_final) { // Specific kind of variable recognized. declare_error = is_decl; } else { // No specific kind of variable recognized, just a name. if (check_reserved_name(lhs->lhs_name) == FAIL) return FAIL; if (lookup_local(var_start, lhs->lhs_varlen, &lhs->lhs_local_lvar, cctx) == OK) lhs->lhs_lvar = &lhs->lhs_local_lvar; else { CLEAR_FIELD(lhs->lhs_arg_lvar); if (arg_exists(var_start, lhs->lhs_varlen, &lhs->lhs_arg_lvar.lv_idx, &lhs->lhs_arg_lvar.lv_type, &lhs->lhs_arg_lvar.lv_from_outer, cctx) == OK) { if (is_decl) { semsg(_(e_str_is_used_as_argument), lhs->lhs_name); return FAIL; } lhs->lhs_lvar = &lhs->lhs_arg_lvar; } } if (lhs->lhs_lvar != NULL) { if (is_decl) { semsg(_(e_variable_already_declared), lhs->lhs_name); return FAIL; } } else { int script_namespace = lhs->lhs_varlen > 1 && STRNCMP(var_start, "s:", 2) == 0; int script_var = (script_namespace ? script_var_exists(var_start + 2, lhs->lhs_varlen - 2, cctx) : script_var_exists(var_start, lhs->lhs_varlen, cctx)) == OK; imported_T *import = find_imported(var_start, lhs->lhs_varlen, cctx); if (script_namespace || script_var || import != NULL) { char_u *rawname = lhs->lhs_name + (lhs->lhs_name[1] == ':' ? 2 : 0); if (is_decl) { if (script_namespace) semsg(_(e_cannot_declare_script_variable_in_function), lhs->lhs_name); else semsg(_(e_variable_already_declared_in_script_str), lhs->lhs_name); return FAIL; } else if (cctx->ctx_ufunc->uf_script_ctx_version == SCRIPT_VERSION_VIM9 && script_namespace && !script_var && import == NULL) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } lhs->lhs_dest = dest_script; // existing script-local variables should have a type lhs->lhs_scriptvar_sid = current_sctx.sc_sid; if (import != NULL) lhs->lhs_scriptvar_sid = import->imp_sid; if (SCRIPT_ID_VALID(lhs->lhs_scriptvar_sid)) { // Check writable only when no index follows. 
lhs->lhs_scriptvar_idx = get_script_item_idx( lhs->lhs_scriptvar_sid, rawname, lhs->lhs_has_index ? ASSIGN_FINAL : ASSIGN_CONST, cctx); if (lhs->lhs_scriptvar_idx >= 0) { scriptitem_T *si = SCRIPT_ITEM( lhs->lhs_scriptvar_sid); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + lhs->lhs_scriptvar_idx; lhs->lhs_type = sv->sv_type; } } } else if (check_defined(var_start, lhs->lhs_varlen, cctx, FALSE) == FAIL) return FAIL; } } if (declare_error) { vim9_declare_error(lhs->lhs_name); return FAIL; } } // handle "a:name" as a name, not index "name" in "a" if (lhs->lhs_varlen > 1 || var_start[lhs->lhs_varlen] != ':') var_end = lhs->lhs_dest_end; if (lhs->lhs_dest != dest_option && lhs->lhs_dest != dest_func_option) { if (is_decl && *var_end == ':') { char_u *p; // parse optional type: "let var: type = expr" if (!VIM_ISWHITE(var_end[1])) { semsg(_(e_white_space_required_after_str_str), ":", var_end); return FAIL; } p = skipwhite(var_end + 1); lhs->lhs_type = parse_type(&p, cctx->ctx_type_list, TRUE); if (lhs->lhs_type == NULL) return FAIL; lhs->lhs_has_type = TRUE; lhs->lhs_end = p; } else if (lhs->lhs_lvar != NULL) lhs->lhs_type = lhs->lhs_lvar->lv_type; } if (oplen == 3 && !heredoc && lhs->lhs_dest != dest_global && !lhs->lhs_has_index && lhs->lhs_type->tt_type != VAR_STRING && lhs->lhs_type->tt_type != VAR_ANY) { emsg(_(e_can_only_concatenate_to_string)); return FAIL; } if (lhs->lhs_lvar == NULL && lhs->lhs_dest == dest_local && cctx->ctx_skip != SKIP_YES) { if (oplen > 1 && !heredoc) { // +=, /=, etc. require an existing variable semsg(_(e_cannot_use_operator_on_new_variable), lhs->lhs_name); return FAIL; } if (!is_decl) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } // Check the name is valid for a funcref. if ((lhs->lhs_type->tt_type == VAR_FUNC || lhs->lhs_type->tt_type == VAR_PARTIAL) && var_wrong_func_name(lhs->lhs_name, TRUE)) return FAIL; // New local variable. lhs->lhs_lvar = reserve_local(cctx, var_start, lhs->lhs_varlen, cmdidx == CMD_final || cmdidx == CMD_const, lhs->lhs_type); if (lhs->lhs_lvar == NULL) return FAIL; lhs->lhs_new_local = TRUE; } lhs->lhs_member_type = lhs->lhs_type; if (lhs->lhs_has_index) { char_u *after = var_start + lhs->lhs_varlen; char_u *p; // Something follows after the variable: "var[idx]" or "var.key". if (is_decl) { emsg(_(e_cannot_use_index_when_declaring_variable)); return FAIL; } // Now: var_start[lhs->lhs_varlen] is '[' or '.' // Only the last index is used below, if there are others // before it generate code for the expression. Thus for // "ll[1][2]" the expression is "ll[1]" and "[2]" is the index. for (;;) { p = skip_index(after); if (*p != '[' && *p != '.') { lhs->lhs_varlen_total = p - var_start; break; } after = p; } if (after > var_start + lhs->lhs_varlen) { lhs->lhs_varlen = after - var_start; lhs->lhs_dest = dest_expr; // We don't know the type before evaluating the expression, // use "any" until then. lhs->lhs_type = &t_any; } if (lhs->lhs_type->tt_member == NULL) lhs->lhs_member_type = &t_any; else lhs->lhs_member_type = lhs->lhs_type->tt_member; } return OK; } /* * Figure out the LHS and check a few errors. 
*/ int compile_assign_lhs( char_u *var_start, lhs_T *lhs, int cmdidx, int is_decl, int heredoc, int oplen, cctx_T *cctx) { if (compile_lhs(var_start, lhs, cmdidx, heredoc, oplen, cctx) == FAIL) return FAIL; if (!lhs->lhs_has_index && lhs->lhs_lvar == &lhs->lhs_arg_lvar) { semsg(_(e_cannot_assign_to_argument), lhs->lhs_name); return FAIL; } if (!is_decl && lhs->lhs_lvar != NULL && lhs->lhs_lvar->lv_const && !lhs->lhs_has_index) { semsg(_(e_cannot_assign_to_constant), lhs->lhs_name); return FAIL; } return OK; } /* * Return TRUE if "lhs" has a range index: "[expr : expr]". */ static int has_list_index(char_u *idx_start, cctx_T *cctx) { char_u *p = idx_start; int save_skip; if (*p != '[') return FALSE; p = skipwhite(p + 1); if (*p == ':') return TRUE; save_skip = cctx->ctx_skip; cctx->ctx_skip = SKIP_YES; (void)compile_expr0(&p, cctx); cctx->ctx_skip = save_skip; return *skipwhite(p) == ':'; } /* * For an assignment with an index, compile the "idx" in "var[idx]" or "key" in * "var.key". */ static int compile_assign_index( char_u *var_start, lhs_T *lhs, int *range, cctx_T *cctx) { size_t varlen = lhs->lhs_varlen; char_u *p; int r = OK; int need_white_before = TRUE; int empty_second; p = var_start + varlen; if (*p == '[') { p = skipwhite(p + 1); if (*p == ':') { // empty first index, push zero r = generate_PUSHNR(cctx, 0); need_white_before = FALSE; } else r = compile_expr0(&p, cctx); if (r == OK && *skipwhite(p) == ':') { // unlet var[idx : idx] // blob[idx : idx] = value *range = TRUE; p = skipwhite(p); empty_second = *skipwhite(p + 1) == ']'; if ((need_white_before && !IS_WHITE_OR_NUL(p[-1])) || (!empty_second && !IS_WHITE_OR_NUL(p[1]))) { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", p); return FAIL; } p = skipwhite(p + 1); if (*p == ']') // empty second index, push "none" r = generate_PUSHSPEC(cctx, VVAL_NONE); else r = compile_expr0(&p, cctx); } if (r == OK && *skipwhite(p) != ']') { // this should not happen emsg(_(e_missing_closing_square_brace)); r = FAIL; } } else // if (*p == '.') { char_u *key_end = to_name_end(p + 1, TRUE); char_u *key = vim_strnsave(p + 1, key_end - p - 1); r = generate_PUSHS(cctx, &key); } return r; } /* * For a LHS with an index, load the variable to be indexed. */ static int compile_load_lhs( lhs_T *lhs, char_u *var_start, type_T *rhs_type, cctx_T *cctx) { if (lhs->lhs_dest == dest_expr) { size_t varlen = lhs->lhs_varlen; int c = var_start[varlen]; int lines_len = cctx->ctx_ufunc->uf_lines.ga_len; char_u *p = var_start; garray_T *stack = &cctx->ctx_type_stack; int res; // Evaluate "ll[expr]" of "ll[expr][idx]". End the line with a NUL and // limit the lines array length to avoid skipping to a following line. var_start[varlen] = NUL; cctx->ctx_ufunc->uf_lines.ga_len = cctx->ctx_lnum + 1; res = compile_expr0(&p, cctx); var_start[varlen] = c; cctx->ctx_ufunc->uf_lines.ga_len = lines_len; if (res == FAIL || p != var_start + varlen) { // this should not happen if (res != FAIL) emsg(_(e_missing_closing_square_brace)); return FAIL; } lhs->lhs_type = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; // now we can properly check the type if (rhs_type != NULL && lhs->lhs_type->tt_member != NULL && rhs_type != &t_void && need_type(rhs_type, lhs->lhs_type->tt_member, -2, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } else generate_loadvar(cctx, lhs->lhs_dest, lhs->lhs_name, lhs->lhs_lvar, lhs->lhs_type); return OK; } /* * Produce code for loading "lhs" and also take care of an index. * Return OK/FAIL. 
*/ int compile_load_lhs_with_index(lhs_T *lhs, char_u *var_start, cctx_T *cctx) { compile_load_lhs(lhs, var_start, NULL, cctx); if (lhs->lhs_has_index) { int range = FALSE; // Get member from list or dict. First compile the // index value. if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (range) { semsg(_(e_cannot_use_range_with_assignment_operator_str), var_start); return FAIL; } // Get the member. if (compile_member(FALSE, NULL, cctx) == FAIL) return FAIL; } return OK; } /* * Assignment to a list or dict member, or ":unlet" for the item, using the * information in "lhs". * Returns OK or FAIL. */ int compile_assign_unlet( char_u *var_start, lhs_T *lhs, int is_assign, type_T *rhs_type, cctx_T *cctx) { vartype_T dest_type; garray_T *stack = &cctx->ctx_type_stack; int range = FALSE; if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (is_assign && range && lhs->lhs_type->tt_type != VAR_LIST && lhs->lhs_type != &t_blob && lhs->lhs_type != &t_any) { semsg(_(e_cannot_use_range_with_assignment_str), var_start); return FAIL; } if (lhs->lhs_type == &t_any) { // Index on variable of unknown type: check at runtime. dest_type = VAR_ANY; } else { dest_type = lhs->lhs_type->tt_type; if (dest_type == VAR_DICT && range) { emsg(e_cannot_use_range_with_dictionary); return FAIL; } if (dest_type == VAR_DICT && may_generate_2STRING(-1, FALSE, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_BLOB) { type_T *type; if (range) { type = ((type_T **)stack->ga_data)[stack->ga_len - 2]; if (need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; if ((dest_type != VAR_BLOB && type != &t_special) && need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } } // Load the dict or list. On the stack we then have: // - value (for assignment, not for :unlet) // - index // - for [a : b] second index // - variable if (compile_load_lhs(lhs, var_start, rhs_type, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_DICT || dest_type == VAR_BLOB || dest_type == VAR_ANY) { if (is_assign) { if (range) { if (generate_instr_drop(cctx, ISN_STORERANGE, 4) == NULL) return FAIL; } else { isn_T *isn = generate_instr_drop(cctx, ISN_STOREINDEX, 3); if (isn == NULL) return FAIL; isn->isn_arg.vartype = dest_type; } } else if (range) { if (generate_instr_drop(cctx, ISN_UNLETRANGE, 3) == NULL) return FAIL; } else { if (generate_instr_drop(cctx, ISN_UNLETINDEX, 2) == NULL) return FAIL; } } else { emsg(_(e_indexable_type_required)); return FAIL; } return OK; } /* * Compile declaration and assignment: * "let name" * "var name = expr" * "final name = expr" * "const name = expr" * "name = expr" * "arg" points to "name". * "++arg" and "--arg" * Return NULL for an error. * Return "arg" if it does not look like a variable list. */ static char_u * compile_assignment(char_u *arg, exarg_T *eap, cmdidx_T cmdidx, cctx_T *cctx) { char_u *var_start; char_u *p; char_u *end = arg; char_u *ret = NULL; int var_count = 0; int var_idx; int semicolon = 0; int did_generate_slice = FALSE; garray_T *instr = &cctx->ctx_instr; garray_T *stack = &cctx->ctx_type_stack; char_u *op; int oplen = 0; int heredoc = FALSE; int incdec = FALSE; type_T *rhs_type = &t_any; char_u *sp; int is_decl = is_decl_command(cmdidx); lhs_T lhs; long start_lnum = SOURCING_LNUM; // Skip over the "var" or "[var, var]" to get to any "=". 
p = skip_var_list(arg, TRUE, &var_count, &semicolon, TRUE); if (p == NULL) return *arg == '[' ? arg : NULL; lhs.lhs_name = NULL; sp = p; p = skipwhite(p); op = p; oplen = assignment_len(p, &heredoc); if (var_count > 0 && oplen == 0) // can be something like "[1, 2]->func()" return arg; if (oplen > 0 && (!VIM_ISWHITE(*sp) || !IS_WHITE_OR_NUL(op[oplen]))) { error_white_both(op, oplen); return NULL; } if (eap->cmdidx == CMD_increment || eap->cmdidx == CMD_decrement) { if (VIM_ISWHITE(eap->cmd[2])) { semsg(_(e_no_white_space_allowed_after_str_str), eap->cmdidx == CMD_increment ? "++" : "--", eap->cmd); return NULL; } op = (char_u *)(eap->cmdidx == CMD_increment ? "+=" : "-="); oplen = 2; incdec = TRUE; } if (heredoc) { list_T *l; listitem_T *li; // [let] varname =<< [trim] {end} eap->getline = exarg_getline; eap->cookie = cctx; l = heredoc_get(eap, op + 3, FALSE); if (l == NULL) return NULL; if (cctx->ctx_skip != SKIP_YES) { // Push each line and the create the list. FOR_ALL_LIST_ITEMS(l, li) { generate_PUSHS(cctx, &li->li_tv.vval.v_string); li->li_tv.vval.v_string = NULL; } generate_NEWLIST(cctx, l->lv_len); } list_free(l); p += STRLEN(p); end = p; } else if (var_count > 0) { char_u *wp; // for "[var, var] = expr" evaluate the expression here, loop over the // list of variables below. // A line break may follow the "=". wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) return FAIL; if (compile_expr0(&p, cctx) == FAIL) return NULL; end = p; if (cctx->ctx_skip != SKIP_YES) { type_T *stacktype; int needed_list_len; int did_check = FALSE; stacktype = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; if (stacktype->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } if (need_type(stacktype, &t_list_any, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; // If a constant list was used we can check the length right here. needed_list_len = semicolon ? var_count - 1 : var_count; if (instr->ga_len > 0) { isn_T *isn = ((isn_T *)instr->ga_data) + instr->ga_len - 1; if (isn->isn_type == ISN_NEWLIST) { did_check = TRUE; if (semicolon ? isn->isn_arg.number < needed_list_len : isn->isn_arg.number != needed_list_len) { semsg(_(e_expected_nr_items_but_got_nr), needed_list_len, isn->isn_arg.number); goto theend; } } } if (!did_check) generate_CHECKLEN(cctx, needed_list_len, semicolon); if (stacktype->tt_member != NULL) rhs_type = stacktype->tt_member; } } /* * Loop over variables in "[var, var] = expr". * For "var = expr" and "let var: type" this is done only once. */ if (var_count > 0) var_start = skipwhite(arg + 1); // skip over the "[" else var_start = arg; for (var_idx = 0; var_idx == 0 || var_idx < var_count; var_idx++) { int instr_count = -1; int save_lnum; int skip_store = FALSE; if (var_start[0] == '_' && !eval_isnamec(var_start[1])) { // Ignore underscore in "[a, _, b] = list". if (var_count > 0) { var_start = skipwhite(var_start + 2); continue; } emsg(_(e_cannot_use_underscore_here)); goto theend; } vim_free(lhs.lhs_name); /* * Figure out the LHS type and other properties. 
*/ if (compile_assign_lhs(var_start, &lhs, cmdidx, is_decl, heredoc, oplen, cctx) == FAIL) goto theend; if (heredoc) { SOURCING_LNUM = start_lnum; if (lhs.lhs_has_type && need_type(&t_list_string, lhs.lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } else { if (cctx->ctx_skip == SKIP_YES) { if (oplen > 0 && var_count == 0) { // skip over the "=" and the expression p = skipwhite(op + oplen); (void)compile_expr0(&p, cctx); } } else if (oplen > 0) { int is_const = FALSE; char_u *wp; // for "+=", "*=", "..=" etc. first load the current value if (*op != '=' && compile_load_lhs_with_index(&lhs, var_start, cctx) == FAIL) goto theend; // For "var = expr" evaluate the expression. if (var_count == 0) { int r; // Compile the expression. instr_count = instr->ga_len; if (incdec) { r = generate_PUSHNR(cctx, 1); } else { // Temporarily hide the new local variable here, it is // not available to this expression. if (lhs.lhs_new_local) --cctx->ctx_locals.ga_len; wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) { if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; goto theend; } r = compile_expr0_ext(&p, cctx, &is_const); if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; if (r == FAIL) goto theend; } } else if (semicolon && var_idx == var_count - 1) { // For "[var; var] = expr" get the rest of the list did_generate_slice = TRUE; if (generate_SLICE(cctx, var_count - 1) == FAIL) goto theend; } else { // For "[var, var] = expr" get the "var_idx" item from the // list. if (generate_GETITEM(cctx, var_idx, *op != '=') == FAIL) goto theend; } rhs_type = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; if (lhs.lhs_lvar != NULL && (is_decl || !lhs.lhs_has_type)) { if ((rhs_type->tt_type == VAR_FUNC || rhs_type->tt_type == VAR_PARTIAL) && !lhs.lhs_has_index && var_wrong_func_name(lhs.lhs_name, TRUE)) goto theend; if (lhs.lhs_new_local && !lhs.lhs_has_type) { if (rhs_type->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } else { // An empty list or dict has a &t_unknown member, // for a variable that implies &t_any. if (rhs_type == &t_list_empty) lhs.lhs_lvar->lv_type = &t_list_any; else if (rhs_type == &t_dict_empty) lhs.lhs_lvar->lv_type = &t_dict_any; else if (rhs_type == &t_unknown) lhs.lhs_lvar->lv_type = &t_any; else lhs.lhs_lvar->lv_type = rhs_type; } } else if (*op == '=') { type_T *use_type = lhs.lhs_lvar->lv_type; where_T where = WHERE_INIT; // Without operator check type here, otherwise below. // Use the line number of the assignment. SOURCING_LNUM = start_lnum; where.wt_index = var_count > 0 ? var_idx + 1 : 0; where.wt_variable = var_count > 0; // If assigning to a list or dict member, use the // member type. Not for "list[:] =". if (lhs.lhs_has_index && !has_list_index(var_start + lhs.lhs_varlen, cctx)) use_type = lhs.lhs_member_type; if (need_type_where(rhs_type, use_type, -1, where, cctx, FALSE, is_const) == FAIL) goto theend; } } else { type_T *lhs_type = lhs.lhs_member_type; // Special case: assigning to @# can use a number or a // string. // Also: can assign a number to a float. 
if ((lhs_type == &t_number_or_string || lhs_type == &t_float) && rhs_type->tt_type == VAR_NUMBER) lhs_type = &t_number; if (*p != '=' && need_type(rhs_type, lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } } else if (cmdidx == CMD_final) { emsg(_(e_final_requires_a_value)); goto theend; } else if (cmdidx == CMD_const) { emsg(_(e_const_requires_a_value)); goto theend; } else if (!lhs.lhs_has_type || lhs.lhs_dest == dest_option || lhs.lhs_dest == dest_func_option) { emsg(_(e_type_or_initialization_required)); goto theend; } else { // variables are always initialized if (GA_GROW_FAILS(instr, 1)) goto theend; switch (lhs.lhs_member_type->tt_type) { case VAR_BOOL: generate_PUSHBOOL(cctx, VVAL_FALSE); break; case VAR_FLOAT: #ifdef FEAT_FLOAT generate_PUSHF(cctx, 0.0); #endif break; case VAR_STRING: generate_PUSHS(cctx, NULL); break; case VAR_BLOB: generate_PUSHBLOB(cctx, blob_alloc()); break; case VAR_FUNC: generate_PUSHFUNC(cctx, NULL, &t_func_void); break; case VAR_LIST: generate_NEWLIST(cctx, 0); break; case VAR_DICT: generate_NEWDICT(cctx, 0); break; case VAR_JOB: generate_PUSHJOB(cctx, NULL); break; case VAR_CHANNEL: generate_PUSHCHANNEL(cctx, NULL); break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_PARTIAL: case VAR_VOID: case VAR_INSTR: case VAR_SPECIAL: // cannot happen // This is skipped for local variables, they are // always initialized to zero. if (lhs.lhs_dest == dest_local) skip_store = TRUE; else generate_PUSHNR(cctx, 0); break; } } if (var_count == 0) end = p; } // no need to parse more when skipping if (cctx->ctx_skip == SKIP_YES) break; if (oplen > 0 && *op != '=') { type_T *expected; type_T *stacktype = NULL; if (*op == '.') { if (may_generate_2STRING(-1, FALSE, cctx) == FAIL) goto theend; } else { expected = lhs.lhs_member_type; stacktype = ((type_T **)stack->ga_data)[stack->ga_len - 1]; if ( #ifdef FEAT_FLOAT // If variable is float operation with number is OK. !(expected == &t_float && (stacktype == &t_number || stacktype == &t_number_bool)) && #endif need_type(stacktype, expected, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } if (*op == '.') { if (generate_instr_drop(cctx, ISN_CONCAT, 1) == NULL) goto theend; } else if (*op == '+') { if (generate_add_instr(cctx, operator_type(lhs.lhs_member_type, stacktype), lhs.lhs_member_type, stacktype, EXPR_APPEND) == FAIL) goto theend; } else if (generate_two_op(cctx, op) == FAIL) goto theend; } // Use the line number of the assignment for store instruction. save_lnum = cctx->ctx_lnum; cctx->ctx_lnum = start_lnum - 1; if (lhs.lhs_has_index) { // Use the info in "lhs" to store the value at the index in the // list or dict. if (compile_assign_unlet(var_start, &lhs, TRUE, rhs_type, cctx) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } else { if (is_decl && cmdidx == CMD_const && (lhs.lhs_dest == dest_script || lhs.lhs_dest == dest_global || lhs.lhs_dest == dest_local)) // ":const var": lock the value, but not referenced variables generate_LOCKCONST(cctx); if (is_decl && (lhs.lhs_type->tt_type == VAR_DICT || lhs.lhs_type->tt_type == VAR_LIST) && lhs.lhs_type->tt_member != NULL && !(lhs.lhs_type->tt_member == &t_any && oplen > 0 && rhs_type != NULL && rhs_type->tt_type == lhs.lhs_type->tt_type && rhs_type->tt_member != &t_unknown) && lhs.lhs_type->tt_member != &t_unknown) // Set the type in the list or dict, so that it can be checked, // also in legacy script. Not for "list<any> = val", then the // type of "val" is used. 
generate_SETTYPE(cctx, lhs.lhs_type); if (!skip_store && generate_store_lhs(cctx, &lhs, instr_count, is_decl) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } cctx->ctx_lnum = save_lnum; if (var_idx + 1 < var_count) var_start = skipwhite(lhs.lhs_end + 1); } // For "[var, var] = expr" drop the "expr" value. // Also for "[var, var; _] = expr". if (var_count > 0 && (!semicolon || !did_generate_slice)) { if (generate_instr_drop(cctx, ISN_DROP, 1) == NULL) goto theend; } ret = skipwhite(end); theend: vim_free(lhs.lhs_name); return ret; } /* * Check for an assignment at "eap->cmd", compile it if found. * Return NOTDONE if there is none, FAIL for failure, OK if done. */ static int may_compile_assignment(exarg_T *eap, char_u **line, cctx_T *cctx) { char_u *pskip; char_u *p; // Assuming the command starts with a variable or function name, // find what follows. // Skip over "var.member", "var[idx]" and the like. // Also "&opt = val", "$ENV = val" and "@r = val". pskip = (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@') ? eap->cmd + 1 : eap->cmd; p = to_name_end(pskip, TRUE); if (p > eap->cmd && *p != NUL) { char_u *var_end; int oplen; int heredoc; if (eap->cmd[0] == '@') var_end = eap->cmd + 2; else var_end = find_name_end(pskip, NULL, NULL, FNE_CHECK_START | FNE_INCL_BR); oplen = assignment_len(skipwhite(var_end), &heredoc); if (oplen > 0) { size_t len = p - eap->cmd; // Recognize an assignment if we recognize the variable // name: // "g:var = expr" // "local = expr" where "local" is a local var. // "script = expr" where "script" is a script-local var. // "import = expr" where "import" is an imported var // "&opt = expr" // "$ENV = expr" // "@r = expr" if (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@' || ((len) > 2 && eap->cmd[1] == ':') || variable_exists(eap->cmd, len, cctx)) { *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL || *line == eap->cmd) return FAIL; return OK; } } } if (*eap->cmd == '[') { // [var, var] = expr *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL) return FAIL; if (*line != eap->cmd) return OK; } return NOTDONE; } /* * Add a function to the list of :def functions. * This sets "ufunc->uf_dfunc_idx" but the function isn't compiled yet. */ static int add_def_function(ufunc_T *ufunc) { dfunc_T *dfunc; if (def_functions.ga_len == 0) { // The first position is not used, so that a zero uf_dfunc_idx means it // wasn't set. if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; ++def_functions.ga_len; } // Add the function to "def_functions". if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; dfunc = ((dfunc_T *)def_functions.ga_data) + def_functions.ga_len; CLEAR_POINTER(dfunc); dfunc->df_idx = def_functions.ga_len; ufunc->uf_dfunc_idx = dfunc->df_idx; dfunc->df_ufunc = ufunc; dfunc->df_name = vim_strsave(ufunc->uf_name); ga_init2(&dfunc->df_var_names, sizeof(char_u *), 10); ++dfunc->df_refcount; ++def_functions.ga_len; return OK; } /* * After ex_function() has collected all the function lines: parse and compile * the lines into instructions. * Adds the function to "def_functions". * When "check_return_type" is set then set ufunc->uf_ret_type to the type of * the return statement (used for lambda). When uf_ret_type is already set * then check that it matches. * When "profiling" is true add ISN_PROF_START instructions. * "outer_cctx" is set for a nested function. * This can be used recursively through compile_lambda(), which may reallocate * "def_functions". * Returns OK or FAIL. 
*/ int compile_def_function( ufunc_T *ufunc, int check_return_type, compiletype_T compile_type, cctx_T *outer_cctx) { char_u *line = NULL; char_u *line_to_free = NULL; char_u *p; char *errormsg = NULL; // error message cctx_T cctx; garray_T *instr; int did_emsg_before = did_emsg; int did_emsg_silent_before = did_emsg_silent; int ret = FAIL; sctx_T save_current_sctx = current_sctx; int save_estack_compiling = estack_compiling; int save_cmod_flags = cmdmod.cmod_flags; int do_estack_push; int new_def_function = FALSE; #ifdef FEAT_PROFILE int prof_lnum = -1; #endif int debug_lnum = -1; // When using a function that was compiled before: Free old instructions. // The index is reused. Otherwise add a new entry in "def_functions". if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; isn_T *instr_dest = NULL; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE instr_dest = dfunc->df_instr_prof; break; #endif case CT_NONE: instr_dest = dfunc->df_instr; break; case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break; } if (instr_dest != NULL) // Was compiled in this mode before: Free old instructions. delete_def_function_contents(dfunc, FALSE); ga_clear_strings(&dfunc->df_var_names); } else { if (add_def_function(ufunc) == FAIL) return FAIL; new_def_function = TRUE; } ufunc->uf_def_status = UF_COMPILING; CLEAR_FIELD(cctx); cctx.ctx_compile_type = compile_type; cctx.ctx_ufunc = ufunc; cctx.ctx_lnum = -1; cctx.ctx_outer = outer_cctx; ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10); ga_init2(&cctx.ctx_type_stack, sizeof(type_T *), 50); ga_init2(&cctx.ctx_imports, sizeof(imported_T), 10); cctx.ctx_type_list = &ufunc->uf_type_list; ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50); instr = &cctx.ctx_instr; // Set the context to the function, it may be compiled when called from // another script. Set the script version to the most modern one. // The line number will be set in next_line_from_context(). current_sctx = ufunc->uf_script_ctx; current_sctx.sc_version = SCRIPT_VERSION_VIM9; // Don't use the flag from ":legacy" here. cmdmod.cmod_flags &= ~CMOD_LEGACY; // Make sure error messages are OK. do_estack_push = !estack_top_is_ufunc(ufunc, 1); if (do_estack_push) estack_push_ufunc(ufunc, 1); estack_compiling = TRUE; if (ufunc->uf_def_args.ga_len > 0) { int count = ufunc->uf_def_args.ga_len; int first_def_arg = ufunc->uf_args.ga_len - count; int i; char_u *arg; int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0); int did_set_arg_type = FALSE; // Produce instructions for the default values of optional arguments. SOURCING_LNUM = 0; // line number unknown for (i = 0; i < count; ++i) { garray_T *stack = &cctx.ctx_type_stack; type_T *val_type; int arg_idx = first_def_arg + i; where_T where = WHERE_INIT; int r; int jump_instr_idx = instr->ga_len; isn_T *isn; // Use a JUMP_IF_ARG_SET instruction to skip if the value was given. if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL) goto erret; // Make sure later arguments are not found. ufunc->uf_args_visible = arg_idx; arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i]; r = compile_expr0(&arg, &cctx); if (r == FAIL) goto erret; // If no type specified use the type of the default value. // Otherwise check that the default value type matches the // specified type. 
val_type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; where.wt_index = arg_idx + 1; if (ufunc->uf_arg_types[arg_idx] == &t_unknown) { did_set_arg_type = TRUE; ufunc->uf_arg_types[arg_idx] = val_type; } else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx], -1, where, &cctx, FALSE, FALSE) == FAIL) goto erret; if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL) goto erret; // set instruction index in JUMP_IF_ARG_SET to here isn = ((isn_T *)instr->ga_data) + jump_instr_idx; isn->isn_arg.jumparg.jump_where = instr->ga_len; } if (did_set_arg_type) set_function_type(ufunc); } ufunc->uf_args_visible = ufunc->uf_args.ga_len; /* * Loop over all the lines of the function and generate instructions. */ for (;;) { exarg_T ea; int starts_with_colon = FALSE; char_u *cmd; cmdmod_T local_cmdmod; // Bail out on the first error to avoid a flood of errors and report // the right line number when inside try/catch. if (did_emsg_before != did_emsg) goto erret; if (line != NULL && *line == '|') // the line continues after a '|' ++line; else if (line != NULL && *skipwhite(line) != NUL && !(*line == '#' && (line == cctx.ctx_line_start || VIM_ISWHITE(line[-1])))) { semsg(_(e_trailing_arg), line); goto erret; } else if (line != NULL && vim9_bad_comment(skipwhite(line))) goto erret; else { line = next_line_from_context(&cctx, FALSE); if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len) { // beyond the last line #ifdef FEAT_PROFILE if (cctx.ctx_skip != SKIP_YES) may_generate_prof_end(&cctx, prof_lnum); #endif break; } // Make a copy, splitting off nextcmd and removing trailing spaces // may change it. if (line != NULL) { line = vim_strsave(line); vim_free(line_to_free); line_to_free = line; } } CLEAR_FIELD(ea); ea.cmdlinep = &line; ea.cmd = skipwhite(line); if (*ea.cmd == '#') { // "#" starts a comment line = (char_u *)""; continue; } #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum && cctx.ctx_skip != SKIP_YES) { may_generate_prof_end(&cctx, prof_lnum); prof_lnum = cctx.ctx_lnum; generate_instr(&cctx, ISN_PROF_START); } #endif if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum && cctx.ctx_skip != SKIP_YES) { debug_lnum = cctx.ctx_lnum; generate_instr_debug(&cctx); } cctx.ctx_prev_lnum = cctx.ctx_lnum + 1; // Some things can be recognized by the first character. switch (*ea.cmd) { case '}': { // "}" ends a block scope scopetype_T stype = cctx.ctx_scope == NULL ? NO_SCOPE : cctx.ctx_scope->se_type; if (stype == BLOCK_SCOPE) { compile_endblock(&cctx); line = ea.cmd; } else { emsg(_(e_using_rcurly_outside_if_block_scope)); goto erret; } if (line != NULL) line = skipwhite(ea.cmd + 1); continue; } case '{': // "{" starts a block scope // "{'a': 1}->func() is something else if (ends_excmd(*skipwhite(ea.cmd + 1))) { line = compile_block(ea.cmd, &cctx); continue; } break; } /* * COMMAND MODIFIERS */ cctx.ctx_has_cmdmod = FALSE; if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE) == FAIL) { if (errormsg != NULL) goto erret; // empty line or comment line = (char_u *)""; continue; } generate_cmdmods(&cctx, &local_cmdmod); undo_cmdmod(&local_cmdmod); // Check if there was a colon after the last command modifier or before // the current position. 
for (p = ea.cmd; p >= line; --p) { if (*p == ':') starts_with_colon = TRUE; if (p < ea.cmd && !VIM_ISWHITE(*p)) break; } // Skip ":call" to get to the function name, unless using :legacy p = ea.cmd; if (!(local_cmdmod.cmod_flags & CMOD_LEGACY)) { if (checkforcmd(&ea.cmd, "call", 3)) { if (*ea.cmd == '(') // not for "call()" ea.cmd = p; else ea.cmd = skipwhite(ea.cmd); } if (!starts_with_colon) { int assign; // Check for assignment after command modifiers. assign = may_compile_assignment(&ea, &line, &cctx); if (assign == OK) goto nextline; if (assign == FAIL) goto erret; } } /* * COMMAND after range * 'text'->func() should not be confused with 'a mark * "++nr" and "--nr" are eval commands * in "$ENV->func()" the "$" is not a range */ cmd = ea.cmd; if ((*cmd != '$' || starts_with_colon) && (starts_with_colon || !(*cmd == '\'' || (cmd[0] == cmd[1] && (*cmd == '+' || *cmd == '-'))))) { ea.cmd = skip_range(ea.cmd, TRUE, NULL); if (ea.cmd > cmd) { if (!starts_with_colon && !(local_cmdmod.cmod_flags & CMOD_LEGACY)) { semsg(_(e_colon_required_before_range_str), cmd); goto erret; } ea.addr_count = 1; if (ends_excmd2(line, ea.cmd)) { // A range without a command: jump to the line. generate_EXEC(&cctx, ISN_EXECRANGE, vim_strnsave(cmd, ea.cmd - cmd)); line = ea.cmd; goto nextline; } } } p = find_ex_command(&ea, NULL, starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY) ? NULL : item_exists, &cctx); if (p == NULL) { if (cctx.ctx_skip != SKIP_YES) emsg(_(e_ambiguous_use_of_user_defined_command)); goto erret; } // When using ":legacy cmd" always use compile_exec(). if (local_cmdmod.cmod_flags & CMOD_LEGACY) { char_u *start = ea.cmd; switch (ea.cmdidx) { case CMD_if: case CMD_elseif: case CMD_else: case CMD_endif: case CMD_for: case CMD_endfor: case CMD_continue: case CMD_break: case CMD_while: case CMD_endwhile: case CMD_try: case CMD_catch: case CMD_finally: case CMD_endtry: semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd); goto erret; default: break; } // ":legacy return expr" needs to be handled differently. if (checkforcmd(&start, "return", 4)) ea.cmdidx = CMD_return; else ea.cmdidx = CMD_legacy; } if (p == ea.cmd && ea.cmdidx != CMD_SIZE) { if (cctx.ctx_skip == SKIP_YES && ea.cmdidx != CMD_eval) { line += STRLEN(line); goto nextline; } else if (ea.cmdidx != CMD_eval) { // CMD_var cannot happen, compile_assignment() above would be // used. Most likely an assignment to a non-existing variable. 
semsg(_(e_command_not_recognized_str), ea.cmd); goto erret; } } if (cctx.ctx_had_return && ea.cmdidx != CMD_elseif && ea.cmdidx != CMD_else && ea.cmdidx != CMD_endif && ea.cmdidx != CMD_endfor && ea.cmdidx != CMD_endwhile && ea.cmdidx != CMD_catch && ea.cmdidx != CMD_finally && ea.cmdidx != CMD_endtry) { emsg(_(e_unreachable_code_after_return)); goto erret; } p = skipwhite(p); if (ea.cmdidx != CMD_SIZE && ea.cmdidx != CMD_write && ea.cmdidx != CMD_read) { if (ea.cmdidx >= 0) ea.argt = excmd_get_argt(ea.cmdidx); if ((ea.argt & EX_BANG) && *p == '!') { ea.forceit = TRUE; p = skipwhite(p + 1); } } switch (ea.cmdidx) { case CMD_def: case CMD_function: ea.arg = p; line = compile_nested_function(&ea, &cctx, &line_to_free); break; case CMD_return: line = compile_return(p, check_return_type, local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx); cctx.ctx_had_return = TRUE; break; case CMD_let: emsg(_(e_cannot_use_let_in_vim9_script)); break; case CMD_var: case CMD_final: case CMD_const: case CMD_increment: case CMD_decrement: line = compile_assignment(p, &ea, ea.cmdidx, &cctx); if (line == p) line = NULL; break; case CMD_unlet: case CMD_unlockvar: case CMD_lockvar: line = compile_unletlock(p, &ea, &cctx); break; case CMD_import: emsg(_(e_import_can_only_be_used_in_script)); line = NULL; break; case CMD_if: line = compile_if(p, &cctx); break; case CMD_elseif: line = compile_elseif(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_else: line = compile_else(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endif: line = compile_endif(p, &cctx); break; case CMD_while: line = compile_while(p, &cctx); break; case CMD_endwhile: line = compile_endwhile(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_for: line = compile_for(p, &cctx); break; case CMD_endfor: line = compile_endfor(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_continue: line = compile_continue(p, &cctx); break; case CMD_break: line = compile_break(p, &cctx); break; case CMD_try: line = compile_try(p, &cctx); break; case CMD_catch: line = compile_catch(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_finally: line = compile_finally(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endtry: line = compile_endtry(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_throw: line = compile_throw(p, &cctx); break; case CMD_eval: line = compile_eval(p, &cctx); break; case CMD_echo: case CMD_echon: case CMD_execute: case CMD_echomsg: case CMD_echoerr: case CMD_echoconsole: line = compile_mult_expr(p, ea.cmdidx, &cctx); break; case CMD_put: ea.cmd = cmd; line = compile_put(p, &ea, &cctx); break; case CMD_substitute: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; if (cctx.ctx_skip == SKIP_YES) line = (char_u *)""; else { ea.arg = p; line = compile_substitute(line, &ea, &cctx); } break; case CMD_redir: ea.arg = p; line = compile_redir(line, &ea, &cctx); break; case CMD_cexpr: case CMD_lexpr: case CMD_caddexpr: case CMD_laddexpr: case CMD_cgetexpr: case CMD_lgetexpr: #ifdef FEAT_QUICKFIX ea.arg = p; line = compile_cexpr(line, &ea, &cctx); #else ex_ni(&ea); line = NULL; #endif break; case CMD_append: case CMD_change: case CMD_insert: case CMD_k: case CMD_t: case CMD_xit: not_in_vim9(&ea); goto erret; case CMD_SIZE: if (cctx.ctx_skip != SKIP_YES) { semsg(_(e_invalid_command_str), ea.cmd); goto erret; } // We don't check for a next command here. 
line = (char_u *)""; break; case CMD_lua: case CMD_mzscheme: case CMD_perl: case CMD_py3: case CMD_python3: case CMD_python: case CMD_pythonx: case CMD_ruby: case CMD_tcl: ea.arg = p; if (vim_strchr(line, '\n') == NULL) line = compile_exec(line, &ea, &cctx); else // heredoc lines have been concatenated with NL // characters in get_function_body() line = compile_script(line, &cctx); break; case CMD_global: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; // FALLTHROUGH default: // Not recognized, execute with do_cmdline_cmd(). ea.arg = p; line = compile_exec(line, &ea, &cctx); break; } nextline: if (line == NULL) goto erret; line = skipwhite(line); // Undo any command modifiers. generate_undo_cmdmods(&cctx); if (cctx.ctx_type_stack.ga_len < 0) { iemsg("Type stack underflow"); goto erret; } } if (cctx.ctx_scope != NULL) { if (cctx.ctx_scope->se_type == IF_SCOPE) emsg(_(e_endif)); else if (cctx.ctx_scope->se_type == WHILE_SCOPE) emsg(_(e_endwhile)); else if (cctx.ctx_scope->se_type == FOR_SCOPE) emsg(_(e_endfor)); else emsg(_(e_missing_rcurly)); goto erret; } if (!cctx.ctx_had_return) { if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN) ufunc->uf_ret_type = &t_void; else if (ufunc->uf_ret_type->tt_type != VAR_VOID) { emsg(_(e_missing_return_statement)); goto erret; } // Return void if there is no return at the end. generate_instr(&cctx, ISN_RETURN_VOID); } // When compiled with ":silent!" and there was an error don't consider the // function compiled. if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; dfunc->df_deleted = FALSE; dfunc->df_script_seq = current_sctx.sc_seq; #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE) { dfunc->df_instr_prof = instr->ga_data; dfunc->df_instr_prof_count = instr->ga_len; } else #endif if (cctx.ctx_compile_type == CT_DEBUG) { dfunc->df_instr_debug = instr->ga_data; dfunc->df_instr_debug_count = instr->ga_len; } else { dfunc->df_instr = instr->ga_data; dfunc->df_instr_count = instr->ga_len; } dfunc->df_varcount = dfunc->df_var_names.ga_len; dfunc->df_has_closure = cctx.ctx_has_closure; if (cctx.ctx_outer_used) ufunc->uf_flags |= FC_CLOSURE; ufunc->uf_def_status = UF_COMPILED; } ret = OK; erret: if (ufunc->uf_def_status == UF_COMPILING) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; // Compiling aborted, free the generated instructions. clear_instr_ga(instr); VIM_CLEAR(dfunc->df_name); ga_clear_strings(&dfunc->df_var_names); // If using the last entry in the table and it was added above, we // might as well remove it. 
if (!dfunc->df_deleted && new_def_function && ufunc->uf_dfunc_idx == def_functions.ga_len - 1) { --def_functions.ga_len; ufunc->uf_dfunc_idx = 0; } ufunc->uf_def_status = UF_COMPILE_ERROR; while (cctx.ctx_scope != NULL) drop_scope(&cctx); if (errormsg != NULL) emsg(errormsg); else if (did_emsg == did_emsg_before) emsg(_(e_compiling_def_function_failed)); } if (cctx.ctx_redir_lhs.lhs_name != NULL) { if (ret == OK) { emsg(_(e_missing_redir_end)); ret = FAIL; } vim_free(cctx.ctx_redir_lhs.lhs_name); vim_free(cctx.ctx_redir_lhs.lhs_whole); } current_sctx = save_current_sctx; estack_compiling = save_estack_compiling; cmdmod.cmod_flags = save_cmod_flags; if (do_estack_push) estack_pop(); vim_free(line_to_free); free_imported(&cctx); free_locals(&cctx); ga_clear(&cctx.ctx_type_stack); return ret; } void set_function_type(ufunc_T *ufunc) { int varargs = ufunc->uf_va_name != NULL; int argcount = ufunc->uf_args.ga_len; // Create a type for the function, with the return type and any // argument types. // A vararg is included in uf_args.ga_len but not in uf_arg_types. // The type is included in "tt_args". if (argcount > 0 || varargs) { if (ufunc->uf_type_list.ga_itemsize == 0) ga_init2(&ufunc->uf_type_list, sizeof(type_T *), 10); ufunc->uf_func_type = alloc_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); // Add argument types to the function type. if (func_type_add_arg_types(ufunc->uf_func_type, argcount + varargs, &ufunc->uf_type_list) == FAIL) return; ufunc->uf_func_type->tt_argcount = argcount + varargs; ufunc->uf_func_type->tt_min_argcount = argcount - ufunc->uf_def_args.ga_len; if (ufunc->uf_arg_types == NULL) { int i; // lambda does not have argument types. for (i = 0; i < argcount; ++i) ufunc->uf_func_type->tt_args[i] = &t_any; } else mch_memmove(ufunc->uf_func_type->tt_args, ufunc->uf_arg_types, sizeof(type_T *) * argcount); if (varargs) { ufunc->uf_func_type->tt_args[argcount] = ufunc->uf_va_type == NULL ? &t_list_any : ufunc->uf_va_type; ufunc->uf_func_type->tt_flags = TTFLAG_VARARGS; } } else // No arguments, can use a predefined type. ufunc->uf_func_type = get_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); } /* * Free all instructions for "dfunc" except df_name. */ static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted) { int idx; ga_clear(&dfunc->df_def_args_isn); ga_clear_strings(&dfunc->df_var_names); if (dfunc->df_instr != NULL) { for (idx = 0; idx < dfunc->df_instr_count; ++idx) delete_instr(dfunc->df_instr + idx); VIM_CLEAR(dfunc->df_instr); dfunc->df_instr = NULL; } if (dfunc->df_instr_debug != NULL) { for (idx = 0; idx < dfunc->df_instr_debug_count; ++idx) delete_instr(dfunc->df_instr_debug + idx); VIM_CLEAR(dfunc->df_instr_debug); dfunc->df_instr_debug = NULL; } #ifdef FEAT_PROFILE if (dfunc->df_instr_prof != NULL) { for (idx = 0; idx < dfunc->df_instr_prof_count; ++idx) delete_instr(dfunc->df_instr_prof + idx); VIM_CLEAR(dfunc->df_instr_prof); dfunc->df_instr_prof = NULL; } #endif if (mark_deleted) dfunc->df_deleted = TRUE; if (dfunc->df_ufunc != NULL) dfunc->df_ufunc->uf_def_status = UF_NOT_COMPILED; } /* * When a user function is deleted, clear the contents of any associated def * function, unless another user function still uses it. * The position in def_functions can be re-used. 
*/ void unlink_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; if (--dfunc->df_refcount <= 0) delete_def_function_contents(dfunc, TRUE); ufunc->uf_def_status = UF_NOT_COMPILED; ufunc->uf_dfunc_idx = 0; if (dfunc->df_ufunc == ufunc) dfunc->df_ufunc = NULL; } } /* * Used when a user function refers to an existing dfunc. */ void link_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; ++dfunc->df_refcount; } } #if defined(EXITFREE) || defined(PROTO) /* * Free all functions defined with ":def". */ void free_def_functions(void) { int idx; for (idx = 0; idx < def_functions.ga_len; ++idx) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + idx; delete_def_function_contents(dfunc, TRUE); vim_free(dfunc->df_name); } ga_clear(&def_functions); } #endif #endif // FEAT_EVAL
compile_def_function( ufunc_T *ufunc, int check_return_type, compiletype_T compile_type, cctx_T *outer_cctx) { char_u *line = NULL; char_u *line_to_free = NULL; char_u *p; char *errormsg = NULL; // error message cctx_T cctx; garray_T *instr; int did_emsg_before = did_emsg; int did_emsg_silent_before = did_emsg_silent; int ret = FAIL; sctx_T save_current_sctx = current_sctx; int save_estack_compiling = estack_compiling; int save_cmod_flags = cmdmod.cmod_flags; int do_estack_push; int new_def_function = FALSE; #ifdef FEAT_PROFILE int prof_lnum = -1; #endif int debug_lnum = -1; // When using a function that was compiled before: Free old instructions. // The index is reused. Otherwise add a new entry in "def_functions". if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; isn_T *instr_dest = NULL; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE instr_dest = dfunc->df_instr_prof; break; #endif case CT_NONE: instr_dest = dfunc->df_instr; break; case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break; } if (instr_dest != NULL) // Was compiled in this mode before: Free old instructions. delete_def_function_contents(dfunc, FALSE); ga_clear_strings(&dfunc->df_var_names); } else { if (add_def_function(ufunc) == FAIL) return FAIL; new_def_function = TRUE; } ufunc->uf_def_status = UF_COMPILING; CLEAR_FIELD(cctx); cctx.ctx_compile_type = compile_type; cctx.ctx_ufunc = ufunc; cctx.ctx_lnum = -1; cctx.ctx_outer = outer_cctx; ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10); ga_init2(&cctx.ctx_type_stack, sizeof(type_T *), 50); ga_init2(&cctx.ctx_imports, sizeof(imported_T), 10); cctx.ctx_type_list = &ufunc->uf_type_list; ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50); instr = &cctx.ctx_instr; // Set the context to the function, it may be compiled when called from // another script. Set the script version to the most modern one. // The line number will be set in next_line_from_context(). current_sctx = ufunc->uf_script_ctx; current_sctx.sc_version = SCRIPT_VERSION_VIM9; // Don't use the flag from ":legacy" here. cmdmod.cmod_flags &= ~CMOD_LEGACY; // Make sure error messages are OK. do_estack_push = !estack_top_is_ufunc(ufunc, 1); if (do_estack_push) estack_push_ufunc(ufunc, 1); estack_compiling = TRUE; if (ufunc->uf_def_args.ga_len > 0) { int count = ufunc->uf_def_args.ga_len; int first_def_arg = ufunc->uf_args.ga_len - count; int i; char_u *arg; int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0); int did_set_arg_type = FALSE; // Produce instructions for the default values of optional arguments. SOURCING_LNUM = 0; // line number unknown for (i = 0; i < count; ++i) { garray_T *stack = &cctx.ctx_type_stack; type_T *val_type; int arg_idx = first_def_arg + i; where_T where = WHERE_INIT; int r; int jump_instr_idx = instr->ga_len; isn_T *isn; // Use a JUMP_IF_ARG_SET instruction to skip if the value was given. if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL) goto erret; // Make sure later arguments are not found. ufunc->uf_args_visible = arg_idx; arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i]; r = compile_expr0(&arg, &cctx); if (r == FAIL) goto erret; // If no type specified use the type of the default value. // Otherwise check that the default value type matches the // specified type. 
val_type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; where.wt_index = arg_idx + 1; if (ufunc->uf_arg_types[arg_idx] == &t_unknown) { did_set_arg_type = TRUE; ufunc->uf_arg_types[arg_idx] = val_type; } else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx], -1, where, &cctx, FALSE, FALSE) == FAIL) goto erret; if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL) goto erret; // set instruction index in JUMP_IF_ARG_SET to here isn = ((isn_T *)instr->ga_data) + jump_instr_idx; isn->isn_arg.jumparg.jump_where = instr->ga_len; } if (did_set_arg_type) set_function_type(ufunc); } ufunc->uf_args_visible = ufunc->uf_args.ga_len; /* * Loop over all the lines of the function and generate instructions. */ for (;;) { exarg_T ea; int starts_with_colon = FALSE; char_u *cmd; cmdmod_T local_cmdmod; // Bail out on the first error to avoid a flood of errors and report // the right line number when inside try/catch. if (did_emsg_before != did_emsg) goto erret; if (line != NULL && *line == '|') // the line continues after a '|' ++line; else if (line != NULL && *skipwhite(line) != NUL && !(*line == '#' && (line == cctx.ctx_line_start || VIM_ISWHITE(line[-1])))) { semsg(_(e_trailing_arg), line); goto erret; } else if (line != NULL && vim9_bad_comment(skipwhite(line))) goto erret; else { line = next_line_from_context(&cctx, FALSE); if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len) { // beyond the last line #ifdef FEAT_PROFILE if (cctx.ctx_skip != SKIP_YES) may_generate_prof_end(&cctx, prof_lnum); #endif break; } // Make a copy, splitting off nextcmd and removing trailing spaces // may change it. if (line != NULL) { line = vim_strsave(line); vim_free(line_to_free); line_to_free = line; } } CLEAR_FIELD(ea); ea.cmdlinep = &line; ea.cmd = skipwhite(line); if (*ea.cmd == '#') { // "#" starts a comment line = (char_u *)""; continue; } #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum && cctx.ctx_skip != SKIP_YES) { may_generate_prof_end(&cctx, prof_lnum); prof_lnum = cctx.ctx_lnum; generate_instr(&cctx, ISN_PROF_START); } #endif if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum && cctx.ctx_skip != SKIP_YES) { debug_lnum = cctx.ctx_lnum; generate_instr_debug(&cctx); } cctx.ctx_prev_lnum = cctx.ctx_lnum + 1; // Some things can be recognized by the first character. switch (*ea.cmd) { case '}': { // "}" ends a block scope scopetype_T stype = cctx.ctx_scope == NULL ? NO_SCOPE : cctx.ctx_scope->se_type; if (stype == BLOCK_SCOPE) { compile_endblock(&cctx); line = ea.cmd; } else { emsg(_(e_using_rcurly_outside_if_block_scope)); goto erret; } if (line != NULL) line = skipwhite(ea.cmd + 1); continue; } case '{': // "{" starts a block scope // "{'a': 1}->func() is something else if (ends_excmd(*skipwhite(ea.cmd + 1))) { line = compile_block(ea.cmd, &cctx); continue; } break; } /* * COMMAND MODIFIERS */ cctx.ctx_has_cmdmod = FALSE; if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE) == FAIL) { if (errormsg != NULL) goto erret; // empty line or comment line = (char_u *)""; continue; } generate_cmdmods(&cctx, &local_cmdmod); undo_cmdmod(&local_cmdmod); // Check if there was a colon after the last command modifier or before // the current position. 
for (p = ea.cmd; p >= line; --p) { if (*p == ':') starts_with_colon = TRUE; if (p < ea.cmd && !VIM_ISWHITE(*p)) break; } // Skip ":call" to get to the function name, unless using :legacy p = ea.cmd; if (!(local_cmdmod.cmod_flags & CMOD_LEGACY)) { if (checkforcmd(&ea.cmd, "call", 3)) { if (*ea.cmd == '(') // not for "call()" ea.cmd = p; else ea.cmd = skipwhite(ea.cmd); } if (!starts_with_colon) { int assign; // Check for assignment after command modifiers. assign = may_compile_assignment(&ea, &line, &cctx); if (assign == OK) goto nextline; if (assign == FAIL) goto erret; } } /* * COMMAND after range * 'text'->func() should not be confused with 'a mark * "++nr" and "--nr" are eval commands * in "$ENV->func()" the "$" is not a range */ cmd = ea.cmd; if ((*cmd != '$' || starts_with_colon) && (starts_with_colon || !(*cmd == '\'' || (cmd[0] == cmd[1] && (*cmd == '+' || *cmd == '-'))))) { ea.cmd = skip_range(ea.cmd, TRUE, NULL); if (ea.cmd > cmd) { if (!starts_with_colon && !(local_cmdmod.cmod_flags & CMOD_LEGACY)) { semsg(_(e_colon_required_before_range_str), cmd); goto erret; } ea.addr_count = 1; if (ends_excmd2(line, ea.cmd)) { // A range without a command: jump to the line. generate_EXEC(&cctx, ISN_EXECRANGE, vim_strnsave(cmd, ea.cmd - cmd)); line = ea.cmd; goto nextline; } } } p = find_ex_command(&ea, NULL, starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY) ? NULL : item_exists, &cctx); if (p == NULL) { if (cctx.ctx_skip != SKIP_YES) emsg(_(e_ambiguous_use_of_user_defined_command)); goto erret; } // When using ":legacy cmd" always use compile_exec(). if (local_cmdmod.cmod_flags & CMOD_LEGACY) { char_u *start = ea.cmd; switch (ea.cmdidx) { case CMD_if: case CMD_elseif: case CMD_else: case CMD_endif: case CMD_for: case CMD_endfor: case CMD_continue: case CMD_break: case CMD_while: case CMD_endwhile: case CMD_try: case CMD_catch: case CMD_finally: case CMD_endtry: semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd); goto erret; default: break; } // ":legacy return expr" needs to be handled differently. if (checkforcmd(&start, "return", 4)) ea.cmdidx = CMD_return; else ea.cmdidx = CMD_legacy; } if (p == ea.cmd && ea.cmdidx != CMD_SIZE) { if (cctx.ctx_skip == SKIP_YES && ea.cmdidx != CMD_eval) { line += STRLEN(line); goto nextline; } else if (ea.cmdidx != CMD_eval) { // CMD_var cannot happen, compile_assignment() above would be // used. Most likely an assignment to a non-existing variable. 
semsg(_(e_command_not_recognized_str), ea.cmd); goto erret; } } if (cctx.ctx_had_return && ea.cmdidx != CMD_elseif && ea.cmdidx != CMD_else && ea.cmdidx != CMD_endif && ea.cmdidx != CMD_endfor && ea.cmdidx != CMD_endwhile && ea.cmdidx != CMD_catch && ea.cmdidx != CMD_finally && ea.cmdidx != CMD_endtry) { emsg(_(e_unreachable_code_after_return)); goto erret; } p = skipwhite(p); if (ea.cmdidx != CMD_SIZE && ea.cmdidx != CMD_write && ea.cmdidx != CMD_read) { if (ea.cmdidx >= 0) ea.argt = excmd_get_argt(ea.cmdidx); if ((ea.argt & EX_BANG) && *p == '!') { ea.forceit = TRUE; p = skipwhite(p + 1); } } switch (ea.cmdidx) { case CMD_def: case CMD_function: ea.arg = p; line = compile_nested_function(&ea, &cctx); break; case CMD_return: line = compile_return(p, check_return_type, local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx); cctx.ctx_had_return = TRUE; break; case CMD_let: emsg(_(e_cannot_use_let_in_vim9_script)); break; case CMD_var: case CMD_final: case CMD_const: case CMD_increment: case CMD_decrement: line = compile_assignment(p, &ea, ea.cmdidx, &cctx); if (line == p) line = NULL; break; case CMD_unlet: case CMD_unlockvar: case CMD_lockvar: line = compile_unletlock(p, &ea, &cctx); break; case CMD_import: emsg(_(e_import_can_only_be_used_in_script)); line = NULL; break; case CMD_if: line = compile_if(p, &cctx); break; case CMD_elseif: line = compile_elseif(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_else: line = compile_else(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endif: line = compile_endif(p, &cctx); break; case CMD_while: line = compile_while(p, &cctx); break; case CMD_endwhile: line = compile_endwhile(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_for: line = compile_for(p, &cctx); break; case CMD_endfor: line = compile_endfor(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_continue: line = compile_continue(p, &cctx); break; case CMD_break: line = compile_break(p, &cctx); break; case CMD_try: line = compile_try(p, &cctx); break; case CMD_catch: line = compile_catch(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_finally: line = compile_finally(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endtry: line = compile_endtry(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_throw: line = compile_throw(p, &cctx); break; case CMD_eval: line = compile_eval(p, &cctx); break; case CMD_echo: case CMD_echon: case CMD_execute: case CMD_echomsg: case CMD_echoerr: case CMD_echoconsole: line = compile_mult_expr(p, ea.cmdidx, &cctx); break; case CMD_put: ea.cmd = cmd; line = compile_put(p, &ea, &cctx); break; case CMD_substitute: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; if (cctx.ctx_skip == SKIP_YES) line = (char_u *)""; else { ea.arg = p; line = compile_substitute(line, &ea, &cctx); } break; case CMD_redir: ea.arg = p; line = compile_redir(line, &ea, &cctx); break; case CMD_cexpr: case CMD_lexpr: case CMD_caddexpr: case CMD_laddexpr: case CMD_cgetexpr: case CMD_lgetexpr: #ifdef FEAT_QUICKFIX ea.arg = p; line = compile_cexpr(line, &ea, &cctx); #else ex_ni(&ea); line = NULL; #endif break; case CMD_append: case CMD_change: case CMD_insert: case CMD_k: case CMD_t: case CMD_xit: not_in_vim9(&ea); goto erret; case CMD_SIZE: if (cctx.ctx_skip != SKIP_YES) { semsg(_(e_invalid_command_str), ea.cmd); goto erret; } // We don't check for a next command here. 
line = (char_u *)""; break; case CMD_lua: case CMD_mzscheme: case CMD_perl: case CMD_py3: case CMD_python3: case CMD_python: case CMD_pythonx: case CMD_ruby: case CMD_tcl: ea.arg = p; if (vim_strchr(line, '\n') == NULL) line = compile_exec(line, &ea, &cctx); else // heredoc lines have been concatenated with NL // characters in get_function_body() line = compile_script(line, &cctx); break; case CMD_global: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; // FALLTHROUGH default: // Not recognized, execute with do_cmdline_cmd(). ea.arg = p; line = compile_exec(line, &ea, &cctx); break; } nextline: if (line == NULL) goto erret; line = skipwhite(line); // Undo any command modifiers. generate_undo_cmdmods(&cctx); if (cctx.ctx_type_stack.ga_len < 0) { iemsg("Type stack underflow"); goto erret; } } if (cctx.ctx_scope != NULL) { if (cctx.ctx_scope->se_type == IF_SCOPE) emsg(_(e_endif)); else if (cctx.ctx_scope->se_type == WHILE_SCOPE) emsg(_(e_endwhile)); else if (cctx.ctx_scope->se_type == FOR_SCOPE) emsg(_(e_endfor)); else emsg(_(e_missing_rcurly)); goto erret; } if (!cctx.ctx_had_return) { if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN) ufunc->uf_ret_type = &t_void; else if (ufunc->uf_ret_type->tt_type != VAR_VOID) { emsg(_(e_missing_return_statement)); goto erret; } // Return void if there is no return at the end. generate_instr(&cctx, ISN_RETURN_VOID); } // When compiled with ":silent!" and there was an error don't consider the // function compiled. if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; dfunc->df_deleted = FALSE; dfunc->df_script_seq = current_sctx.sc_seq; #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE) { dfunc->df_instr_prof = instr->ga_data; dfunc->df_instr_prof_count = instr->ga_len; } else #endif if (cctx.ctx_compile_type == CT_DEBUG) { dfunc->df_instr_debug = instr->ga_data; dfunc->df_instr_debug_count = instr->ga_len; } else { dfunc->df_instr = instr->ga_data; dfunc->df_instr_count = instr->ga_len; } dfunc->df_varcount = dfunc->df_var_names.ga_len; dfunc->df_has_closure = cctx.ctx_has_closure; if (cctx.ctx_outer_used) ufunc->uf_flags |= FC_CLOSURE; ufunc->uf_def_status = UF_COMPILED; } ret = OK; erret: if (ufunc->uf_def_status == UF_COMPILING) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; // Compiling aborted, free the generated instructions. clear_instr_ga(instr); VIM_CLEAR(dfunc->df_name); ga_clear_strings(&dfunc->df_var_names); // If using the last entry in the table and it was added above, we // might as well remove it. if (!dfunc->df_deleted && new_def_function && ufunc->uf_dfunc_idx == def_functions.ga_len - 1) { --def_functions.ga_len; ufunc->uf_dfunc_idx = 0; } ufunc->uf_def_status = UF_COMPILE_ERROR; while (cctx.ctx_scope != NULL) drop_scope(&cctx); if (errormsg != NULL) emsg(errormsg); else if (did_emsg == did_emsg_before) emsg(_(e_compiling_def_function_failed)); } if (cctx.ctx_redir_lhs.lhs_name != NULL) { if (ret == OK) { emsg(_(e_missing_redir_end)); ret = FAIL; } vim_free(cctx.ctx_redir_lhs.lhs_name); vim_free(cctx.ctx_redir_lhs.lhs_whole); } current_sctx = save_current_sctx; estack_compiling = save_estack_compiling; cmdmod.cmod_flags = save_cmod_flags; if (do_estack_push) estack_pop(); vim_free(line_to_free); free_imported(&cctx); free_locals(&cctx); ga_clear(&cctx.ctx_type_stack); return ret; }
compile_def_function( ufunc_T *ufunc, int check_return_type, compiletype_T compile_type, cctx_T *outer_cctx) { char_u *line = NULL; char_u *line_to_free = NULL; char_u *p; char *errormsg = NULL; // error message cctx_T cctx; garray_T *instr; int did_emsg_before = did_emsg; int did_emsg_silent_before = did_emsg_silent; int ret = FAIL; sctx_T save_current_sctx = current_sctx; int save_estack_compiling = estack_compiling; int save_cmod_flags = cmdmod.cmod_flags; int do_estack_push; int new_def_function = FALSE; #ifdef FEAT_PROFILE int prof_lnum = -1; #endif int debug_lnum = -1; // When using a function that was compiled before: Free old instructions. // The index is reused. Otherwise add a new entry in "def_functions". if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; isn_T *instr_dest = NULL; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE instr_dest = dfunc->df_instr_prof; break; #endif case CT_NONE: instr_dest = dfunc->df_instr; break; case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break; } if (instr_dest != NULL) // Was compiled in this mode before: Free old instructions. delete_def_function_contents(dfunc, FALSE); ga_clear_strings(&dfunc->df_var_names); } else { if (add_def_function(ufunc) == FAIL) return FAIL; new_def_function = TRUE; } ufunc->uf_def_status = UF_COMPILING; CLEAR_FIELD(cctx); cctx.ctx_compile_type = compile_type; cctx.ctx_ufunc = ufunc; cctx.ctx_lnum = -1; cctx.ctx_outer = outer_cctx; ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10); ga_init2(&cctx.ctx_type_stack, sizeof(type_T *), 50); ga_init2(&cctx.ctx_imports, sizeof(imported_T), 10); cctx.ctx_type_list = &ufunc->uf_type_list; ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50); instr = &cctx.ctx_instr; // Set the context to the function, it may be compiled when called from // another script. Set the script version to the most modern one. // The line number will be set in next_line_from_context(). current_sctx = ufunc->uf_script_ctx; current_sctx.sc_version = SCRIPT_VERSION_VIM9; // Don't use the flag from ":legacy" here. cmdmod.cmod_flags &= ~CMOD_LEGACY; // Make sure error messages are OK. do_estack_push = !estack_top_is_ufunc(ufunc, 1); if (do_estack_push) estack_push_ufunc(ufunc, 1); estack_compiling = TRUE; if (ufunc->uf_def_args.ga_len > 0) { int count = ufunc->uf_def_args.ga_len; int first_def_arg = ufunc->uf_args.ga_len - count; int i; char_u *arg; int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0); int did_set_arg_type = FALSE; // Produce instructions for the default values of optional arguments. SOURCING_LNUM = 0; // line number unknown for (i = 0; i < count; ++i) { garray_T *stack = &cctx.ctx_type_stack; type_T *val_type; int arg_idx = first_def_arg + i; where_T where = WHERE_INIT; int r; int jump_instr_idx = instr->ga_len; isn_T *isn; // Use a JUMP_IF_ARG_SET instruction to skip if the value was given. if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL) goto erret; // Make sure later arguments are not found. ufunc->uf_args_visible = arg_idx; arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i]; r = compile_expr0(&arg, &cctx); if (r == FAIL) goto erret; // If no type specified use the type of the default value. // Otherwise check that the default value type matches the // specified type. 
val_type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; where.wt_index = arg_idx + 1; if (ufunc->uf_arg_types[arg_idx] == &t_unknown) { did_set_arg_type = TRUE; ufunc->uf_arg_types[arg_idx] = val_type; } else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx], -1, where, &cctx, FALSE, FALSE) == FAIL) goto erret; if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL) goto erret; // set instruction index in JUMP_IF_ARG_SET to here isn = ((isn_T *)instr->ga_data) + jump_instr_idx; isn->isn_arg.jumparg.jump_where = instr->ga_len; } if (did_set_arg_type) set_function_type(ufunc); } ufunc->uf_args_visible = ufunc->uf_args.ga_len; /* * Loop over all the lines of the function and generate instructions. */ for (;;) { exarg_T ea; int starts_with_colon = FALSE; char_u *cmd; cmdmod_T local_cmdmod; // Bail out on the first error to avoid a flood of errors and report // the right line number when inside try/catch. if (did_emsg_before != did_emsg) goto erret; if (line != NULL && *line == '|') // the line continues after a '|' ++line; else if (line != NULL && *skipwhite(line) != NUL && !(*line == '#' && (line == cctx.ctx_line_start || VIM_ISWHITE(line[-1])))) { semsg(_(e_trailing_arg), line); goto erret; } else if (line != NULL && vim9_bad_comment(skipwhite(line))) goto erret; else { line = next_line_from_context(&cctx, FALSE); if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len) { // beyond the last line #ifdef FEAT_PROFILE if (cctx.ctx_skip != SKIP_YES) may_generate_prof_end(&cctx, prof_lnum); #endif break; } // Make a copy, splitting off nextcmd and removing trailing spaces // may change it. if (line != NULL) { line = vim_strsave(line); vim_free(line_to_free); line_to_free = line; } } CLEAR_FIELD(ea); ea.cmdlinep = &line; ea.cmd = skipwhite(line); if (*ea.cmd == '#') { // "#" starts a comment line = (char_u *)""; continue; } #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum && cctx.ctx_skip != SKIP_YES) { may_generate_prof_end(&cctx, prof_lnum); prof_lnum = cctx.ctx_lnum; generate_instr(&cctx, ISN_PROF_START); } #endif if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum && cctx.ctx_skip != SKIP_YES) { debug_lnum = cctx.ctx_lnum; generate_instr_debug(&cctx); } cctx.ctx_prev_lnum = cctx.ctx_lnum + 1; // Some things can be recognized by the first character. switch (*ea.cmd) { case '}': { // "}" ends a block scope scopetype_T stype = cctx.ctx_scope == NULL ? NO_SCOPE : cctx.ctx_scope->se_type; if (stype == BLOCK_SCOPE) { compile_endblock(&cctx); line = ea.cmd; } else { emsg(_(e_using_rcurly_outside_if_block_scope)); goto erret; } if (line != NULL) line = skipwhite(ea.cmd + 1); continue; } case '{': // "{" starts a block scope // "{'a': 1}->func() is something else if (ends_excmd(*skipwhite(ea.cmd + 1))) { line = compile_block(ea.cmd, &cctx); continue; } break; } /* * COMMAND MODIFIERS */ cctx.ctx_has_cmdmod = FALSE; if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE) == FAIL) { if (errormsg != NULL) goto erret; // empty line or comment line = (char_u *)""; continue; } generate_cmdmods(&cctx, &local_cmdmod); undo_cmdmod(&local_cmdmod); // Check if there was a colon after the last command modifier or before // the current position. 
for (p = ea.cmd; p >= line; --p) { if (*p == ':') starts_with_colon = TRUE; if (p < ea.cmd && !VIM_ISWHITE(*p)) break; } // Skip ":call" to get to the function name, unless using :legacy p = ea.cmd; if (!(local_cmdmod.cmod_flags & CMOD_LEGACY)) { if (checkforcmd(&ea.cmd, "call", 3)) { if (*ea.cmd == '(') // not for "call()" ea.cmd = p; else ea.cmd = skipwhite(ea.cmd); } if (!starts_with_colon) { int assign; // Check for assignment after command modifiers. assign = may_compile_assignment(&ea, &line, &cctx); if (assign == OK) goto nextline; if (assign == FAIL) goto erret; } } /* * COMMAND after range * 'text'->func() should not be confused with 'a mark * "++nr" and "--nr" are eval commands * in "$ENV->func()" the "$" is not a range */ cmd = ea.cmd; if ((*cmd != '$' || starts_with_colon) && (starts_with_colon || !(*cmd == '\'' || (cmd[0] == cmd[1] && (*cmd == '+' || *cmd == '-'))))) { ea.cmd = skip_range(ea.cmd, TRUE, NULL); if (ea.cmd > cmd) { if (!starts_with_colon && !(local_cmdmod.cmod_flags & CMOD_LEGACY)) { semsg(_(e_colon_required_before_range_str), cmd); goto erret; } ea.addr_count = 1; if (ends_excmd2(line, ea.cmd)) { // A range without a command: jump to the line. generate_EXEC(&cctx, ISN_EXECRANGE, vim_strnsave(cmd, ea.cmd - cmd)); line = ea.cmd; goto nextline; } } } p = find_ex_command(&ea, NULL, starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY) ? NULL : item_exists, &cctx); if (p == NULL) { if (cctx.ctx_skip != SKIP_YES) emsg(_(e_ambiguous_use_of_user_defined_command)); goto erret; } // When using ":legacy cmd" always use compile_exec(). if (local_cmdmod.cmod_flags & CMOD_LEGACY) { char_u *start = ea.cmd; switch (ea.cmdidx) { case CMD_if: case CMD_elseif: case CMD_else: case CMD_endif: case CMD_for: case CMD_endfor: case CMD_continue: case CMD_break: case CMD_while: case CMD_endwhile: case CMD_try: case CMD_catch: case CMD_finally: case CMD_endtry: semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd); goto erret; default: break; } // ":legacy return expr" needs to be handled differently. if (checkforcmd(&start, "return", 4)) ea.cmdidx = CMD_return; else ea.cmdidx = CMD_legacy; } if (p == ea.cmd && ea.cmdidx != CMD_SIZE) { if (cctx.ctx_skip == SKIP_YES && ea.cmdidx != CMD_eval) { line += STRLEN(line); goto nextline; } else if (ea.cmdidx != CMD_eval) { // CMD_var cannot happen, compile_assignment() above would be // used. Most likely an assignment to a non-existing variable. 
semsg(_(e_command_not_recognized_str), ea.cmd); goto erret; } } if (cctx.ctx_had_return && ea.cmdidx != CMD_elseif && ea.cmdidx != CMD_else && ea.cmdidx != CMD_endif && ea.cmdidx != CMD_endfor && ea.cmdidx != CMD_endwhile && ea.cmdidx != CMD_catch && ea.cmdidx != CMD_finally && ea.cmdidx != CMD_endtry) { emsg(_(e_unreachable_code_after_return)); goto erret; } p = skipwhite(p); if (ea.cmdidx != CMD_SIZE && ea.cmdidx != CMD_write && ea.cmdidx != CMD_read) { if (ea.cmdidx >= 0) ea.argt = excmd_get_argt(ea.cmdidx); if ((ea.argt & EX_BANG) && *p == '!') { ea.forceit = TRUE; p = skipwhite(p + 1); } } switch (ea.cmdidx) { case CMD_def: case CMD_function: ea.arg = p; line = compile_nested_function(&ea, &cctx, &line_to_free); break; case CMD_return: line = compile_return(p, check_return_type, local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx); cctx.ctx_had_return = TRUE; break; case CMD_let: emsg(_(e_cannot_use_let_in_vim9_script)); break; case CMD_var: case CMD_final: case CMD_const: case CMD_increment: case CMD_decrement: line = compile_assignment(p, &ea, ea.cmdidx, &cctx); if (line == p) line = NULL; break; case CMD_unlet: case CMD_unlockvar: case CMD_lockvar: line = compile_unletlock(p, &ea, &cctx); break; case CMD_import: emsg(_(e_import_can_only_be_used_in_script)); line = NULL; break; case CMD_if: line = compile_if(p, &cctx); break; case CMD_elseif: line = compile_elseif(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_else: line = compile_else(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endif: line = compile_endif(p, &cctx); break; case CMD_while: line = compile_while(p, &cctx); break; case CMD_endwhile: line = compile_endwhile(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_for: line = compile_for(p, &cctx); break; case CMD_endfor: line = compile_endfor(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_continue: line = compile_continue(p, &cctx); break; case CMD_break: line = compile_break(p, &cctx); break; case CMD_try: line = compile_try(p, &cctx); break; case CMD_catch: line = compile_catch(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_finally: line = compile_finally(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endtry: line = compile_endtry(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_throw: line = compile_throw(p, &cctx); break; case CMD_eval: line = compile_eval(p, &cctx); break; case CMD_echo: case CMD_echon: case CMD_execute: case CMD_echomsg: case CMD_echoerr: case CMD_echoconsole: line = compile_mult_expr(p, ea.cmdidx, &cctx); break; case CMD_put: ea.cmd = cmd; line = compile_put(p, &ea, &cctx); break; case CMD_substitute: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; if (cctx.ctx_skip == SKIP_YES) line = (char_u *)""; else { ea.arg = p; line = compile_substitute(line, &ea, &cctx); } break; case CMD_redir: ea.arg = p; line = compile_redir(line, &ea, &cctx); break; case CMD_cexpr: case CMD_lexpr: case CMD_caddexpr: case CMD_laddexpr: case CMD_cgetexpr: case CMD_lgetexpr: #ifdef FEAT_QUICKFIX ea.arg = p; line = compile_cexpr(line, &ea, &cctx); #else ex_ni(&ea); line = NULL; #endif break; case CMD_append: case CMD_change: case CMD_insert: case CMD_k: case CMD_t: case CMD_xit: not_in_vim9(&ea); goto erret; case CMD_SIZE: if (cctx.ctx_skip != SKIP_YES) { semsg(_(e_invalid_command_str), ea.cmd); goto erret; } // We don't check for a next command here. 
line = (char_u *)""; break; case CMD_lua: case CMD_mzscheme: case CMD_perl: case CMD_py3: case CMD_python3: case CMD_python: case CMD_pythonx: case CMD_ruby: case CMD_tcl: ea.arg = p; if (vim_strchr(line, '\n') == NULL) line = compile_exec(line, &ea, &cctx); else // heredoc lines have been concatenated with NL // characters in get_function_body() line = compile_script(line, &cctx); break; case CMD_global: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; // FALLTHROUGH default: // Not recognized, execute with do_cmdline_cmd(). ea.arg = p; line = compile_exec(line, &ea, &cctx); break; } nextline: if (line == NULL) goto erret; line = skipwhite(line); // Undo any command modifiers. generate_undo_cmdmods(&cctx); if (cctx.ctx_type_stack.ga_len < 0) { iemsg("Type stack underflow"); goto erret; } } if (cctx.ctx_scope != NULL) { if (cctx.ctx_scope->se_type == IF_SCOPE) emsg(_(e_endif)); else if (cctx.ctx_scope->se_type == WHILE_SCOPE) emsg(_(e_endwhile)); else if (cctx.ctx_scope->se_type == FOR_SCOPE) emsg(_(e_endfor)); else emsg(_(e_missing_rcurly)); goto erret; } if (!cctx.ctx_had_return) { if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN) ufunc->uf_ret_type = &t_void; else if (ufunc->uf_ret_type->tt_type != VAR_VOID) { emsg(_(e_missing_return_statement)); goto erret; } // Return void if there is no return at the end. generate_instr(&cctx, ISN_RETURN_VOID); } // When compiled with ":silent!" and there was an error don't consider the // function compiled. if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; dfunc->df_deleted = FALSE; dfunc->df_script_seq = current_sctx.sc_seq; #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE) { dfunc->df_instr_prof = instr->ga_data; dfunc->df_instr_prof_count = instr->ga_len; } else #endif if (cctx.ctx_compile_type == CT_DEBUG) { dfunc->df_instr_debug = instr->ga_data; dfunc->df_instr_debug_count = instr->ga_len; } else { dfunc->df_instr = instr->ga_data; dfunc->df_instr_count = instr->ga_len; } dfunc->df_varcount = dfunc->df_var_names.ga_len; dfunc->df_has_closure = cctx.ctx_has_closure; if (cctx.ctx_outer_used) ufunc->uf_flags |= FC_CLOSURE; ufunc->uf_def_status = UF_COMPILED; } ret = OK; erret: if (ufunc->uf_def_status == UF_COMPILING) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; // Compiling aborted, free the generated instructions. clear_instr_ga(instr); VIM_CLEAR(dfunc->df_name); ga_clear_strings(&dfunc->df_var_names); // If using the last entry in the table and it was added above, we // might as well remove it. if (!dfunc->df_deleted && new_def_function && ufunc->uf_dfunc_idx == def_functions.ga_len - 1) { --def_functions.ga_len; ufunc->uf_dfunc_idx = 0; } ufunc->uf_def_status = UF_COMPILE_ERROR; while (cctx.ctx_scope != NULL) drop_scope(&cctx); if (errormsg != NULL) emsg(errormsg); else if (did_emsg == did_emsg_before) emsg(_(e_compiling_def_function_failed)); } if (cctx.ctx_redir_lhs.lhs_name != NULL) { if (ret == OK) { emsg(_(e_missing_redir_end)); ret = FAIL; } vim_free(cctx.ctx_redir_lhs.lhs_name); vim_free(cctx.ctx_redir_lhs.lhs_whole); } current_sctx = save_current_sctx; estack_compiling = save_estack_compiling; cmdmod.cmod_flags = save_cmod_flags; if (do_estack_push) estack_pop(); vim_free(line_to_free); free_imported(&cctx); free_locals(&cctx); ga_clear(&cctx.ctx_type_stack); return ret; }
{'added': [(815, 'compile_nested_function(exarg_T *eap, cctx_T *cctx, char_u **line_to_free)'), (820, ' int\t\toff;'), (821, ' char_u\t*func_name;'), (871, ''), (872, ' // This may free the current line, make a copy of the name.'), (873, ' off = is_global ? 2 : 0;'), (874, ' func_name = vim_strnsave(name_start + off, name_end - name_start - off);'), (875, ' if (func_name == NULL)'), (876, ' {'), (877, '\tr = FAIL;'), (878, '\tgoto theend;'), (879, ' }'), (880, ''), (881, ' ufunc = define_function(eap, lambda_name, line_to_free);'), (926, '\tr = generate_NEWFUNC(cctx, lambda_name, func_name);'), (927, '\tfunc_name = NULL;'), (928, '\tlambda_name = NULL;'), (933, '\tlvar_T\t*lvar = reserve_local(cctx, func_name, name_end - name_start,'), (945, ' vim_free(func_name);'), (2870, '\t\t line = compile_nested_function(&ea, &cctx, &line_to_free);')], 'deleted': [(815, 'compile_nested_function(exarg_T *eap, cctx_T *cctx)'), (869, ' ufunc = define_function(eap, lambda_name);'), (914, '\tchar_u *func_name = vim_strnsave(name_start + 2,'), (915, '\t\t\t\t\t\t name_end - name_start - 2);'), (916, ''), (917, '\tif (func_name == NULL)'), (918, '\t r = FAIL;'), (919, '\telse'), (920, '\t{'), (921, '\t r = generate_NEWFUNC(cctx, lambda_name, func_name);'), (922, '\t lambda_name = NULL;'), (923, '\t}'), (928, '\tlvar_T\t*lvar = reserve_local(cctx, name_start, name_end - name_start,'), (2864, '\t\t line = compile_nested_function(&ea, &cctx);')]}
20
14
2545
14097
https://github.com/vim/vim
CVE-2021-4173
['CWE-416']
vim9compile.c
compile_nested_function
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * vim9compile.c: compiling a :def function */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) // When not generating protos this is included in proto.h #ifdef PROTO # include "vim9.h" #endif // Functions defined with :def are stored in this growarray. // They are never removed, so that they can be found by index. // Deleted functions have the df_deleted flag set. garray_T def_functions = {0, 0, sizeof(dfunc_T), 50, NULL}; static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted); /* * Lookup variable "name" in the local scope and return it in "lvar". * "lvar->lv_from_outer" is incremented accordingly. * If "lvar" is NULL only check if the variable can be found. * Return FAIL if not found. */ int lookup_local(char_u *name, size_t len, lvar_T *lvar, cctx_T *cctx) { int idx; lvar_T *lvp; if (len == 0) return FAIL; // Find local in current function scope. for (idx = 0; idx < cctx->ctx_locals.ga_len; ++idx) { lvp = ((lvar_T *)cctx->ctx_locals.ga_data) + idx; if (STRNCMP(name, lvp->lv_name, len) == 0 && STRLEN(lvp->lv_name) == len) { if (lvar != NULL) { *lvar = *lvp; lvar->lv_from_outer = 0; } return OK; } } // Find local in outer function scope. if (cctx->ctx_outer != NULL) { if (lookup_local(name, len, lvar, cctx->ctx_outer) == OK) { if (lvar != NULL) { cctx->ctx_outer_used = TRUE; ++lvar->lv_from_outer; } return OK; } } return FAIL; } /* * Lookup an argument in the current function and an enclosing function. * Returns the argument index in "idxp" * Returns the argument type in "type" * Sets "gen_load_outer" to TRUE if found in outer scope. * Returns OK when found, FAIL otherwise. */ int arg_exists( char_u *name, size_t len, int *idxp, type_T **type, int *gen_load_outer, cctx_T *cctx) { int idx; char_u *va_name; if (len == 0) return FAIL; for (idx = 0; idx < cctx->ctx_ufunc->uf_args_visible; ++idx) { char_u *arg = FUNCARG(cctx->ctx_ufunc, idx); if (STRNCMP(name, arg, len) == 0 && arg[len] == NUL) { if (idxp != NULL) { // Arguments are located above the frame pointer. One further // if there is a vararg argument *idxp = idx - (cctx->ctx_ufunc->uf_args.ga_len + STACK_FRAME_SIZE) + (cctx->ctx_ufunc->uf_va_name != NULL ? -1 : 0); if (cctx->ctx_ufunc->uf_arg_types != NULL) *type = cctx->ctx_ufunc->uf_arg_types[idx]; else *type = &t_any; } return OK; } } va_name = cctx->ctx_ufunc->uf_va_name; if (va_name != NULL && STRNCMP(name, va_name, len) == 0 && va_name[len] == NUL) { if (idxp != NULL) { // varargs is always the last argument *idxp = -STACK_FRAME_SIZE - 1; *type = cctx->ctx_ufunc->uf_va_type; } return OK; } if (cctx->ctx_outer != NULL) { // Lookup the name for an argument of the outer function. if (arg_exists(name, len, idxp, type, gen_load_outer, cctx->ctx_outer) == OK) { if (gen_load_outer != NULL) ++*gen_load_outer; return OK; } } return FAIL; } /* * Lookup a script-local variable in the current script, possibly defined in a * block that contains the function "cctx->ctx_ufunc". * "cctx" is NULL at the script level. * If "len" is <= 0 "name" must be NUL terminated. * Return NULL when not found. 
*/ static sallvar_T * find_script_var(char_u *name, size_t len, cctx_T *cctx) { scriptitem_T *si = SCRIPT_ITEM(current_sctx.sc_sid); hashitem_T *hi; int cc; sallvar_T *sav; sallvar_T *found_sav; ufunc_T *ufunc; // Find the list of all script variables with the right name. if (len > 0) { cc = name[len]; name[len] = NUL; } hi = hash_find(&si->sn_all_vars.dv_hashtab, name); if (len > 0) name[len] = cc; if (HASHITEM_EMPTY(hi)) return NULL; sav = HI2SAV(hi); if (sav->sav_block_id == 0) // variable defined in the top script scope is always visible return sav; if (cctx == NULL) { // Not in a function scope, find variable with block id equal to or // smaller than the current block id. while (sav != NULL) { if (sav->sav_block_id <= si->sn_current_block_id) break; sav = sav->sav_next; } return sav; } // Go over the variables with this name and find one that was visible // from the function. ufunc = cctx->ctx_ufunc; found_sav = sav; while (sav != NULL) { int idx; // Go over the blocks that this function was defined in. If the // variable block ID matches it was visible to the function. for (idx = 0; idx < ufunc->uf_block_depth; ++idx) if (ufunc->uf_block_ids[idx] == sav->sav_block_id) return sav; sav = sav->sav_next; } // Not found, assume variable at script level was visible. return found_sav; } /* * Return TRUE if the script context is Vim9 script. */ int script_is_vim9() { return SCRIPT_ITEM(current_sctx.sc_sid)->sn_version == SCRIPT_VERSION_VIM9; } /* * Lookup a variable (without s: prefix) in the current script. * "cctx" is NULL at the script level. * Returns OK or FAIL. */ int script_var_exists(char_u *name, size_t len, cctx_T *cctx) { if (current_sctx.sc_sid <= 0) return FAIL; if (script_is_vim9()) { // Check script variables that were visible where the function was // defined. if (find_script_var(name, len, cctx) != NULL) return OK; } else { hashtab_T *ht = &SCRIPT_VARS(current_sctx.sc_sid); dictitem_T *di; int cc; // Check script variables that are currently visible cc = name[len]; name[len] = NUL; di = find_var_in_ht(ht, 0, name, TRUE); name[len] = cc; if (di != NULL) return OK; } return FAIL; } /* * Return TRUE if "name" is a local variable, argument, script variable or * imported. */ static int variable_exists(char_u *name, size_t len, cctx_T *cctx) { return (cctx != NULL && (lookup_local(name, len, NULL, cctx) == OK || arg_exists(name, len, NULL, NULL, NULL, cctx) == OK)) || script_var_exists(name, len, cctx) == OK || find_imported(name, len, cctx) != NULL; } /* * Return TRUE if "name" is a local variable, argument, script variable, * imported or function. */ static int item_exists(char_u *name, size_t len, int cmd UNUSED, cctx_T *cctx) { int is_global; char_u *p; if (variable_exists(name, len, cctx)) return TRUE; // This is similar to what is in lookup_scriptitem(): // Find a function, so that a following "->" works. // Require "(" or "->" to follow, "Cmd" is a user command while "Cmd()" is // a function call. p = skipwhite(name + len); if (name[len] == '(' || (p[0] == '-' && p[1] == '>')) { // Do not check for an internal function, since it might also be a // valid command, such as ":split" versus "split()". // Skip "g:" before a function name. is_global = (name[0] == 'g' && name[1] == ':'); return find_func(is_global ? name + 2 : name, is_global, cctx) != NULL; } return FALSE; } /* * Check if "p[len]" is already defined, either in script "import_sid" or in * compilation context "cctx". "cctx" is NULL at the script level. * Does not check the global namespace. 
* If "is_arg" is TRUE the error message is for an argument name. * Return FAIL and give an error if it defined. */ int check_defined(char_u *p, size_t len, cctx_T *cctx, int is_arg) { int c = p[len]; ufunc_T *ufunc = NULL; // underscore argument is OK if (len == 1 && *p == '_') return OK; if (script_var_exists(p, len, cctx) == OK) { if (is_arg) semsg(_(e_argument_already_declared_in_script_str), p); else semsg(_(e_variable_already_declared_in_script_str), p); return FAIL; } p[len] = NUL; if ((cctx != NULL && (lookup_local(p, len, NULL, cctx) == OK || arg_exists(p, len, NULL, NULL, NULL, cctx) == OK)) || find_imported(p, len, cctx) != NULL || (ufunc = find_func_even_dead(p, FALSE, cctx)) != NULL) { // A local or script-local function can shadow a global function. if (ufunc == NULL || ((ufunc->uf_flags & FC_DEAD) == 0 && (!func_is_global(ufunc) || (p[0] == 'g' && p[1] == ':')))) { if (is_arg) semsg(_(e_argument_name_shadows_existing_variable_str), p); else semsg(_(e_name_already_defined_str), p); p[len] = c; return FAIL; } } p[len] = c; return OK; } /* * Return TRUE if "actual" could be "expected" and a runtime typecheck is to be * used. Return FALSE if the types will never match. */ static int use_typecheck(type_T *actual, type_T *expected) { if (actual->tt_type == VAR_ANY || actual->tt_type == VAR_UNKNOWN || (actual->tt_type == VAR_FUNC && (expected->tt_type == VAR_FUNC || expected->tt_type == VAR_PARTIAL) && (actual->tt_member == &t_any || actual->tt_member == &t_unknown || actual->tt_argcount < 0) && (actual->tt_member == &t_unknown || (actual->tt_member == &t_void) == (expected->tt_member == &t_void)))) return TRUE; if ((actual->tt_type == VAR_LIST || actual->tt_type == VAR_DICT) && actual->tt_type == expected->tt_type) // This takes care of a nested list or dict. return use_typecheck(actual->tt_member, expected->tt_member); return FALSE; } /* * Check that * - "actual" matches "expected" type or * - "actual" is a type that can be "expected" type: add a runtime check; or * - return FAIL. * If "actual_is_const" is TRUE then the type won't change at runtime, do not * generate a TYPECHECK. */ static int need_type_where( type_T *actual, type_T *expected, int offset, where_T where, cctx_T *cctx, int silent, int actual_is_const) { int ret; if (expected == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { // Using "0", "1" or the result of an expression with "&&" or "||" as a // boolean is OK but requires a conversion. generate_2BOOL(cctx, FALSE, offset); return OK; } ret = check_type_maybe(expected, actual, FALSE, where); if (ret == OK) return OK; // If the actual type can be the expected type add a runtime check. // If it's a constant a runtime check makes no sense. if (!actual_is_const && ret == MAYBE && use_typecheck(actual, expected)) { generate_TYPECHECK(cctx, expected, offset, where.wt_index); return OK; } if (!silent) type_mismatch_where(expected, actual, where); return FAIL; } int need_type( type_T *actual, type_T *expected, int offset, int arg_idx, cctx_T *cctx, int silent, int actual_is_const) { where_T where = WHERE_INIT; where.wt_index = arg_idx; return need_type_where(actual, expected, offset, where, cctx, silent, actual_is_const); } /* * Reserve space for a local variable. * Return the variable or NULL if it failed. 
*/ lvar_T * reserve_local( cctx_T *cctx, char_u *name, size_t len, int isConst, type_T *type) { lvar_T *lvar; dfunc_T *dfunc; if (arg_exists(name, len, NULL, NULL, NULL, cctx) == OK) { emsg_namelen(_(e_str_is_used_as_argument), name, (int)len); return NULL; } if (GA_GROW_FAILS(&cctx->ctx_locals, 1)) return NULL; lvar = ((lvar_T *)cctx->ctx_locals.ga_data) + cctx->ctx_locals.ga_len++; CLEAR_POINTER(lvar); // Every local variable uses the next entry on the stack. We could re-use // the last ones when leaving a scope, but then variables used in a closure // might get overwritten. To keep things simple do not re-use stack // entries. This is less efficient, but memory is cheap these days. dfunc = ((dfunc_T *)def_functions.ga_data) + cctx->ctx_ufunc->uf_dfunc_idx; lvar->lv_idx = dfunc->df_var_names.ga_len; lvar->lv_name = vim_strnsave(name, len == 0 ? STRLEN(name) : len); lvar->lv_const = isConst; lvar->lv_type = type; // Remember the name for debugging. if (GA_GROW_FAILS(&dfunc->df_var_names, 1)) return NULL; ((char_u **)dfunc->df_var_names.ga_data)[lvar->lv_idx] = vim_strsave(lvar->lv_name); ++dfunc->df_var_names.ga_len; return lvar; } /* * If "check_writable" is ASSIGN_CONST give an error if the variable was * defined with :final or :const, if "check_writable" is ASSIGN_FINAL give an * error if the variable was defined with :const. */ static int check_item_writable(svar_T *sv, int check_writable, char_u *name) { if ((check_writable == ASSIGN_CONST && sv->sv_const != 0) || (check_writable == ASSIGN_FINAL && sv->sv_const == ASSIGN_CONST)) { semsg(_(e_cannot_change_readonly_variable_str), name); return FAIL; } return OK; } /* * Find "name" in script-local items of script "sid". * Pass "check_writable" to check_item_writable(). * Returns the index in "sn_var_vals" if found. * If found but not in "sn_var_vals" returns -1. * If not found or the variable is not writable returns -2. */ int get_script_item_idx(int sid, char_u *name, int check_writable, cctx_T *cctx) { hashtab_T *ht; dictitem_T *di; scriptitem_T *si = SCRIPT_ITEM(sid); svar_T *sv; int idx; if (!SCRIPT_ID_VALID(sid)) return -1; if (sid == current_sctx.sc_sid) { sallvar_T *sav = find_script_var(name, 0, cctx); if (sav == NULL) return -2; idx = sav->sav_var_vals_idx; sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } // First look the name up in the hashtable. ht = &SCRIPT_VARS(sid); di = find_var_in_ht(ht, 0, name, TRUE); if (di == NULL) return -2; // Now find the svar_T index in sn_var_vals. for (idx = 0; idx < si->sn_var_vals.ga_len; ++idx) { sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (sv->sv_tv == &di->di_tv) { if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } } return -1; } /* * Find "name" in imported items of the current script or in "cctx" if not * NULL. */ imported_T * find_imported(char_u *name, size_t len, cctx_T *cctx) { int idx; if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) return NULL; if (cctx != NULL) for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx; if (len == 0 ? 
STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return find_imported_in_script(name, len, current_sctx.sc_sid); } imported_T * find_imported_in_script(char_u *name, size_t len, int sid) { scriptitem_T *si; int idx; if (!SCRIPT_ID_VALID(sid)) return NULL; si = SCRIPT_ITEM(sid); for (idx = 0; idx < si->sn_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)si->sn_imports.ga_data) + idx; if (len == 0 ? STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return NULL; } /* * Free all imported variables. */ static void free_imported(cctx_T *cctx) { int idx; for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx; vim_free(import->imp_name); } ga_clear(&cctx->ctx_imports); } /* * Called when checking for a following operator at "arg". When the rest of * the line is empty or only a comment, peek the next line. If there is a next * line return a pointer to it and set "nextp". * Otherwise skip over white space. */ char_u * may_peek_next_line(cctx_T *cctx, char_u *arg, char_u **nextp) { char_u *p = skipwhite(arg); *nextp = NULL; if (*p == NUL || (VIM_ISWHITE(*arg) && vim9_comment_start(p))) { *nextp = peek_next_line_from_context(cctx); if (*nextp != NULL) return *nextp; } return p; } /* * Return a pointer to the next line that isn't empty or only contains a * comment. Skips over white space. * Returns NULL if there is none. */ char_u * peek_next_line_from_context(cctx_T *cctx) { int lnum = cctx->ctx_lnum; while (++lnum < cctx->ctx_ufunc->uf_lines.ga_len) { char_u *line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[lnum]; char_u *p; // ignore NULLs inserted for continuation lines if (line != NULL) { p = skipwhite(line); if (vim9_bad_comment(p)) return NULL; if (*p != NUL && !vim9_comment_start(p)) return p; } } return NULL; } /* * Get the next line of the function from "cctx". * Skips over empty lines. Skips over comment lines if "skip_comment" is TRUE. * Returns NULL when at the end. */ char_u * next_line_from_context(cctx_T *cctx, int skip_comment) { char_u *line; do { ++cctx->ctx_lnum; if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len) { line = NULL; break; } line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; cctx->ctx_line_start = line; SOURCING_LNUM = cctx->ctx_lnum + 1; } while (line == NULL || *skipwhite(line) == NUL || (skip_comment && vim9_comment_start(skipwhite(line)))); return line; } /* * Skip over white space at "whitep" and assign to "*arg". * If "*arg" is at the end of the line, advance to the next line. * Also when "whitep" points to white space and "*arg" is on a "#". * Return FAIL if beyond the last line, "*arg" is unmodified then. */ int may_get_next_line(char_u *whitep, char_u **arg, cctx_T *cctx) { *arg = skipwhite(whitep); if (vim9_bad_comment(*arg)) return FAIL; if (**arg == NUL || (VIM_ISWHITE(*whitep) && vim9_comment_start(*arg))) { char_u *next = next_line_from_context(cctx, TRUE); if (next == NULL) return FAIL; *arg = skipwhite(next); } return OK; } /* * Idem, and give an error when failed. */ int may_get_next_line_error(char_u *whitep, char_u **arg, cctx_T *cctx) { if (may_get_next_line(whitep, arg, cctx) == FAIL) { SOURCING_LNUM = cctx->ctx_lnum + 1; emsg(_(e_line_incomplete)); return FAIL; } return OK; } /* * Get a line from the compilation context, compatible with exarg_T getline(). 
* Return a pointer to the line in allocated memory. * Return NULL for end-of-file or some error. */ static char_u * exarg_getline( int c UNUSED, void *cookie, int indent UNUSED, getline_opt_T options UNUSED) { cctx_T *cctx = (cctx_T *)cookie; char_u *p; for (;;) { if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len - 1) return NULL; ++cctx->ctx_lnum; p = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; // Comment lines result in NULL pointers, skip them. if (p != NULL) return vim_strsave(p); } } void fill_exarg_from_cctx(exarg_T *eap, cctx_T *cctx) { eap->getline = exarg_getline; eap->cookie = cctx; } /* * Return TRUE if "ufunc" should be compiled, taking into account whether * "profile" indicates profiling is to be done. */ int func_needs_compiling(ufunc_T *ufunc, compiletype_T compile_type) { switch (ufunc->uf_def_status) { case UF_TO_BE_COMPILED: return TRUE; case UF_COMPILED: { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE return dfunc->df_instr_prof == NULL; #endif case CT_NONE: return dfunc->df_instr == NULL; case CT_DEBUG: return dfunc->df_instr_debug == NULL; } } case UF_NOT_COMPILED: case UF_COMPILE_ERROR: case UF_COMPILING: break; } return FALSE; } /* * Compile a nested :def command. */ static char_u * compile_nested_function(exarg_T *eap, cctx_T *cctx) { int is_global = *eap->arg == 'g' && eap->arg[1] == ':'; char_u *name_start = eap->arg; char_u *name_end = to_name_end(eap->arg, TRUE); char_u *lambda_name; ufunc_T *ufunc; int r = FAIL; compiletype_T compile_type; if (eap->forceit) { emsg(_(e_cannot_use_bang_with_nested_def)); return NULL; } if (*name_start == '/') { name_end = skip_regexp(name_start + 1, '/', TRUE); if (*name_end == '/') ++name_end; set_nextcmd(eap, name_end); } if (name_end == name_start || *skipwhite(name_end) != '(') { if (!ends_excmd2(name_start, name_end)) { semsg(_(e_invalid_command_str), eap->cmd); return NULL; } // "def" or "def Name": list functions if (generate_DEF(cctx, name_start, name_end - name_start) == FAIL) return NULL; return eap->nextcmd == NULL ? (char_u *)"" : eap->nextcmd; } // Only g:Func() can use a namespace. if (name_start[1] == ':' && !is_global) { semsg(_(e_namespace_not_supported_str), name_start); return NULL; } if (check_defined(name_start, name_end - name_start, cctx, FALSE) == FAIL) return NULL; eap->arg = name_end; fill_exarg_from_cctx(eap, cctx); eap->forceit = FALSE; // We use the special <Lamba>99 name, but it's not really a lambda. lambda_name = vim_strsave(get_lambda_name()); if (lambda_name == NULL) return NULL; ufunc = define_function(eap, lambda_name); if (ufunc == NULL) { r = eap->skip ? OK : FAIL; goto theend; } // copy over the block scope IDs before compiling if (!is_global && cctx->ctx_ufunc->uf_block_depth > 0) { int block_depth = cctx->ctx_ufunc->uf_block_depth; ufunc->uf_block_ids = ALLOC_MULT(int, block_depth); if (ufunc->uf_block_ids != NULL) { mch_memmove(ufunc->uf_block_ids, cctx->ctx_ufunc->uf_block_ids, sizeof(int) * block_depth); ufunc->uf_block_depth = block_depth; } } compile_type = COMPILE_TYPE(ufunc); #ifdef FEAT_PROFILE // If the outer function is profiled, also compile the nested function for // profiling. 
if (cctx->ctx_compile_type == CT_PROFILE) compile_type = CT_PROFILE; #endif if (func_needs_compiling(ufunc, compile_type) && compile_def_function(ufunc, TRUE, compile_type, cctx) == FAIL) { func_ptr_unref(ufunc); goto theend; } #ifdef FEAT_PROFILE // When the outer function is compiled for profiling, the nested function // may be called without profiling. Compile it here in the right context. if (compile_type == CT_PROFILE && func_needs_compiling(ufunc, CT_NONE)) compile_def_function(ufunc, FALSE, CT_NONE, cctx); #endif if (is_global) { char_u *func_name = vim_strnsave(name_start + 2, name_end - name_start - 2); if (func_name == NULL) r = FAIL; else { r = generate_NEWFUNC(cctx, lambda_name, func_name); lambda_name = NULL; } } else { // Define a local variable for the function reference. lvar_T *lvar = reserve_local(cctx, name_start, name_end - name_start, TRUE, ufunc->uf_func_type); if (lvar == NULL) goto theend; if (generate_FUNCREF(cctx, ufunc) == FAIL) goto theend; r = generate_STORE(cctx, ISN_STORE, lvar->lv_idx, NULL); } theend: vim_free(lambda_name); return r == FAIL ? NULL : (char_u *)""; } /* * Return the length of an assignment operator, or zero if there isn't one. */ int assignment_len(char_u *p, int *heredoc) { if (*p == '=') { if (p[1] == '<' && p[2] == '<') { *heredoc = TRUE; return 3; } return 1; } if (vim_strchr((char_u *)"+-*/%", *p) != NULL && p[1] == '=') return 2; if (STRNCMP(p, "..=", 3) == 0) return 3; return 0; } /* * Generate the load instruction for "name". */ static void generate_loadvar( cctx_T *cctx, assign_dest_T dest, char_u *name, lvar_T *lvar, type_T *type) { switch (dest) { case dest_option: case dest_func_option: generate_LOAD(cctx, ISN_LOADOPT, 0, name, type); break; case dest_global: if (vim_strchr(name, AUTOLOAD_CHAR) == NULL) generate_LOAD(cctx, ISN_LOADG, 0, name + 2, type); else generate_LOAD(cctx, ISN_LOADAUTO, 0, name, type); break; case dest_buffer: generate_LOAD(cctx, ISN_LOADB, 0, name + 2, type); break; case dest_window: generate_LOAD(cctx, ISN_LOADW, 0, name + 2, type); break; case dest_tab: generate_LOAD(cctx, ISN_LOADT, 0, name + 2, type); break; case dest_script: compile_load_scriptvar(cctx, name + (name[1] == ':' ? 2 : 0), NULL, NULL, TRUE); break; case dest_env: // Include $ in the name here generate_LOAD(cctx, ISN_LOADENV, 0, name, type); break; case dest_reg: generate_LOAD(cctx, ISN_LOADREG, name[1], NULL, &t_string); break; case dest_vimvar: generate_LOADV(cctx, name + 2, TRUE); break; case dest_local: if (lvar->lv_from_outer > 0) generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type); else generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type); break; case dest_expr: // list or dict value should already be on the stack. break; } } /* * Skip over "[expr]" or ".member". * Does not check for any errors. 
*/ static char_u * skip_index(char_u *start) { char_u *p = start; if (*p == '[') { p = skipwhite(p + 1); (void)skip_expr(&p, NULL); p = skipwhite(p); if (*p == ']') return p + 1; return p; } // if (*p == '.') return to_name_end(p + 1, TRUE); } void vim9_declare_error(char_u *name) { char *scope = ""; switch (*name) { case 'g': scope = _("global"); break; case 'b': scope = _("buffer"); break; case 'w': scope = _("window"); break; case 't': scope = _("tab"); break; case 'v': scope = "v:"; break; case '$': semsg(_(e_cannot_declare_an_environment_variable), name); return; case '&': semsg(_(e_cannot_declare_an_option), name); return; case '@': semsg(_(e_cannot_declare_a_register_str), name); return; default: return; } semsg(_(e_cannot_declare_a_scope_variable), scope, name); } /* * For one assignment figure out the type of destination. Return it in "dest". * When not recognized "dest" is not set. * For an option "option_scope" is set. * For a v:var "vimvaridx" is set. * "type" is set to the destination type if known, unchanted otherwise. * Return FAIL if an error message was given. */ int get_var_dest( char_u *name, assign_dest_T *dest, int cmdidx, int *option_scope, int *vimvaridx, type_T **type, cctx_T *cctx) { char_u *p; if (*name == '&') { int cc; long numval; getoption_T opt_type; int opt_p_flags; *dest = dest_option; if (cmdidx == CMD_final || cmdidx == CMD_const) { emsg(_(e_const_option)); return FAIL; } p = name; p = find_option_end(&p, option_scope); if (p == NULL) { // cannot happen? emsg(_(e_unexpected_characters_in_assignment)); return FAIL; } cc = *p; *p = NUL; opt_type = get_option_value(skip_option_env_lead(name), &numval, NULL, &opt_p_flags, *option_scope); *p = cc; switch (opt_type) { case gov_unknown: semsg(_(e_unknown_option_str), name); return FAIL; case gov_string: case gov_hidden_string: if (opt_p_flags & P_FUNC) { // might be a Funcref, check the type later *type = &t_any; *dest = dest_func_option; } else { *type = &t_string; } break; case gov_bool: case gov_hidden_bool: *type = &t_bool; break; case gov_number: case gov_hidden_number: *type = &t_number; break; } } else if (*name == '$') { *dest = dest_env; *type = &t_string; } else if (*name == '@') { if (name[1] != '@' && (!valid_yank_reg(name[1], FALSE) || name[1] == '.')) { emsg_invreg(name[1]); return FAIL; } *dest = dest_reg; *type = name[1] == '#' ? &t_number_or_string : &t_string; } else if (STRNCMP(name, "g:", 2) == 0) { *dest = dest_global; } else if (STRNCMP(name, "b:", 2) == 0) { *dest = dest_buffer; } else if (STRNCMP(name, "w:", 2) == 0) { *dest = dest_window; } else if (STRNCMP(name, "t:", 2) == 0) { *dest = dest_tab; } else if (STRNCMP(name, "v:", 2) == 0) { typval_T *vtv; int di_flags; *vimvaridx = find_vim_var(name + 2, &di_flags); if (*vimvaridx < 0) { semsg(_(e_variable_not_found_str), name); return FAIL; } // We use the current value of "sandbox" here, is that OK? if (var_check_ro(di_flags, name, FALSE)) return FAIL; *dest = dest_vimvar; vtv = get_vim_var_tv(*vimvaridx); *type = typval2type_vimvar(vtv, cctx->ctx_type_list); } return OK; } static int is_decl_command(int cmdidx) { return cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_final || cmdidx == CMD_const; } /* * Figure out the LHS type and other properties for an assignment or one item * of ":unlet" with an index. * Returns OK or FAIL. 
*/ int compile_lhs( char_u *var_start, lhs_T *lhs, int cmdidx, int heredoc, int oplen, cctx_T *cctx) { char_u *var_end; int is_decl = is_decl_command(cmdidx); CLEAR_POINTER(lhs); lhs->lhs_dest = dest_local; lhs->lhs_vimvaridx = -1; lhs->lhs_scriptvar_idx = -1; // "dest_end" is the end of the destination, including "[expr]" or // ".name". // "var_end" is the end of the variable/option/etc. name. lhs->lhs_dest_end = skip_var_one(var_start, FALSE); if (*var_start == '@') var_end = var_start + 2; else { // skip over the leading "&", "&l:", "&g:" and "$" var_end = skip_option_env_lead(var_start); var_end = to_name_end(var_end, TRUE); } // "a: type" is declaring variable "a" with a type, not dict "a:". if (is_decl && lhs->lhs_dest_end == var_start + 2 && lhs->lhs_dest_end[-1] == ':') --lhs->lhs_dest_end; if (is_decl && var_end == var_start + 2 && var_end[-1] == ':') --var_end; lhs->lhs_end = lhs->lhs_dest_end; // compute the length of the destination without "[expr]" or ".name" lhs->lhs_varlen = var_end - var_start; lhs->lhs_varlen_total = lhs->lhs_varlen; lhs->lhs_name = vim_strnsave(var_start, lhs->lhs_varlen); if (lhs->lhs_name == NULL) return FAIL; if (lhs->lhs_dest_end > var_start + lhs->lhs_varlen) // Something follows after the variable: "var[idx]" or "var.key". lhs->lhs_has_index = TRUE; if (heredoc) lhs->lhs_type = &t_list_string; else lhs->lhs_type = &t_any; if (cctx->ctx_skip != SKIP_YES) { int declare_error = FALSE; if (get_var_dest(lhs->lhs_name, &lhs->lhs_dest, cmdidx, &lhs->lhs_opt_flags, &lhs->lhs_vimvaridx, &lhs->lhs_type, cctx) == FAIL) return FAIL; if (lhs->lhs_dest != dest_local && cmdidx != CMD_const && cmdidx != CMD_final) { // Specific kind of variable recognized. declare_error = is_decl; } else { // No specific kind of variable recognized, just a name. if (check_reserved_name(lhs->lhs_name) == FAIL) return FAIL; if (lookup_local(var_start, lhs->lhs_varlen, &lhs->lhs_local_lvar, cctx) == OK) lhs->lhs_lvar = &lhs->lhs_local_lvar; else { CLEAR_FIELD(lhs->lhs_arg_lvar); if (arg_exists(var_start, lhs->lhs_varlen, &lhs->lhs_arg_lvar.lv_idx, &lhs->lhs_arg_lvar.lv_type, &lhs->lhs_arg_lvar.lv_from_outer, cctx) == OK) { if (is_decl) { semsg(_(e_str_is_used_as_argument), lhs->lhs_name); return FAIL; } lhs->lhs_lvar = &lhs->lhs_arg_lvar; } } if (lhs->lhs_lvar != NULL) { if (is_decl) { semsg(_(e_variable_already_declared), lhs->lhs_name); return FAIL; } } else { int script_namespace = lhs->lhs_varlen > 1 && STRNCMP(var_start, "s:", 2) == 0; int script_var = (script_namespace ? script_var_exists(var_start + 2, lhs->lhs_varlen - 2, cctx) : script_var_exists(var_start, lhs->lhs_varlen, cctx)) == OK; imported_T *import = find_imported(var_start, lhs->lhs_varlen, cctx); if (script_namespace || script_var || import != NULL) { char_u *rawname = lhs->lhs_name + (lhs->lhs_name[1] == ':' ? 2 : 0); if (is_decl) { if (script_namespace) semsg(_(e_cannot_declare_script_variable_in_function), lhs->lhs_name); else semsg(_(e_variable_already_declared_in_script_str), lhs->lhs_name); return FAIL; } else if (cctx->ctx_ufunc->uf_script_ctx_version == SCRIPT_VERSION_VIM9 && script_namespace && !script_var && import == NULL) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } lhs->lhs_dest = dest_script; // existing script-local variables should have a type lhs->lhs_scriptvar_sid = current_sctx.sc_sid; if (import != NULL) lhs->lhs_scriptvar_sid = import->imp_sid; if (SCRIPT_ID_VALID(lhs->lhs_scriptvar_sid)) { // Check writable only when no index follows. 
lhs->lhs_scriptvar_idx = get_script_item_idx( lhs->lhs_scriptvar_sid, rawname, lhs->lhs_has_index ? ASSIGN_FINAL : ASSIGN_CONST, cctx); if (lhs->lhs_scriptvar_idx >= 0) { scriptitem_T *si = SCRIPT_ITEM( lhs->lhs_scriptvar_sid); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + lhs->lhs_scriptvar_idx; lhs->lhs_type = sv->sv_type; } } } else if (check_defined(var_start, lhs->lhs_varlen, cctx, FALSE) == FAIL) return FAIL; } } if (declare_error) { vim9_declare_error(lhs->lhs_name); return FAIL; } } // handle "a:name" as a name, not index "name" in "a" if (lhs->lhs_varlen > 1 || var_start[lhs->lhs_varlen] != ':') var_end = lhs->lhs_dest_end; if (lhs->lhs_dest != dest_option && lhs->lhs_dest != dest_func_option) { if (is_decl && *var_end == ':') { char_u *p; // parse optional type: "let var: type = expr" if (!VIM_ISWHITE(var_end[1])) { semsg(_(e_white_space_required_after_str_str), ":", var_end); return FAIL; } p = skipwhite(var_end + 1); lhs->lhs_type = parse_type(&p, cctx->ctx_type_list, TRUE); if (lhs->lhs_type == NULL) return FAIL; lhs->lhs_has_type = TRUE; lhs->lhs_end = p; } else if (lhs->lhs_lvar != NULL) lhs->lhs_type = lhs->lhs_lvar->lv_type; } if (oplen == 3 && !heredoc && lhs->lhs_dest != dest_global && !lhs->lhs_has_index && lhs->lhs_type->tt_type != VAR_STRING && lhs->lhs_type->tt_type != VAR_ANY) { emsg(_(e_can_only_concatenate_to_string)); return FAIL; } if (lhs->lhs_lvar == NULL && lhs->lhs_dest == dest_local && cctx->ctx_skip != SKIP_YES) { if (oplen > 1 && !heredoc) { // +=, /=, etc. require an existing variable semsg(_(e_cannot_use_operator_on_new_variable), lhs->lhs_name); return FAIL; } if (!is_decl) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } // Check the name is valid for a funcref. if ((lhs->lhs_type->tt_type == VAR_FUNC || lhs->lhs_type->tt_type == VAR_PARTIAL) && var_wrong_func_name(lhs->lhs_name, TRUE)) return FAIL; // New local variable. lhs->lhs_lvar = reserve_local(cctx, var_start, lhs->lhs_varlen, cmdidx == CMD_final || cmdidx == CMD_const, lhs->lhs_type); if (lhs->lhs_lvar == NULL) return FAIL; lhs->lhs_new_local = TRUE; } lhs->lhs_member_type = lhs->lhs_type; if (lhs->lhs_has_index) { char_u *after = var_start + lhs->lhs_varlen; char_u *p; // Something follows after the variable: "var[idx]" or "var.key". if (is_decl) { emsg(_(e_cannot_use_index_when_declaring_variable)); return FAIL; } // Now: var_start[lhs->lhs_varlen] is '[' or '.' // Only the last index is used below, if there are others // before it generate code for the expression. Thus for // "ll[1][2]" the expression is "ll[1]" and "[2]" is the index. for (;;) { p = skip_index(after); if (*p != '[' && *p != '.') { lhs->lhs_varlen_total = p - var_start; break; } after = p; } if (after > var_start + lhs->lhs_varlen) { lhs->lhs_varlen = after - var_start; lhs->lhs_dest = dest_expr; // We don't know the type before evaluating the expression, // use "any" until then. lhs->lhs_type = &t_any; } if (lhs->lhs_type->tt_member == NULL) lhs->lhs_member_type = &t_any; else lhs->lhs_member_type = lhs->lhs_type->tt_member; } return OK; } /* * Figure out the LHS and check a few errors. 
*/ int compile_assign_lhs( char_u *var_start, lhs_T *lhs, int cmdidx, int is_decl, int heredoc, int oplen, cctx_T *cctx) { if (compile_lhs(var_start, lhs, cmdidx, heredoc, oplen, cctx) == FAIL) return FAIL; if (!lhs->lhs_has_index && lhs->lhs_lvar == &lhs->lhs_arg_lvar) { semsg(_(e_cannot_assign_to_argument), lhs->lhs_name); return FAIL; } if (!is_decl && lhs->lhs_lvar != NULL && lhs->lhs_lvar->lv_const && !lhs->lhs_has_index) { semsg(_(e_cannot_assign_to_constant), lhs->lhs_name); return FAIL; } return OK; } /* * Return TRUE if "lhs" has a range index: "[expr : expr]". */ static int has_list_index(char_u *idx_start, cctx_T *cctx) { char_u *p = idx_start; int save_skip; if (*p != '[') return FALSE; p = skipwhite(p + 1); if (*p == ':') return TRUE; save_skip = cctx->ctx_skip; cctx->ctx_skip = SKIP_YES; (void)compile_expr0(&p, cctx); cctx->ctx_skip = save_skip; return *skipwhite(p) == ':'; } /* * For an assignment with an index, compile the "idx" in "var[idx]" or "key" in * "var.key". */ static int compile_assign_index( char_u *var_start, lhs_T *lhs, int *range, cctx_T *cctx) { size_t varlen = lhs->lhs_varlen; char_u *p; int r = OK; int need_white_before = TRUE; int empty_second; p = var_start + varlen; if (*p == '[') { p = skipwhite(p + 1); if (*p == ':') { // empty first index, push zero r = generate_PUSHNR(cctx, 0); need_white_before = FALSE; } else r = compile_expr0(&p, cctx); if (r == OK && *skipwhite(p) == ':') { // unlet var[idx : idx] // blob[idx : idx] = value *range = TRUE; p = skipwhite(p); empty_second = *skipwhite(p + 1) == ']'; if ((need_white_before && !IS_WHITE_OR_NUL(p[-1])) || (!empty_second && !IS_WHITE_OR_NUL(p[1]))) { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", p); return FAIL; } p = skipwhite(p + 1); if (*p == ']') // empty second index, push "none" r = generate_PUSHSPEC(cctx, VVAL_NONE); else r = compile_expr0(&p, cctx); } if (r == OK && *skipwhite(p) != ']') { // this should not happen emsg(_(e_missing_closing_square_brace)); r = FAIL; } } else // if (*p == '.') { char_u *key_end = to_name_end(p + 1, TRUE); char_u *key = vim_strnsave(p + 1, key_end - p - 1); r = generate_PUSHS(cctx, &key); } return r; } /* * For a LHS with an index, load the variable to be indexed. */ static int compile_load_lhs( lhs_T *lhs, char_u *var_start, type_T *rhs_type, cctx_T *cctx) { if (lhs->lhs_dest == dest_expr) { size_t varlen = lhs->lhs_varlen; int c = var_start[varlen]; int lines_len = cctx->ctx_ufunc->uf_lines.ga_len; char_u *p = var_start; garray_T *stack = &cctx->ctx_type_stack; int res; // Evaluate "ll[expr]" of "ll[expr][idx]". End the line with a NUL and // limit the lines array length to avoid skipping to a following line. var_start[varlen] = NUL; cctx->ctx_ufunc->uf_lines.ga_len = cctx->ctx_lnum + 1; res = compile_expr0(&p, cctx); var_start[varlen] = c; cctx->ctx_ufunc->uf_lines.ga_len = lines_len; if (res == FAIL || p != var_start + varlen) { // this should not happen if (res != FAIL) emsg(_(e_missing_closing_square_brace)); return FAIL; } lhs->lhs_type = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; // now we can properly check the type if (rhs_type != NULL && lhs->lhs_type->tt_member != NULL && rhs_type != &t_void && need_type(rhs_type, lhs->lhs_type->tt_member, -2, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } else generate_loadvar(cctx, lhs->lhs_dest, lhs->lhs_name, lhs->lhs_lvar, lhs->lhs_type); return OK; } /* * Produce code for loading "lhs" and also take care of an index. * Return OK/FAIL. 
*/ int compile_load_lhs_with_index(lhs_T *lhs, char_u *var_start, cctx_T *cctx) { compile_load_lhs(lhs, var_start, NULL, cctx); if (lhs->lhs_has_index) { int range = FALSE; // Get member from list or dict. First compile the // index value. if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (range) { semsg(_(e_cannot_use_range_with_assignment_operator_str), var_start); return FAIL; } // Get the member. if (compile_member(FALSE, NULL, cctx) == FAIL) return FAIL; } return OK; } /* * Assignment to a list or dict member, or ":unlet" for the item, using the * information in "lhs". * Returns OK or FAIL. */ int compile_assign_unlet( char_u *var_start, lhs_T *lhs, int is_assign, type_T *rhs_type, cctx_T *cctx) { vartype_T dest_type; garray_T *stack = &cctx->ctx_type_stack; int range = FALSE; if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (is_assign && range && lhs->lhs_type->tt_type != VAR_LIST && lhs->lhs_type != &t_blob && lhs->lhs_type != &t_any) { semsg(_(e_cannot_use_range_with_assignment_str), var_start); return FAIL; } if (lhs->lhs_type == &t_any) { // Index on variable of unknown type: check at runtime. dest_type = VAR_ANY; } else { dest_type = lhs->lhs_type->tt_type; if (dest_type == VAR_DICT && range) { emsg(e_cannot_use_range_with_dictionary); return FAIL; } if (dest_type == VAR_DICT && may_generate_2STRING(-1, FALSE, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_BLOB) { type_T *type; if (range) { type = ((type_T **)stack->ga_data)[stack->ga_len - 2]; if (need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; if ((dest_type != VAR_BLOB && type != &t_special) && need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } } // Load the dict or list. On the stack we then have: // - value (for assignment, not for :unlet) // - index // - for [a : b] second index // - variable if (compile_load_lhs(lhs, var_start, rhs_type, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_DICT || dest_type == VAR_BLOB || dest_type == VAR_ANY) { if (is_assign) { if (range) { if (generate_instr_drop(cctx, ISN_STORERANGE, 4) == NULL) return FAIL; } else { isn_T *isn = generate_instr_drop(cctx, ISN_STOREINDEX, 3); if (isn == NULL) return FAIL; isn->isn_arg.vartype = dest_type; } } else if (range) { if (generate_instr_drop(cctx, ISN_UNLETRANGE, 3) == NULL) return FAIL; } else { if (generate_instr_drop(cctx, ISN_UNLETINDEX, 2) == NULL) return FAIL; } } else { emsg(_(e_indexable_type_required)); return FAIL; } return OK; } /* * Compile declaration and assignment: * "let name" * "var name = expr" * "final name = expr" * "const name = expr" * "name = expr" * "arg" points to "name". * "++arg" and "--arg" * Return NULL for an error. * Return "arg" if it does not look like a variable list. */ static char_u * compile_assignment(char_u *arg, exarg_T *eap, cmdidx_T cmdidx, cctx_T *cctx) { char_u *var_start; char_u *p; char_u *end = arg; char_u *ret = NULL; int var_count = 0; int var_idx; int semicolon = 0; int did_generate_slice = FALSE; garray_T *instr = &cctx->ctx_instr; garray_T *stack = &cctx->ctx_type_stack; char_u *op; int oplen = 0; int heredoc = FALSE; int incdec = FALSE; type_T *rhs_type = &t_any; char_u *sp; int is_decl = is_decl_command(cmdidx); lhs_T lhs; long start_lnum = SOURCING_LNUM; // Skip over the "var" or "[var, var]" to get to any "=". 
p = skip_var_list(arg, TRUE, &var_count, &semicolon, TRUE); if (p == NULL) return *arg == '[' ? arg : NULL; lhs.lhs_name = NULL; sp = p; p = skipwhite(p); op = p; oplen = assignment_len(p, &heredoc); if (var_count > 0 && oplen == 0) // can be something like "[1, 2]->func()" return arg; if (oplen > 0 && (!VIM_ISWHITE(*sp) || !IS_WHITE_OR_NUL(op[oplen]))) { error_white_both(op, oplen); return NULL; } if (eap->cmdidx == CMD_increment || eap->cmdidx == CMD_decrement) { if (VIM_ISWHITE(eap->cmd[2])) { semsg(_(e_no_white_space_allowed_after_str_str), eap->cmdidx == CMD_increment ? "++" : "--", eap->cmd); return NULL; } op = (char_u *)(eap->cmdidx == CMD_increment ? "+=" : "-="); oplen = 2; incdec = TRUE; } if (heredoc) { list_T *l; listitem_T *li; // [let] varname =<< [trim] {end} eap->getline = exarg_getline; eap->cookie = cctx; l = heredoc_get(eap, op + 3, FALSE); if (l == NULL) return NULL; if (cctx->ctx_skip != SKIP_YES) { // Push each line and the create the list. FOR_ALL_LIST_ITEMS(l, li) { generate_PUSHS(cctx, &li->li_tv.vval.v_string); li->li_tv.vval.v_string = NULL; } generate_NEWLIST(cctx, l->lv_len); } list_free(l); p += STRLEN(p); end = p; } else if (var_count > 0) { char_u *wp; // for "[var, var] = expr" evaluate the expression here, loop over the // list of variables below. // A line break may follow the "=". wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) return FAIL; if (compile_expr0(&p, cctx) == FAIL) return NULL; end = p; if (cctx->ctx_skip != SKIP_YES) { type_T *stacktype; int needed_list_len; int did_check = FALSE; stacktype = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; if (stacktype->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } if (need_type(stacktype, &t_list_any, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; // If a constant list was used we can check the length right here. needed_list_len = semicolon ? var_count - 1 : var_count; if (instr->ga_len > 0) { isn_T *isn = ((isn_T *)instr->ga_data) + instr->ga_len - 1; if (isn->isn_type == ISN_NEWLIST) { did_check = TRUE; if (semicolon ? isn->isn_arg.number < needed_list_len : isn->isn_arg.number != needed_list_len) { semsg(_(e_expected_nr_items_but_got_nr), needed_list_len, isn->isn_arg.number); goto theend; } } } if (!did_check) generate_CHECKLEN(cctx, needed_list_len, semicolon); if (stacktype->tt_member != NULL) rhs_type = stacktype->tt_member; } } /* * Loop over variables in "[var, var] = expr". * For "var = expr" and "let var: type" this is done only once. */ if (var_count > 0) var_start = skipwhite(arg + 1); // skip over the "[" else var_start = arg; for (var_idx = 0; var_idx == 0 || var_idx < var_count; var_idx++) { int instr_count = -1; int save_lnum; int skip_store = FALSE; if (var_start[0] == '_' && !eval_isnamec(var_start[1])) { // Ignore underscore in "[a, _, b] = list". if (var_count > 0) { var_start = skipwhite(var_start + 2); continue; } emsg(_(e_cannot_use_underscore_here)); goto theend; } vim_free(lhs.lhs_name); /* * Figure out the LHS type and other properties. 
*/ if (compile_assign_lhs(var_start, &lhs, cmdidx, is_decl, heredoc, oplen, cctx) == FAIL) goto theend; if (heredoc) { SOURCING_LNUM = start_lnum; if (lhs.lhs_has_type && need_type(&t_list_string, lhs.lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } else { if (cctx->ctx_skip == SKIP_YES) { if (oplen > 0 && var_count == 0) { // skip over the "=" and the expression p = skipwhite(op + oplen); (void)compile_expr0(&p, cctx); } } else if (oplen > 0) { int is_const = FALSE; char_u *wp; // for "+=", "*=", "..=" etc. first load the current value if (*op != '=' && compile_load_lhs_with_index(&lhs, var_start, cctx) == FAIL) goto theend; // For "var = expr" evaluate the expression. if (var_count == 0) { int r; // Compile the expression. instr_count = instr->ga_len; if (incdec) { r = generate_PUSHNR(cctx, 1); } else { // Temporarily hide the new local variable here, it is // not available to this expression. if (lhs.lhs_new_local) --cctx->ctx_locals.ga_len; wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) { if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; goto theend; } r = compile_expr0_ext(&p, cctx, &is_const); if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; if (r == FAIL) goto theend; } } else if (semicolon && var_idx == var_count - 1) { // For "[var; var] = expr" get the rest of the list did_generate_slice = TRUE; if (generate_SLICE(cctx, var_count - 1) == FAIL) goto theend; } else { // For "[var, var] = expr" get the "var_idx" item from the // list. if (generate_GETITEM(cctx, var_idx, *op != '=') == FAIL) goto theend; } rhs_type = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; if (lhs.lhs_lvar != NULL && (is_decl || !lhs.lhs_has_type)) { if ((rhs_type->tt_type == VAR_FUNC || rhs_type->tt_type == VAR_PARTIAL) && !lhs.lhs_has_index && var_wrong_func_name(lhs.lhs_name, TRUE)) goto theend; if (lhs.lhs_new_local && !lhs.lhs_has_type) { if (rhs_type->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } else { // An empty list or dict has a &t_unknown member, // for a variable that implies &t_any. if (rhs_type == &t_list_empty) lhs.lhs_lvar->lv_type = &t_list_any; else if (rhs_type == &t_dict_empty) lhs.lhs_lvar->lv_type = &t_dict_any; else if (rhs_type == &t_unknown) lhs.lhs_lvar->lv_type = &t_any; else lhs.lhs_lvar->lv_type = rhs_type; } } else if (*op == '=') { type_T *use_type = lhs.lhs_lvar->lv_type; where_T where = WHERE_INIT; // Without operator check type here, otherwise below. // Use the line number of the assignment. SOURCING_LNUM = start_lnum; where.wt_index = var_count > 0 ? var_idx + 1 : 0; where.wt_variable = var_count > 0; // If assigning to a list or dict member, use the // member type. Not for "list[:] =". if (lhs.lhs_has_index && !has_list_index(var_start + lhs.lhs_varlen, cctx)) use_type = lhs.lhs_member_type; if (need_type_where(rhs_type, use_type, -1, where, cctx, FALSE, is_const) == FAIL) goto theend; } } else { type_T *lhs_type = lhs.lhs_member_type; // Special case: assigning to @# can use a number or a // string. // Also: can assign a number to a float. 
if ((lhs_type == &t_number_or_string || lhs_type == &t_float) && rhs_type->tt_type == VAR_NUMBER) lhs_type = &t_number; if (*p != '=' && need_type(rhs_type, lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } } else if (cmdidx == CMD_final) { emsg(_(e_final_requires_a_value)); goto theend; } else if (cmdidx == CMD_const) { emsg(_(e_const_requires_a_value)); goto theend; } else if (!lhs.lhs_has_type || lhs.lhs_dest == dest_option || lhs.lhs_dest == dest_func_option) { emsg(_(e_type_or_initialization_required)); goto theend; } else { // variables are always initialized if (GA_GROW_FAILS(instr, 1)) goto theend; switch (lhs.lhs_member_type->tt_type) { case VAR_BOOL: generate_PUSHBOOL(cctx, VVAL_FALSE); break; case VAR_FLOAT: #ifdef FEAT_FLOAT generate_PUSHF(cctx, 0.0); #endif break; case VAR_STRING: generate_PUSHS(cctx, NULL); break; case VAR_BLOB: generate_PUSHBLOB(cctx, blob_alloc()); break; case VAR_FUNC: generate_PUSHFUNC(cctx, NULL, &t_func_void); break; case VAR_LIST: generate_NEWLIST(cctx, 0); break; case VAR_DICT: generate_NEWDICT(cctx, 0); break; case VAR_JOB: generate_PUSHJOB(cctx, NULL); break; case VAR_CHANNEL: generate_PUSHCHANNEL(cctx, NULL); break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_PARTIAL: case VAR_VOID: case VAR_INSTR: case VAR_SPECIAL: // cannot happen // This is skipped for local variables, they are // always initialized to zero. if (lhs.lhs_dest == dest_local) skip_store = TRUE; else generate_PUSHNR(cctx, 0); break; } } if (var_count == 0) end = p; } // no need to parse more when skipping if (cctx->ctx_skip == SKIP_YES) break; if (oplen > 0 && *op != '=') { type_T *expected; type_T *stacktype = NULL; if (*op == '.') { if (may_generate_2STRING(-1, FALSE, cctx) == FAIL) goto theend; } else { expected = lhs.lhs_member_type; stacktype = ((type_T **)stack->ga_data)[stack->ga_len - 1]; if ( #ifdef FEAT_FLOAT // If variable is float operation with number is OK. !(expected == &t_float && (stacktype == &t_number || stacktype == &t_number_bool)) && #endif need_type(stacktype, expected, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } if (*op == '.') { if (generate_instr_drop(cctx, ISN_CONCAT, 1) == NULL) goto theend; } else if (*op == '+') { if (generate_add_instr(cctx, operator_type(lhs.lhs_member_type, stacktype), lhs.lhs_member_type, stacktype, EXPR_APPEND) == FAIL) goto theend; } else if (generate_two_op(cctx, op) == FAIL) goto theend; } // Use the line number of the assignment for store instruction. save_lnum = cctx->ctx_lnum; cctx->ctx_lnum = start_lnum - 1; if (lhs.lhs_has_index) { // Use the info in "lhs" to store the value at the index in the // list or dict. if (compile_assign_unlet(var_start, &lhs, TRUE, rhs_type, cctx) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } else { if (is_decl && cmdidx == CMD_const && (lhs.lhs_dest == dest_script || lhs.lhs_dest == dest_global || lhs.lhs_dest == dest_local)) // ":const var": lock the value, but not referenced variables generate_LOCKCONST(cctx); if (is_decl && (lhs.lhs_type->tt_type == VAR_DICT || lhs.lhs_type->tt_type == VAR_LIST) && lhs.lhs_type->tt_member != NULL && !(lhs.lhs_type->tt_member == &t_any && oplen > 0 && rhs_type != NULL && rhs_type->tt_type == lhs.lhs_type->tt_type && rhs_type->tt_member != &t_unknown) && lhs.lhs_type->tt_member != &t_unknown) // Set the type in the list or dict, so that it can be checked, // also in legacy script. Not for "list<any> = val", then the // type of "val" is used. 
generate_SETTYPE(cctx, lhs.lhs_type); if (!skip_store && generate_store_lhs(cctx, &lhs, instr_count, is_decl) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } cctx->ctx_lnum = save_lnum; if (var_idx + 1 < var_count) var_start = skipwhite(lhs.lhs_end + 1); } // For "[var, var] = expr" drop the "expr" value. // Also for "[var, var; _] = expr". if (var_count > 0 && (!semicolon || !did_generate_slice)) { if (generate_instr_drop(cctx, ISN_DROP, 1) == NULL) goto theend; } ret = skipwhite(end); theend: vim_free(lhs.lhs_name); return ret; } /* * Check for an assignment at "eap->cmd", compile it if found. * Return NOTDONE if there is none, FAIL for failure, OK if done. */ static int may_compile_assignment(exarg_T *eap, char_u **line, cctx_T *cctx) { char_u *pskip; char_u *p; // Assuming the command starts with a variable or function name, // find what follows. // Skip over "var.member", "var[idx]" and the like. // Also "&opt = val", "$ENV = val" and "@r = val". pskip = (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@') ? eap->cmd + 1 : eap->cmd; p = to_name_end(pskip, TRUE); if (p > eap->cmd && *p != NUL) { char_u *var_end; int oplen; int heredoc; if (eap->cmd[0] == '@') var_end = eap->cmd + 2; else var_end = find_name_end(pskip, NULL, NULL, FNE_CHECK_START | FNE_INCL_BR); oplen = assignment_len(skipwhite(var_end), &heredoc); if (oplen > 0) { size_t len = p - eap->cmd; // Recognize an assignment if we recognize the variable // name: // "g:var = expr" // "local = expr" where "local" is a local var. // "script = expr" where "script" is a script-local var. // "import = expr" where "import" is an imported var // "&opt = expr" // "$ENV = expr" // "@r = expr" if (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@' || ((len) > 2 && eap->cmd[1] == ':') || variable_exists(eap->cmd, len, cctx)) { *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL || *line == eap->cmd) return FAIL; return OK; } } } if (*eap->cmd == '[') { // [var, var] = expr *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL) return FAIL; if (*line != eap->cmd) return OK; } return NOTDONE; } /* * Add a function to the list of :def functions. * This sets "ufunc->uf_dfunc_idx" but the function isn't compiled yet. */ static int add_def_function(ufunc_T *ufunc) { dfunc_T *dfunc; if (def_functions.ga_len == 0) { // The first position is not used, so that a zero uf_dfunc_idx means it // wasn't set. if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; ++def_functions.ga_len; } // Add the function to "def_functions". if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; dfunc = ((dfunc_T *)def_functions.ga_data) + def_functions.ga_len; CLEAR_POINTER(dfunc); dfunc->df_idx = def_functions.ga_len; ufunc->uf_dfunc_idx = dfunc->df_idx; dfunc->df_ufunc = ufunc; dfunc->df_name = vim_strsave(ufunc->uf_name); ga_init2(&dfunc->df_var_names, sizeof(char_u *), 10); ++dfunc->df_refcount; ++def_functions.ga_len; return OK; } /* * After ex_function() has collected all the function lines: parse and compile * the lines into instructions. * Adds the function to "def_functions". * When "check_return_type" is set then set ufunc->uf_ret_type to the type of * the return statement (used for lambda). When uf_ret_type is already set * then check that it matches. * When "profiling" is true add ISN_PROF_START instructions. * "outer_cctx" is set for a nested function. * This can be used recursively through compile_lambda(), which may reallocate * "def_functions". * Returns OK or FAIL. 
*/ int compile_def_function( ufunc_T *ufunc, int check_return_type, compiletype_T compile_type, cctx_T *outer_cctx) { char_u *line = NULL; char_u *line_to_free = NULL; char_u *p; char *errormsg = NULL; // error message cctx_T cctx; garray_T *instr; int did_emsg_before = did_emsg; int did_emsg_silent_before = did_emsg_silent; int ret = FAIL; sctx_T save_current_sctx = current_sctx; int save_estack_compiling = estack_compiling; int save_cmod_flags = cmdmod.cmod_flags; int do_estack_push; int new_def_function = FALSE; #ifdef FEAT_PROFILE int prof_lnum = -1; #endif int debug_lnum = -1; // When using a function that was compiled before: Free old instructions. // The index is reused. Otherwise add a new entry in "def_functions". if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; isn_T *instr_dest = NULL; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE instr_dest = dfunc->df_instr_prof; break; #endif case CT_NONE: instr_dest = dfunc->df_instr; break; case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break; } if (instr_dest != NULL) // Was compiled in this mode before: Free old instructions. delete_def_function_contents(dfunc, FALSE); ga_clear_strings(&dfunc->df_var_names); } else { if (add_def_function(ufunc) == FAIL) return FAIL; new_def_function = TRUE; } ufunc->uf_def_status = UF_COMPILING; CLEAR_FIELD(cctx); cctx.ctx_compile_type = compile_type; cctx.ctx_ufunc = ufunc; cctx.ctx_lnum = -1; cctx.ctx_outer = outer_cctx; ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10); ga_init2(&cctx.ctx_type_stack, sizeof(type_T *), 50); ga_init2(&cctx.ctx_imports, sizeof(imported_T), 10); cctx.ctx_type_list = &ufunc->uf_type_list; ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50); instr = &cctx.ctx_instr; // Set the context to the function, it may be compiled when called from // another script. Set the script version to the most modern one. // The line number will be set in next_line_from_context(). current_sctx = ufunc->uf_script_ctx; current_sctx.sc_version = SCRIPT_VERSION_VIM9; // Don't use the flag from ":legacy" here. cmdmod.cmod_flags &= ~CMOD_LEGACY; // Make sure error messages are OK. do_estack_push = !estack_top_is_ufunc(ufunc, 1); if (do_estack_push) estack_push_ufunc(ufunc, 1); estack_compiling = TRUE; if (ufunc->uf_def_args.ga_len > 0) { int count = ufunc->uf_def_args.ga_len; int first_def_arg = ufunc->uf_args.ga_len - count; int i; char_u *arg; int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0); int did_set_arg_type = FALSE; // Produce instructions for the default values of optional arguments. SOURCING_LNUM = 0; // line number unknown for (i = 0; i < count; ++i) { garray_T *stack = &cctx.ctx_type_stack; type_T *val_type; int arg_idx = first_def_arg + i; where_T where = WHERE_INIT; int r; int jump_instr_idx = instr->ga_len; isn_T *isn; // Use a JUMP_IF_ARG_SET instruction to skip if the value was given. if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL) goto erret; // Make sure later arguments are not found. ufunc->uf_args_visible = arg_idx; arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i]; r = compile_expr0(&arg, &cctx); if (r == FAIL) goto erret; // If no type specified use the type of the default value. // Otherwise check that the default value type matches the // specified type. 
val_type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; where.wt_index = arg_idx + 1; if (ufunc->uf_arg_types[arg_idx] == &t_unknown) { did_set_arg_type = TRUE; ufunc->uf_arg_types[arg_idx] = val_type; } else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx], -1, where, &cctx, FALSE, FALSE) == FAIL) goto erret; if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL) goto erret; // set instruction index in JUMP_IF_ARG_SET to here isn = ((isn_T *)instr->ga_data) + jump_instr_idx; isn->isn_arg.jumparg.jump_where = instr->ga_len; } if (did_set_arg_type) set_function_type(ufunc); } ufunc->uf_args_visible = ufunc->uf_args.ga_len; /* * Loop over all the lines of the function and generate instructions. */ for (;;) { exarg_T ea; int starts_with_colon = FALSE; char_u *cmd; cmdmod_T local_cmdmod; // Bail out on the first error to avoid a flood of errors and report // the right line number when inside try/catch. if (did_emsg_before != did_emsg) goto erret; if (line != NULL && *line == '|') // the line continues after a '|' ++line; else if (line != NULL && *skipwhite(line) != NUL && !(*line == '#' && (line == cctx.ctx_line_start || VIM_ISWHITE(line[-1])))) { semsg(_(e_trailing_arg), line); goto erret; } else if (line != NULL && vim9_bad_comment(skipwhite(line))) goto erret; else { line = next_line_from_context(&cctx, FALSE); if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len) { // beyond the last line #ifdef FEAT_PROFILE if (cctx.ctx_skip != SKIP_YES) may_generate_prof_end(&cctx, prof_lnum); #endif break; } // Make a copy, splitting off nextcmd and removing trailing spaces // may change it. if (line != NULL) { line = vim_strsave(line); vim_free(line_to_free); line_to_free = line; } } CLEAR_FIELD(ea); ea.cmdlinep = &line; ea.cmd = skipwhite(line); if (*ea.cmd == '#') { // "#" starts a comment line = (char_u *)""; continue; } #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum && cctx.ctx_skip != SKIP_YES) { may_generate_prof_end(&cctx, prof_lnum); prof_lnum = cctx.ctx_lnum; generate_instr(&cctx, ISN_PROF_START); } #endif if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum && cctx.ctx_skip != SKIP_YES) { debug_lnum = cctx.ctx_lnum; generate_instr_debug(&cctx); } cctx.ctx_prev_lnum = cctx.ctx_lnum + 1; // Some things can be recognized by the first character. switch (*ea.cmd) { case '}': { // "}" ends a block scope scopetype_T stype = cctx.ctx_scope == NULL ? NO_SCOPE : cctx.ctx_scope->se_type; if (stype == BLOCK_SCOPE) { compile_endblock(&cctx); line = ea.cmd; } else { emsg(_(e_using_rcurly_outside_if_block_scope)); goto erret; } if (line != NULL) line = skipwhite(ea.cmd + 1); continue; } case '{': // "{" starts a block scope // "{'a': 1}->func() is something else if (ends_excmd(*skipwhite(ea.cmd + 1))) { line = compile_block(ea.cmd, &cctx); continue; } break; } /* * COMMAND MODIFIERS */ cctx.ctx_has_cmdmod = FALSE; if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE) == FAIL) { if (errormsg != NULL) goto erret; // empty line or comment line = (char_u *)""; continue; } generate_cmdmods(&cctx, &local_cmdmod); undo_cmdmod(&local_cmdmod); // Check if there was a colon after the last command modifier or before // the current position. 
for (p = ea.cmd; p >= line; --p) { if (*p == ':') starts_with_colon = TRUE; if (p < ea.cmd && !VIM_ISWHITE(*p)) break; } // Skip ":call" to get to the function name, unless using :legacy p = ea.cmd; if (!(local_cmdmod.cmod_flags & CMOD_LEGACY)) { if (checkforcmd(&ea.cmd, "call", 3)) { if (*ea.cmd == '(') // not for "call()" ea.cmd = p; else ea.cmd = skipwhite(ea.cmd); } if (!starts_with_colon) { int assign; // Check for assignment after command modifiers. assign = may_compile_assignment(&ea, &line, &cctx); if (assign == OK) goto nextline; if (assign == FAIL) goto erret; } } /* * COMMAND after range * 'text'->func() should not be confused with 'a mark * "++nr" and "--nr" are eval commands * in "$ENV->func()" the "$" is not a range */ cmd = ea.cmd; if ((*cmd != '$' || starts_with_colon) && (starts_with_colon || !(*cmd == '\'' || (cmd[0] == cmd[1] && (*cmd == '+' || *cmd == '-'))))) { ea.cmd = skip_range(ea.cmd, TRUE, NULL); if (ea.cmd > cmd) { if (!starts_with_colon && !(local_cmdmod.cmod_flags & CMOD_LEGACY)) { semsg(_(e_colon_required_before_range_str), cmd); goto erret; } ea.addr_count = 1; if (ends_excmd2(line, ea.cmd)) { // A range without a command: jump to the line. generate_EXEC(&cctx, ISN_EXECRANGE, vim_strnsave(cmd, ea.cmd - cmd)); line = ea.cmd; goto nextline; } } } p = find_ex_command(&ea, NULL, starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY) ? NULL : item_exists, &cctx); if (p == NULL) { if (cctx.ctx_skip != SKIP_YES) emsg(_(e_ambiguous_use_of_user_defined_command)); goto erret; } // When using ":legacy cmd" always use compile_exec(). if (local_cmdmod.cmod_flags & CMOD_LEGACY) { char_u *start = ea.cmd; switch (ea.cmdidx) { case CMD_if: case CMD_elseif: case CMD_else: case CMD_endif: case CMD_for: case CMD_endfor: case CMD_continue: case CMD_break: case CMD_while: case CMD_endwhile: case CMD_try: case CMD_catch: case CMD_finally: case CMD_endtry: semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd); goto erret; default: break; } // ":legacy return expr" needs to be handled differently. if (checkforcmd(&start, "return", 4)) ea.cmdidx = CMD_return; else ea.cmdidx = CMD_legacy; } if (p == ea.cmd && ea.cmdidx != CMD_SIZE) { if (cctx.ctx_skip == SKIP_YES && ea.cmdidx != CMD_eval) { line += STRLEN(line); goto nextline; } else if (ea.cmdidx != CMD_eval) { // CMD_var cannot happen, compile_assignment() above would be // used. Most likely an assignment to a non-existing variable. 
semsg(_(e_command_not_recognized_str), ea.cmd); goto erret; } } if (cctx.ctx_had_return && ea.cmdidx != CMD_elseif && ea.cmdidx != CMD_else && ea.cmdidx != CMD_endif && ea.cmdidx != CMD_endfor && ea.cmdidx != CMD_endwhile && ea.cmdidx != CMD_catch && ea.cmdidx != CMD_finally && ea.cmdidx != CMD_endtry) { emsg(_(e_unreachable_code_after_return)); goto erret; } p = skipwhite(p); if (ea.cmdidx != CMD_SIZE && ea.cmdidx != CMD_write && ea.cmdidx != CMD_read) { if (ea.cmdidx >= 0) ea.argt = excmd_get_argt(ea.cmdidx); if ((ea.argt & EX_BANG) && *p == '!') { ea.forceit = TRUE; p = skipwhite(p + 1); } } switch (ea.cmdidx) { case CMD_def: case CMD_function: ea.arg = p; line = compile_nested_function(&ea, &cctx); break; case CMD_return: line = compile_return(p, check_return_type, local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx); cctx.ctx_had_return = TRUE; break; case CMD_let: emsg(_(e_cannot_use_let_in_vim9_script)); break; case CMD_var: case CMD_final: case CMD_const: case CMD_increment: case CMD_decrement: line = compile_assignment(p, &ea, ea.cmdidx, &cctx); if (line == p) line = NULL; break; case CMD_unlet: case CMD_unlockvar: case CMD_lockvar: line = compile_unletlock(p, &ea, &cctx); break; case CMD_import: emsg(_(e_import_can_only_be_used_in_script)); line = NULL; break; case CMD_if: line = compile_if(p, &cctx); break; case CMD_elseif: line = compile_elseif(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_else: line = compile_else(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endif: line = compile_endif(p, &cctx); break; case CMD_while: line = compile_while(p, &cctx); break; case CMD_endwhile: line = compile_endwhile(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_for: line = compile_for(p, &cctx); break; case CMD_endfor: line = compile_endfor(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_continue: line = compile_continue(p, &cctx); break; case CMD_break: line = compile_break(p, &cctx); break; case CMD_try: line = compile_try(p, &cctx); break; case CMD_catch: line = compile_catch(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_finally: line = compile_finally(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endtry: line = compile_endtry(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_throw: line = compile_throw(p, &cctx); break; case CMD_eval: line = compile_eval(p, &cctx); break; case CMD_echo: case CMD_echon: case CMD_execute: case CMD_echomsg: case CMD_echoerr: case CMD_echoconsole: line = compile_mult_expr(p, ea.cmdidx, &cctx); break; case CMD_put: ea.cmd = cmd; line = compile_put(p, &ea, &cctx); break; case CMD_substitute: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; if (cctx.ctx_skip == SKIP_YES) line = (char_u *)""; else { ea.arg = p; line = compile_substitute(line, &ea, &cctx); } break; case CMD_redir: ea.arg = p; line = compile_redir(line, &ea, &cctx); break; case CMD_cexpr: case CMD_lexpr: case CMD_caddexpr: case CMD_laddexpr: case CMD_cgetexpr: case CMD_lgetexpr: #ifdef FEAT_QUICKFIX ea.arg = p; line = compile_cexpr(line, &ea, &cctx); #else ex_ni(&ea); line = NULL; #endif break; case CMD_append: case CMD_change: case CMD_insert: case CMD_k: case CMD_t: case CMD_xit: not_in_vim9(&ea); goto erret; case CMD_SIZE: if (cctx.ctx_skip != SKIP_YES) { semsg(_(e_invalid_command_str), ea.cmd); goto erret; } // We don't check for a next command here. 
line = (char_u *)""; break; case CMD_lua: case CMD_mzscheme: case CMD_perl: case CMD_py3: case CMD_python3: case CMD_python: case CMD_pythonx: case CMD_ruby: case CMD_tcl: ea.arg = p; if (vim_strchr(line, '\n') == NULL) line = compile_exec(line, &ea, &cctx); else // heredoc lines have been concatenated with NL // characters in get_function_body() line = compile_script(line, &cctx); break; case CMD_global: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; // FALLTHROUGH default: // Not recognized, execute with do_cmdline_cmd(). ea.arg = p; line = compile_exec(line, &ea, &cctx); break; } nextline: if (line == NULL) goto erret; line = skipwhite(line); // Undo any command modifiers. generate_undo_cmdmods(&cctx); if (cctx.ctx_type_stack.ga_len < 0) { iemsg("Type stack underflow"); goto erret; } } if (cctx.ctx_scope != NULL) { if (cctx.ctx_scope->se_type == IF_SCOPE) emsg(_(e_endif)); else if (cctx.ctx_scope->se_type == WHILE_SCOPE) emsg(_(e_endwhile)); else if (cctx.ctx_scope->se_type == FOR_SCOPE) emsg(_(e_endfor)); else emsg(_(e_missing_rcurly)); goto erret; } if (!cctx.ctx_had_return) { if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN) ufunc->uf_ret_type = &t_void; else if (ufunc->uf_ret_type->tt_type != VAR_VOID) { emsg(_(e_missing_return_statement)); goto erret; } // Return void if there is no return at the end. generate_instr(&cctx, ISN_RETURN_VOID); } // When compiled with ":silent!" and there was an error don't consider the // function compiled. if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; dfunc->df_deleted = FALSE; dfunc->df_script_seq = current_sctx.sc_seq; #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE) { dfunc->df_instr_prof = instr->ga_data; dfunc->df_instr_prof_count = instr->ga_len; } else #endif if (cctx.ctx_compile_type == CT_DEBUG) { dfunc->df_instr_debug = instr->ga_data; dfunc->df_instr_debug_count = instr->ga_len; } else { dfunc->df_instr = instr->ga_data; dfunc->df_instr_count = instr->ga_len; } dfunc->df_varcount = dfunc->df_var_names.ga_len; dfunc->df_has_closure = cctx.ctx_has_closure; if (cctx.ctx_outer_used) ufunc->uf_flags |= FC_CLOSURE; ufunc->uf_def_status = UF_COMPILED; } ret = OK; erret: if (ufunc->uf_def_status == UF_COMPILING) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; // Compiling aborted, free the generated instructions. clear_instr_ga(instr); VIM_CLEAR(dfunc->df_name); ga_clear_strings(&dfunc->df_var_names); // If using the last entry in the table and it was added above, we // might as well remove it. 
if (!dfunc->df_deleted && new_def_function && ufunc->uf_dfunc_idx == def_functions.ga_len - 1) { --def_functions.ga_len; ufunc->uf_dfunc_idx = 0; } ufunc->uf_def_status = UF_COMPILE_ERROR; while (cctx.ctx_scope != NULL) drop_scope(&cctx); if (errormsg != NULL) emsg(errormsg); else if (did_emsg == did_emsg_before) emsg(_(e_compiling_def_function_failed)); } if (cctx.ctx_redir_lhs.lhs_name != NULL) { if (ret == OK) { emsg(_(e_missing_redir_end)); ret = FAIL; } vim_free(cctx.ctx_redir_lhs.lhs_name); vim_free(cctx.ctx_redir_lhs.lhs_whole); } current_sctx = save_current_sctx; estack_compiling = save_estack_compiling; cmdmod.cmod_flags = save_cmod_flags; if (do_estack_push) estack_pop(); vim_free(line_to_free); free_imported(&cctx); free_locals(&cctx); ga_clear(&cctx.ctx_type_stack); return ret; } void set_function_type(ufunc_T *ufunc) { int varargs = ufunc->uf_va_name != NULL; int argcount = ufunc->uf_args.ga_len; // Create a type for the function, with the return type and any // argument types. // A vararg is included in uf_args.ga_len but not in uf_arg_types. // The type is included in "tt_args". if (argcount > 0 || varargs) { if (ufunc->uf_type_list.ga_itemsize == 0) ga_init2(&ufunc->uf_type_list, sizeof(type_T *), 10); ufunc->uf_func_type = alloc_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); // Add argument types to the function type. if (func_type_add_arg_types(ufunc->uf_func_type, argcount + varargs, &ufunc->uf_type_list) == FAIL) return; ufunc->uf_func_type->tt_argcount = argcount + varargs; ufunc->uf_func_type->tt_min_argcount = argcount - ufunc->uf_def_args.ga_len; if (ufunc->uf_arg_types == NULL) { int i; // lambda does not have argument types. for (i = 0; i < argcount; ++i) ufunc->uf_func_type->tt_args[i] = &t_any; } else mch_memmove(ufunc->uf_func_type->tt_args, ufunc->uf_arg_types, sizeof(type_T *) * argcount); if (varargs) { ufunc->uf_func_type->tt_args[argcount] = ufunc->uf_va_type == NULL ? &t_list_any : ufunc->uf_va_type; ufunc->uf_func_type->tt_flags = TTFLAG_VARARGS; } } else // No arguments, can use a predefined type. ufunc->uf_func_type = get_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); } /* * Free all instructions for "dfunc" except df_name. */ static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted) { int idx; ga_clear(&dfunc->df_def_args_isn); ga_clear_strings(&dfunc->df_var_names); if (dfunc->df_instr != NULL) { for (idx = 0; idx < dfunc->df_instr_count; ++idx) delete_instr(dfunc->df_instr + idx); VIM_CLEAR(dfunc->df_instr); dfunc->df_instr = NULL; } if (dfunc->df_instr_debug != NULL) { for (idx = 0; idx < dfunc->df_instr_debug_count; ++idx) delete_instr(dfunc->df_instr_debug + idx); VIM_CLEAR(dfunc->df_instr_debug); dfunc->df_instr_debug = NULL; } #ifdef FEAT_PROFILE if (dfunc->df_instr_prof != NULL) { for (idx = 0; idx < dfunc->df_instr_prof_count; ++idx) delete_instr(dfunc->df_instr_prof + idx); VIM_CLEAR(dfunc->df_instr_prof); dfunc->df_instr_prof = NULL; } #endif if (mark_deleted) dfunc->df_deleted = TRUE; if (dfunc->df_ufunc != NULL) dfunc->df_ufunc->uf_def_status = UF_NOT_COMPILED; } /* * When a user function is deleted, clear the contents of any associated def * function, unless another user function still uses it. * The position in def_functions can be re-used. 
*/ void unlink_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; if (--dfunc->df_refcount <= 0) delete_def_function_contents(dfunc, TRUE); ufunc->uf_def_status = UF_NOT_COMPILED; ufunc->uf_dfunc_idx = 0; if (dfunc->df_ufunc == ufunc) dfunc->df_ufunc = NULL; } } /* * Used when a user function refers to an existing dfunc. */ void link_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; ++dfunc->df_refcount; } } #if defined(EXITFREE) || defined(PROTO) /* * Free all functions defined with ":def". */ void free_def_functions(void) { int idx; for (idx = 0; idx < def_functions.ga_len; ++idx) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + idx; delete_def_function_contents(dfunc, TRUE); vim_free(dfunc->df_name); } ga_clear(&def_functions); } #endif #endif // FEAT_EVAL
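/*
 * Editor's note -- illustrative sketch only, not part of vim9compile.c.
 * The copy of the file above ends here.  In the copy that follows,
 * compile_nested_function() takes an extra "char_u **line_to_free"
 * parameter which it passes on to define_function(), so a callee that
 * replaces the caller's allocated line can hand ownership back and the
 * line is freed exactly once.  The small standalone program below shows
 * that ownership-handoff pattern with plain malloc/free; every name in it
 * is invented for the example and does not exist in the Vim sources.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* May replace the caller's current line with a newly allocated one.
 * "*line_to_free" always tracks the allocation the caller must free. */
static char *get_more_lines(char *line, char **line_to_free)
{
    char *next = strdup("enddef");      /* pretend another line was read */

    if (next == NULL)
        return line;                    /* keep the old line on failure */
    free(*line_to_free);                /* release the previous allocation */
    *line_to_free = next;               /* caller now owns "next" */
    return next;
}

int main(void)
{
    char *line_to_free = strdup("def Inner()");
    char *line = line_to_free;

    line = get_more_lines(line, &line_to_free);
    printf("current line: %s\n", line);
    free(line_to_free);                 /* freed exactly once, "line" never dangles */
    return 0;
}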
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * vim9compile.c: compiling a :def function */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) // When not generating protos this is included in proto.h #ifdef PROTO # include "vim9.h" #endif // Functions defined with :def are stored in this growarray. // They are never removed, so that they can be found by index. // Deleted functions have the df_deleted flag set. garray_T def_functions = {0, 0, sizeof(dfunc_T), 50, NULL}; static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted); /* * Lookup variable "name" in the local scope and return it in "lvar". * "lvar->lv_from_outer" is incremented accordingly. * If "lvar" is NULL only check if the variable can be found. * Return FAIL if not found. */ int lookup_local(char_u *name, size_t len, lvar_T *lvar, cctx_T *cctx) { int idx; lvar_T *lvp; if (len == 0) return FAIL; // Find local in current function scope. for (idx = 0; idx < cctx->ctx_locals.ga_len; ++idx) { lvp = ((lvar_T *)cctx->ctx_locals.ga_data) + idx; if (STRNCMP(name, lvp->lv_name, len) == 0 && STRLEN(lvp->lv_name) == len) { if (lvar != NULL) { *lvar = *lvp; lvar->lv_from_outer = 0; } return OK; } } // Find local in outer function scope. if (cctx->ctx_outer != NULL) { if (lookup_local(name, len, lvar, cctx->ctx_outer) == OK) { if (lvar != NULL) { cctx->ctx_outer_used = TRUE; ++lvar->lv_from_outer; } return OK; } } return FAIL; } /* * Lookup an argument in the current function and an enclosing function. * Returns the argument index in "idxp" * Returns the argument type in "type" * Sets "gen_load_outer" to TRUE if found in outer scope. * Returns OK when found, FAIL otherwise. */ int arg_exists( char_u *name, size_t len, int *idxp, type_T **type, int *gen_load_outer, cctx_T *cctx) { int idx; char_u *va_name; if (len == 0) return FAIL; for (idx = 0; idx < cctx->ctx_ufunc->uf_args_visible; ++idx) { char_u *arg = FUNCARG(cctx->ctx_ufunc, idx); if (STRNCMP(name, arg, len) == 0 && arg[len] == NUL) { if (idxp != NULL) { // Arguments are located above the frame pointer. One further // if there is a vararg argument *idxp = idx - (cctx->ctx_ufunc->uf_args.ga_len + STACK_FRAME_SIZE) + (cctx->ctx_ufunc->uf_va_name != NULL ? -1 : 0); if (cctx->ctx_ufunc->uf_arg_types != NULL) *type = cctx->ctx_ufunc->uf_arg_types[idx]; else *type = &t_any; } return OK; } } va_name = cctx->ctx_ufunc->uf_va_name; if (va_name != NULL && STRNCMP(name, va_name, len) == 0 && va_name[len] == NUL) { if (idxp != NULL) { // varargs is always the last argument *idxp = -STACK_FRAME_SIZE - 1; *type = cctx->ctx_ufunc->uf_va_type; } return OK; } if (cctx->ctx_outer != NULL) { // Lookup the name for an argument of the outer function. if (arg_exists(name, len, idxp, type, gen_load_outer, cctx->ctx_outer) == OK) { if (gen_load_outer != NULL) ++*gen_load_outer; return OK; } } return FAIL; } /* * Lookup a script-local variable in the current script, possibly defined in a * block that contains the function "cctx->ctx_ufunc". * "cctx" is NULL at the script level. * If "len" is <= 0 "name" must be NUL terminated. * Return NULL when not found. 
*/ static sallvar_T * find_script_var(char_u *name, size_t len, cctx_T *cctx) { scriptitem_T *si = SCRIPT_ITEM(current_sctx.sc_sid); hashitem_T *hi; int cc; sallvar_T *sav; sallvar_T *found_sav; ufunc_T *ufunc; // Find the list of all script variables with the right name. if (len > 0) { cc = name[len]; name[len] = NUL; } hi = hash_find(&si->sn_all_vars.dv_hashtab, name); if (len > 0) name[len] = cc; if (HASHITEM_EMPTY(hi)) return NULL; sav = HI2SAV(hi); if (sav->sav_block_id == 0) // variable defined in the top script scope is always visible return sav; if (cctx == NULL) { // Not in a function scope, find variable with block id equal to or // smaller than the current block id. while (sav != NULL) { if (sav->sav_block_id <= si->sn_current_block_id) break; sav = sav->sav_next; } return sav; } // Go over the variables with this name and find one that was visible // from the function. ufunc = cctx->ctx_ufunc; found_sav = sav; while (sav != NULL) { int idx; // Go over the blocks that this function was defined in. If the // variable block ID matches it was visible to the function. for (idx = 0; idx < ufunc->uf_block_depth; ++idx) if (ufunc->uf_block_ids[idx] == sav->sav_block_id) return sav; sav = sav->sav_next; } // Not found, assume variable at script level was visible. return found_sav; } /* * Return TRUE if the script context is Vim9 script. */ int script_is_vim9() { return SCRIPT_ITEM(current_sctx.sc_sid)->sn_version == SCRIPT_VERSION_VIM9; } /* * Lookup a variable (without s: prefix) in the current script. * "cctx" is NULL at the script level. * Returns OK or FAIL. */ int script_var_exists(char_u *name, size_t len, cctx_T *cctx) { if (current_sctx.sc_sid <= 0) return FAIL; if (script_is_vim9()) { // Check script variables that were visible where the function was // defined. if (find_script_var(name, len, cctx) != NULL) return OK; } else { hashtab_T *ht = &SCRIPT_VARS(current_sctx.sc_sid); dictitem_T *di; int cc; // Check script variables that are currently visible cc = name[len]; name[len] = NUL; di = find_var_in_ht(ht, 0, name, TRUE); name[len] = cc; if (di != NULL) return OK; } return FAIL; } /* * Return TRUE if "name" is a local variable, argument, script variable or * imported. */ static int variable_exists(char_u *name, size_t len, cctx_T *cctx) { return (cctx != NULL && (lookup_local(name, len, NULL, cctx) == OK || arg_exists(name, len, NULL, NULL, NULL, cctx) == OK)) || script_var_exists(name, len, cctx) == OK || find_imported(name, len, cctx) != NULL; } /* * Return TRUE if "name" is a local variable, argument, script variable, * imported or function. */ static int item_exists(char_u *name, size_t len, int cmd UNUSED, cctx_T *cctx) { int is_global; char_u *p; if (variable_exists(name, len, cctx)) return TRUE; // This is similar to what is in lookup_scriptitem(): // Find a function, so that a following "->" works. // Require "(" or "->" to follow, "Cmd" is a user command while "Cmd()" is // a function call. p = skipwhite(name + len); if (name[len] == '(' || (p[0] == '-' && p[1] == '>')) { // Do not check for an internal function, since it might also be a // valid command, such as ":split" versus "split()". // Skip "g:" before a function name. is_global = (name[0] == 'g' && name[1] == ':'); return find_func(is_global ? name + 2 : name, is_global, cctx) != NULL; } return FALSE; } /* * Check if "p[len]" is already defined, either in script "import_sid" or in * compilation context "cctx". "cctx" is NULL at the script level. * Does not check the global namespace. 
* If "is_arg" is TRUE the error message is for an argument name. * Return FAIL and give an error if it defined. */ int check_defined(char_u *p, size_t len, cctx_T *cctx, int is_arg) { int c = p[len]; ufunc_T *ufunc = NULL; // underscore argument is OK if (len == 1 && *p == '_') return OK; if (script_var_exists(p, len, cctx) == OK) { if (is_arg) semsg(_(e_argument_already_declared_in_script_str), p); else semsg(_(e_variable_already_declared_in_script_str), p); return FAIL; } p[len] = NUL; if ((cctx != NULL && (lookup_local(p, len, NULL, cctx) == OK || arg_exists(p, len, NULL, NULL, NULL, cctx) == OK)) || find_imported(p, len, cctx) != NULL || (ufunc = find_func_even_dead(p, FALSE, cctx)) != NULL) { // A local or script-local function can shadow a global function. if (ufunc == NULL || ((ufunc->uf_flags & FC_DEAD) == 0 && (!func_is_global(ufunc) || (p[0] == 'g' && p[1] == ':')))) { if (is_arg) semsg(_(e_argument_name_shadows_existing_variable_str), p); else semsg(_(e_name_already_defined_str), p); p[len] = c; return FAIL; } } p[len] = c; return OK; } /* * Return TRUE if "actual" could be "expected" and a runtime typecheck is to be * used. Return FALSE if the types will never match. */ static int use_typecheck(type_T *actual, type_T *expected) { if (actual->tt_type == VAR_ANY || actual->tt_type == VAR_UNKNOWN || (actual->tt_type == VAR_FUNC && (expected->tt_type == VAR_FUNC || expected->tt_type == VAR_PARTIAL) && (actual->tt_member == &t_any || actual->tt_member == &t_unknown || actual->tt_argcount < 0) && (actual->tt_member == &t_unknown || (actual->tt_member == &t_void) == (expected->tt_member == &t_void)))) return TRUE; if ((actual->tt_type == VAR_LIST || actual->tt_type == VAR_DICT) && actual->tt_type == expected->tt_type) // This takes care of a nested list or dict. return use_typecheck(actual->tt_member, expected->tt_member); return FALSE; } /* * Check that * - "actual" matches "expected" type or * - "actual" is a type that can be "expected" type: add a runtime check; or * - return FAIL. * If "actual_is_const" is TRUE then the type won't change at runtime, do not * generate a TYPECHECK. */ static int need_type_where( type_T *actual, type_T *expected, int offset, where_T where, cctx_T *cctx, int silent, int actual_is_const) { int ret; if (expected == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { // Using "0", "1" or the result of an expression with "&&" or "||" as a // boolean is OK but requires a conversion. generate_2BOOL(cctx, FALSE, offset); return OK; } ret = check_type_maybe(expected, actual, FALSE, where); if (ret == OK) return OK; // If the actual type can be the expected type add a runtime check. // If it's a constant a runtime check makes no sense. if (!actual_is_const && ret == MAYBE && use_typecheck(actual, expected)) { generate_TYPECHECK(cctx, expected, offset, where.wt_index); return OK; } if (!silent) type_mismatch_where(expected, actual, where); return FAIL; } int need_type( type_T *actual, type_T *expected, int offset, int arg_idx, cctx_T *cctx, int silent, int actual_is_const) { where_T where = WHERE_INIT; where.wt_index = arg_idx; return need_type_where(actual, expected, offset, where, cctx, silent, actual_is_const); } /* * Reserve space for a local variable. * Return the variable or NULL if it failed. 
*/ lvar_T * reserve_local( cctx_T *cctx, char_u *name, size_t len, int isConst, type_T *type) { lvar_T *lvar; dfunc_T *dfunc; if (arg_exists(name, len, NULL, NULL, NULL, cctx) == OK) { emsg_namelen(_(e_str_is_used_as_argument), name, (int)len); return NULL; } if (GA_GROW_FAILS(&cctx->ctx_locals, 1)) return NULL; lvar = ((lvar_T *)cctx->ctx_locals.ga_data) + cctx->ctx_locals.ga_len++; CLEAR_POINTER(lvar); // Every local variable uses the next entry on the stack. We could re-use // the last ones when leaving a scope, but then variables used in a closure // might get overwritten. To keep things simple do not re-use stack // entries. This is less efficient, but memory is cheap these days. dfunc = ((dfunc_T *)def_functions.ga_data) + cctx->ctx_ufunc->uf_dfunc_idx; lvar->lv_idx = dfunc->df_var_names.ga_len; lvar->lv_name = vim_strnsave(name, len == 0 ? STRLEN(name) : len); lvar->lv_const = isConst; lvar->lv_type = type; // Remember the name for debugging. if (GA_GROW_FAILS(&dfunc->df_var_names, 1)) return NULL; ((char_u **)dfunc->df_var_names.ga_data)[lvar->lv_idx] = vim_strsave(lvar->lv_name); ++dfunc->df_var_names.ga_len; return lvar; } /* * If "check_writable" is ASSIGN_CONST give an error if the variable was * defined with :final or :const, if "check_writable" is ASSIGN_FINAL give an * error if the variable was defined with :const. */ static int check_item_writable(svar_T *sv, int check_writable, char_u *name) { if ((check_writable == ASSIGN_CONST && sv->sv_const != 0) || (check_writable == ASSIGN_FINAL && sv->sv_const == ASSIGN_CONST)) { semsg(_(e_cannot_change_readonly_variable_str), name); return FAIL; } return OK; } /* * Find "name" in script-local items of script "sid". * Pass "check_writable" to check_item_writable(). * Returns the index in "sn_var_vals" if found. * If found but not in "sn_var_vals" returns -1. * If not found or the variable is not writable returns -2. */ int get_script_item_idx(int sid, char_u *name, int check_writable, cctx_T *cctx) { hashtab_T *ht; dictitem_T *di; scriptitem_T *si = SCRIPT_ITEM(sid); svar_T *sv; int idx; if (!SCRIPT_ID_VALID(sid)) return -1; if (sid == current_sctx.sc_sid) { sallvar_T *sav = find_script_var(name, 0, cctx); if (sav == NULL) return -2; idx = sav->sav_var_vals_idx; sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } // First look the name up in the hashtable. ht = &SCRIPT_VARS(sid); di = find_var_in_ht(ht, 0, name, TRUE); if (di == NULL) return -2; // Now find the svar_T index in sn_var_vals. for (idx = 0; idx < si->sn_var_vals.ga_len; ++idx) { sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (sv->sv_tv == &di->di_tv) { if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } } return -1; } /* * Find "name" in imported items of the current script or in "cctx" if not * NULL. */ imported_T * find_imported(char_u *name, size_t len, cctx_T *cctx) { int idx; if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) return NULL; if (cctx != NULL) for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx; if (len == 0 ? 
STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return find_imported_in_script(name, len, current_sctx.sc_sid); } imported_T * find_imported_in_script(char_u *name, size_t len, int sid) { scriptitem_T *si; int idx; if (!SCRIPT_ID_VALID(sid)) return NULL; si = SCRIPT_ITEM(sid); for (idx = 0; idx < si->sn_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)si->sn_imports.ga_data) + idx; if (len == 0 ? STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return NULL; } /* * Free all imported variables. */ static void free_imported(cctx_T *cctx) { int idx; for (idx = 0; idx < cctx->ctx_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)cctx->ctx_imports.ga_data) + idx; vim_free(import->imp_name); } ga_clear(&cctx->ctx_imports); } /* * Called when checking for a following operator at "arg". When the rest of * the line is empty or only a comment, peek the next line. If there is a next * line return a pointer to it and set "nextp". * Otherwise skip over white space. */ char_u * may_peek_next_line(cctx_T *cctx, char_u *arg, char_u **nextp) { char_u *p = skipwhite(arg); *nextp = NULL; if (*p == NUL || (VIM_ISWHITE(*arg) && vim9_comment_start(p))) { *nextp = peek_next_line_from_context(cctx); if (*nextp != NULL) return *nextp; } return p; } /* * Return a pointer to the next line that isn't empty or only contains a * comment. Skips over white space. * Returns NULL if there is none. */ char_u * peek_next_line_from_context(cctx_T *cctx) { int lnum = cctx->ctx_lnum; while (++lnum < cctx->ctx_ufunc->uf_lines.ga_len) { char_u *line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[lnum]; char_u *p; // ignore NULLs inserted for continuation lines if (line != NULL) { p = skipwhite(line); if (vim9_bad_comment(p)) return NULL; if (*p != NUL && !vim9_comment_start(p)) return p; } } return NULL; } /* * Get the next line of the function from "cctx". * Skips over empty lines. Skips over comment lines if "skip_comment" is TRUE. * Returns NULL when at the end. */ char_u * next_line_from_context(cctx_T *cctx, int skip_comment) { char_u *line; do { ++cctx->ctx_lnum; if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len) { line = NULL; break; } line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; cctx->ctx_line_start = line; SOURCING_LNUM = cctx->ctx_lnum + 1; } while (line == NULL || *skipwhite(line) == NUL || (skip_comment && vim9_comment_start(skipwhite(line)))); return line; } /* * Skip over white space at "whitep" and assign to "*arg". * If "*arg" is at the end of the line, advance to the next line. * Also when "whitep" points to white space and "*arg" is on a "#". * Return FAIL if beyond the last line, "*arg" is unmodified then. */ int may_get_next_line(char_u *whitep, char_u **arg, cctx_T *cctx) { *arg = skipwhite(whitep); if (vim9_bad_comment(*arg)) return FAIL; if (**arg == NUL || (VIM_ISWHITE(*whitep) && vim9_comment_start(*arg))) { char_u *next = next_line_from_context(cctx, TRUE); if (next == NULL) return FAIL; *arg = skipwhite(next); } return OK; } /* * Idem, and give an error when failed. */ int may_get_next_line_error(char_u *whitep, char_u **arg, cctx_T *cctx) { if (may_get_next_line(whitep, arg, cctx) == FAIL) { SOURCING_LNUM = cctx->ctx_lnum + 1; emsg(_(e_line_incomplete)); return FAIL; } return OK; } /* * Get a line from the compilation context, compatible with exarg_T getline(). 
* Return a pointer to the line in allocated memory. * Return NULL for end-of-file or some error. */ static char_u * exarg_getline( int c UNUSED, void *cookie, int indent UNUSED, getline_opt_T options UNUSED) { cctx_T *cctx = (cctx_T *)cookie; char_u *p; for (;;) { if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len - 1) return NULL; ++cctx->ctx_lnum; p = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; // Comment lines result in NULL pointers, skip them. if (p != NULL) return vim_strsave(p); } } void fill_exarg_from_cctx(exarg_T *eap, cctx_T *cctx) { eap->getline = exarg_getline; eap->cookie = cctx; } /* * Return TRUE if "ufunc" should be compiled, taking into account whether * "profile" indicates profiling is to be done. */ int func_needs_compiling(ufunc_T *ufunc, compiletype_T compile_type) { switch (ufunc->uf_def_status) { case UF_TO_BE_COMPILED: return TRUE; case UF_COMPILED: { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE return dfunc->df_instr_prof == NULL; #endif case CT_NONE: return dfunc->df_instr == NULL; case CT_DEBUG: return dfunc->df_instr_debug == NULL; } } case UF_NOT_COMPILED: case UF_COMPILE_ERROR: case UF_COMPILING: break; } return FALSE; } /* * Compile a nested :def command. */ static char_u * compile_nested_function(exarg_T *eap, cctx_T *cctx, char_u **line_to_free) { int is_global = *eap->arg == 'g' && eap->arg[1] == ':'; char_u *name_start = eap->arg; char_u *name_end = to_name_end(eap->arg, TRUE); int off; char_u *func_name; char_u *lambda_name; ufunc_T *ufunc; int r = FAIL; compiletype_T compile_type; if (eap->forceit) { emsg(_(e_cannot_use_bang_with_nested_def)); return NULL; } if (*name_start == '/') { name_end = skip_regexp(name_start + 1, '/', TRUE); if (*name_end == '/') ++name_end; set_nextcmd(eap, name_end); } if (name_end == name_start || *skipwhite(name_end) != '(') { if (!ends_excmd2(name_start, name_end)) { semsg(_(e_invalid_command_str), eap->cmd); return NULL; } // "def" or "def Name": list functions if (generate_DEF(cctx, name_start, name_end - name_start) == FAIL) return NULL; return eap->nextcmd == NULL ? (char_u *)"" : eap->nextcmd; } // Only g:Func() can use a namespace. if (name_start[1] == ':' && !is_global) { semsg(_(e_namespace_not_supported_str), name_start); return NULL; } if (check_defined(name_start, name_end - name_start, cctx, FALSE) == FAIL) return NULL; eap->arg = name_end; fill_exarg_from_cctx(eap, cctx); eap->forceit = FALSE; // We use the special <Lamba>99 name, but it's not really a lambda. lambda_name = vim_strsave(get_lambda_name()); if (lambda_name == NULL) return NULL; // This may free the current line, make a copy of the name. off = is_global ? 2 : 0; func_name = vim_strnsave(name_start + off, name_end - name_start - off); if (func_name == NULL) { r = FAIL; goto theend; } ufunc = define_function(eap, lambda_name, line_to_free); if (ufunc == NULL) { r = eap->skip ? OK : FAIL; goto theend; } // copy over the block scope IDs before compiling if (!is_global && cctx->ctx_ufunc->uf_block_depth > 0) { int block_depth = cctx->ctx_ufunc->uf_block_depth; ufunc->uf_block_ids = ALLOC_MULT(int, block_depth); if (ufunc->uf_block_ids != NULL) { mch_memmove(ufunc->uf_block_ids, cctx->ctx_ufunc->uf_block_ids, sizeof(int) * block_depth); ufunc->uf_block_depth = block_depth; } } compile_type = COMPILE_TYPE(ufunc); #ifdef FEAT_PROFILE // If the outer function is profiled, also compile the nested function for // profiling. 
if (cctx->ctx_compile_type == CT_PROFILE) compile_type = CT_PROFILE; #endif if (func_needs_compiling(ufunc, compile_type) && compile_def_function(ufunc, TRUE, compile_type, cctx) == FAIL) { func_ptr_unref(ufunc); goto theend; } #ifdef FEAT_PROFILE // When the outer function is compiled for profiling, the nested function // may be called without profiling. Compile it here in the right context. if (compile_type == CT_PROFILE && func_needs_compiling(ufunc, CT_NONE)) compile_def_function(ufunc, FALSE, CT_NONE, cctx); #endif if (is_global) { r = generate_NEWFUNC(cctx, lambda_name, func_name); func_name = NULL; lambda_name = NULL; } else { // Define a local variable for the function reference. lvar_T *lvar = reserve_local(cctx, func_name, name_end - name_start, TRUE, ufunc->uf_func_type); if (lvar == NULL) goto theend; if (generate_FUNCREF(cctx, ufunc) == FAIL) goto theend; r = generate_STORE(cctx, ISN_STORE, lvar->lv_idx, NULL); } theend: vim_free(lambda_name); vim_free(func_name); return r == FAIL ? NULL : (char_u *)""; } /* * Return the length of an assignment operator, or zero if there isn't one. */ int assignment_len(char_u *p, int *heredoc) { if (*p == '=') { if (p[1] == '<' && p[2] == '<') { *heredoc = TRUE; return 3; } return 1; } if (vim_strchr((char_u *)"+-*/%", *p) != NULL && p[1] == '=') return 2; if (STRNCMP(p, "..=", 3) == 0) return 3; return 0; } /* * Generate the load instruction for "name". */ static void generate_loadvar( cctx_T *cctx, assign_dest_T dest, char_u *name, lvar_T *lvar, type_T *type) { switch (dest) { case dest_option: case dest_func_option: generate_LOAD(cctx, ISN_LOADOPT, 0, name, type); break; case dest_global: if (vim_strchr(name, AUTOLOAD_CHAR) == NULL) generate_LOAD(cctx, ISN_LOADG, 0, name + 2, type); else generate_LOAD(cctx, ISN_LOADAUTO, 0, name, type); break; case dest_buffer: generate_LOAD(cctx, ISN_LOADB, 0, name + 2, type); break; case dest_window: generate_LOAD(cctx, ISN_LOADW, 0, name + 2, type); break; case dest_tab: generate_LOAD(cctx, ISN_LOADT, 0, name + 2, type); break; case dest_script: compile_load_scriptvar(cctx, name + (name[1] == ':' ? 2 : 0), NULL, NULL, TRUE); break; case dest_env: // Include $ in the name here generate_LOAD(cctx, ISN_LOADENV, 0, name, type); break; case dest_reg: generate_LOAD(cctx, ISN_LOADREG, name[1], NULL, &t_string); break; case dest_vimvar: generate_LOADV(cctx, name + 2, TRUE); break; case dest_local: if (lvar->lv_from_outer > 0) generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type); else generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type); break; case dest_expr: // list or dict value should already be on the stack. break; } } /* * Skip over "[expr]" or ".member". * Does not check for any errors. 
*/ static char_u * skip_index(char_u *start) { char_u *p = start; if (*p == '[') { p = skipwhite(p + 1); (void)skip_expr(&p, NULL); p = skipwhite(p); if (*p == ']') return p + 1; return p; } // if (*p == '.') return to_name_end(p + 1, TRUE); } void vim9_declare_error(char_u *name) { char *scope = ""; switch (*name) { case 'g': scope = _("global"); break; case 'b': scope = _("buffer"); break; case 'w': scope = _("window"); break; case 't': scope = _("tab"); break; case 'v': scope = "v:"; break; case '$': semsg(_(e_cannot_declare_an_environment_variable), name); return; case '&': semsg(_(e_cannot_declare_an_option), name); return; case '@': semsg(_(e_cannot_declare_a_register_str), name); return; default: return; } semsg(_(e_cannot_declare_a_scope_variable), scope, name); } /* * For one assignment figure out the type of destination. Return it in "dest". * When not recognized "dest" is not set. * For an option "option_scope" is set. * For a v:var "vimvaridx" is set. * "type" is set to the destination type if known, unchanted otherwise. * Return FAIL if an error message was given. */ int get_var_dest( char_u *name, assign_dest_T *dest, int cmdidx, int *option_scope, int *vimvaridx, type_T **type, cctx_T *cctx) { char_u *p; if (*name == '&') { int cc; long numval; getoption_T opt_type; int opt_p_flags; *dest = dest_option; if (cmdidx == CMD_final || cmdidx == CMD_const) { emsg(_(e_const_option)); return FAIL; } p = name; p = find_option_end(&p, option_scope); if (p == NULL) { // cannot happen? emsg(_(e_unexpected_characters_in_assignment)); return FAIL; } cc = *p; *p = NUL; opt_type = get_option_value(skip_option_env_lead(name), &numval, NULL, &opt_p_flags, *option_scope); *p = cc; switch (opt_type) { case gov_unknown: semsg(_(e_unknown_option_str), name); return FAIL; case gov_string: case gov_hidden_string: if (opt_p_flags & P_FUNC) { // might be a Funcref, check the type later *type = &t_any; *dest = dest_func_option; } else { *type = &t_string; } break; case gov_bool: case gov_hidden_bool: *type = &t_bool; break; case gov_number: case gov_hidden_number: *type = &t_number; break; } } else if (*name == '$') { *dest = dest_env; *type = &t_string; } else if (*name == '@') { if (name[1] != '@' && (!valid_yank_reg(name[1], FALSE) || name[1] == '.')) { emsg_invreg(name[1]); return FAIL; } *dest = dest_reg; *type = name[1] == '#' ? &t_number_or_string : &t_string; } else if (STRNCMP(name, "g:", 2) == 0) { *dest = dest_global; } else if (STRNCMP(name, "b:", 2) == 0) { *dest = dest_buffer; } else if (STRNCMP(name, "w:", 2) == 0) { *dest = dest_window; } else if (STRNCMP(name, "t:", 2) == 0) { *dest = dest_tab; } else if (STRNCMP(name, "v:", 2) == 0) { typval_T *vtv; int di_flags; *vimvaridx = find_vim_var(name + 2, &di_flags); if (*vimvaridx < 0) { semsg(_(e_variable_not_found_str), name); return FAIL; } // We use the current value of "sandbox" here, is that OK? if (var_check_ro(di_flags, name, FALSE)) return FAIL; *dest = dest_vimvar; vtv = get_vim_var_tv(*vimvaridx); *type = typval2type_vimvar(vtv, cctx->ctx_type_list); } return OK; } static int is_decl_command(int cmdidx) { return cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_final || cmdidx == CMD_const; } /* * Figure out the LHS type and other properties for an assignment or one item * of ":unlet" with an index. * Returns OK or FAIL. 
*/ int compile_lhs( char_u *var_start, lhs_T *lhs, int cmdidx, int heredoc, int oplen, cctx_T *cctx) { char_u *var_end; int is_decl = is_decl_command(cmdidx); CLEAR_POINTER(lhs); lhs->lhs_dest = dest_local; lhs->lhs_vimvaridx = -1; lhs->lhs_scriptvar_idx = -1; // "dest_end" is the end of the destination, including "[expr]" or // ".name". // "var_end" is the end of the variable/option/etc. name. lhs->lhs_dest_end = skip_var_one(var_start, FALSE); if (*var_start == '@') var_end = var_start + 2; else { // skip over the leading "&", "&l:", "&g:" and "$" var_end = skip_option_env_lead(var_start); var_end = to_name_end(var_end, TRUE); } // "a: type" is declaring variable "a" with a type, not dict "a:". if (is_decl && lhs->lhs_dest_end == var_start + 2 && lhs->lhs_dest_end[-1] == ':') --lhs->lhs_dest_end; if (is_decl && var_end == var_start + 2 && var_end[-1] == ':') --var_end; lhs->lhs_end = lhs->lhs_dest_end; // compute the length of the destination without "[expr]" or ".name" lhs->lhs_varlen = var_end - var_start; lhs->lhs_varlen_total = lhs->lhs_varlen; lhs->lhs_name = vim_strnsave(var_start, lhs->lhs_varlen); if (lhs->lhs_name == NULL) return FAIL; if (lhs->lhs_dest_end > var_start + lhs->lhs_varlen) // Something follows after the variable: "var[idx]" or "var.key". lhs->lhs_has_index = TRUE; if (heredoc) lhs->lhs_type = &t_list_string; else lhs->lhs_type = &t_any; if (cctx->ctx_skip != SKIP_YES) { int declare_error = FALSE; if (get_var_dest(lhs->lhs_name, &lhs->lhs_dest, cmdidx, &lhs->lhs_opt_flags, &lhs->lhs_vimvaridx, &lhs->lhs_type, cctx) == FAIL) return FAIL; if (lhs->lhs_dest != dest_local && cmdidx != CMD_const && cmdidx != CMD_final) { // Specific kind of variable recognized. declare_error = is_decl; } else { // No specific kind of variable recognized, just a name. if (check_reserved_name(lhs->lhs_name) == FAIL) return FAIL; if (lookup_local(var_start, lhs->lhs_varlen, &lhs->lhs_local_lvar, cctx) == OK) lhs->lhs_lvar = &lhs->lhs_local_lvar; else { CLEAR_FIELD(lhs->lhs_arg_lvar); if (arg_exists(var_start, lhs->lhs_varlen, &lhs->lhs_arg_lvar.lv_idx, &lhs->lhs_arg_lvar.lv_type, &lhs->lhs_arg_lvar.lv_from_outer, cctx) == OK) { if (is_decl) { semsg(_(e_str_is_used_as_argument), lhs->lhs_name); return FAIL; } lhs->lhs_lvar = &lhs->lhs_arg_lvar; } } if (lhs->lhs_lvar != NULL) { if (is_decl) { semsg(_(e_variable_already_declared), lhs->lhs_name); return FAIL; } } else { int script_namespace = lhs->lhs_varlen > 1 && STRNCMP(var_start, "s:", 2) == 0; int script_var = (script_namespace ? script_var_exists(var_start + 2, lhs->lhs_varlen - 2, cctx) : script_var_exists(var_start, lhs->lhs_varlen, cctx)) == OK; imported_T *import = find_imported(var_start, lhs->lhs_varlen, cctx); if (script_namespace || script_var || import != NULL) { char_u *rawname = lhs->lhs_name + (lhs->lhs_name[1] == ':' ? 2 : 0); if (is_decl) { if (script_namespace) semsg(_(e_cannot_declare_script_variable_in_function), lhs->lhs_name); else semsg(_(e_variable_already_declared_in_script_str), lhs->lhs_name); return FAIL; } else if (cctx->ctx_ufunc->uf_script_ctx_version == SCRIPT_VERSION_VIM9 && script_namespace && !script_var && import == NULL) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } lhs->lhs_dest = dest_script; // existing script-local variables should have a type lhs->lhs_scriptvar_sid = current_sctx.sc_sid; if (import != NULL) lhs->lhs_scriptvar_sid = import->imp_sid; if (SCRIPT_ID_VALID(lhs->lhs_scriptvar_sid)) { // Check writable only when no index follows. 
lhs->lhs_scriptvar_idx = get_script_item_idx( lhs->lhs_scriptvar_sid, rawname, lhs->lhs_has_index ? ASSIGN_FINAL : ASSIGN_CONST, cctx); if (lhs->lhs_scriptvar_idx >= 0) { scriptitem_T *si = SCRIPT_ITEM( lhs->lhs_scriptvar_sid); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + lhs->lhs_scriptvar_idx; lhs->lhs_type = sv->sv_type; } } } else if (check_defined(var_start, lhs->lhs_varlen, cctx, FALSE) == FAIL) return FAIL; } } if (declare_error) { vim9_declare_error(lhs->lhs_name); return FAIL; } } // handle "a:name" as a name, not index "name" in "a" if (lhs->lhs_varlen > 1 || var_start[lhs->lhs_varlen] != ':') var_end = lhs->lhs_dest_end; if (lhs->lhs_dest != dest_option && lhs->lhs_dest != dest_func_option) { if (is_decl && *var_end == ':') { char_u *p; // parse optional type: "let var: type = expr" if (!VIM_ISWHITE(var_end[1])) { semsg(_(e_white_space_required_after_str_str), ":", var_end); return FAIL; } p = skipwhite(var_end + 1); lhs->lhs_type = parse_type(&p, cctx->ctx_type_list, TRUE); if (lhs->lhs_type == NULL) return FAIL; lhs->lhs_has_type = TRUE; lhs->lhs_end = p; } else if (lhs->lhs_lvar != NULL) lhs->lhs_type = lhs->lhs_lvar->lv_type; } if (oplen == 3 && !heredoc && lhs->lhs_dest != dest_global && !lhs->lhs_has_index && lhs->lhs_type->tt_type != VAR_STRING && lhs->lhs_type->tt_type != VAR_ANY) { emsg(_(e_can_only_concatenate_to_string)); return FAIL; } if (lhs->lhs_lvar == NULL && lhs->lhs_dest == dest_local && cctx->ctx_skip != SKIP_YES) { if (oplen > 1 && !heredoc) { // +=, /=, etc. require an existing variable semsg(_(e_cannot_use_operator_on_new_variable), lhs->lhs_name); return FAIL; } if (!is_decl) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } // Check the name is valid for a funcref. if ((lhs->lhs_type->tt_type == VAR_FUNC || lhs->lhs_type->tt_type == VAR_PARTIAL) && var_wrong_func_name(lhs->lhs_name, TRUE)) return FAIL; // New local variable. lhs->lhs_lvar = reserve_local(cctx, var_start, lhs->lhs_varlen, cmdidx == CMD_final || cmdidx == CMD_const, lhs->lhs_type); if (lhs->lhs_lvar == NULL) return FAIL; lhs->lhs_new_local = TRUE; } lhs->lhs_member_type = lhs->lhs_type; if (lhs->lhs_has_index) { char_u *after = var_start + lhs->lhs_varlen; char_u *p; // Something follows after the variable: "var[idx]" or "var.key". if (is_decl) { emsg(_(e_cannot_use_index_when_declaring_variable)); return FAIL; } // Now: var_start[lhs->lhs_varlen] is '[' or '.' // Only the last index is used below, if there are others // before it generate code for the expression. Thus for // "ll[1][2]" the expression is "ll[1]" and "[2]" is the index. for (;;) { p = skip_index(after); if (*p != '[' && *p != '.') { lhs->lhs_varlen_total = p - var_start; break; } after = p; } if (after > var_start + lhs->lhs_varlen) { lhs->lhs_varlen = after - var_start; lhs->lhs_dest = dest_expr; // We don't know the type before evaluating the expression, // use "any" until then. lhs->lhs_type = &t_any; } if (lhs->lhs_type->tt_member == NULL) lhs->lhs_member_type = &t_any; else lhs->lhs_member_type = lhs->lhs_type->tt_member; } return OK; } /* * Figure out the LHS and check a few errors. 
*/ int compile_assign_lhs( char_u *var_start, lhs_T *lhs, int cmdidx, int is_decl, int heredoc, int oplen, cctx_T *cctx) { if (compile_lhs(var_start, lhs, cmdidx, heredoc, oplen, cctx) == FAIL) return FAIL; if (!lhs->lhs_has_index && lhs->lhs_lvar == &lhs->lhs_arg_lvar) { semsg(_(e_cannot_assign_to_argument), lhs->lhs_name); return FAIL; } if (!is_decl && lhs->lhs_lvar != NULL && lhs->lhs_lvar->lv_const && !lhs->lhs_has_index) { semsg(_(e_cannot_assign_to_constant), lhs->lhs_name); return FAIL; } return OK; } /* * Return TRUE if "lhs" has a range index: "[expr : expr]". */ static int has_list_index(char_u *idx_start, cctx_T *cctx) { char_u *p = idx_start; int save_skip; if (*p != '[') return FALSE; p = skipwhite(p + 1); if (*p == ':') return TRUE; save_skip = cctx->ctx_skip; cctx->ctx_skip = SKIP_YES; (void)compile_expr0(&p, cctx); cctx->ctx_skip = save_skip; return *skipwhite(p) == ':'; } /* * For an assignment with an index, compile the "idx" in "var[idx]" or "key" in * "var.key". */ static int compile_assign_index( char_u *var_start, lhs_T *lhs, int *range, cctx_T *cctx) { size_t varlen = lhs->lhs_varlen; char_u *p; int r = OK; int need_white_before = TRUE; int empty_second; p = var_start + varlen; if (*p == '[') { p = skipwhite(p + 1); if (*p == ':') { // empty first index, push zero r = generate_PUSHNR(cctx, 0); need_white_before = FALSE; } else r = compile_expr0(&p, cctx); if (r == OK && *skipwhite(p) == ':') { // unlet var[idx : idx] // blob[idx : idx] = value *range = TRUE; p = skipwhite(p); empty_second = *skipwhite(p + 1) == ']'; if ((need_white_before && !IS_WHITE_OR_NUL(p[-1])) || (!empty_second && !IS_WHITE_OR_NUL(p[1]))) { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", p); return FAIL; } p = skipwhite(p + 1); if (*p == ']') // empty second index, push "none" r = generate_PUSHSPEC(cctx, VVAL_NONE); else r = compile_expr0(&p, cctx); } if (r == OK && *skipwhite(p) != ']') { // this should not happen emsg(_(e_missing_closing_square_brace)); r = FAIL; } } else // if (*p == '.') { char_u *key_end = to_name_end(p + 1, TRUE); char_u *key = vim_strnsave(p + 1, key_end - p - 1); r = generate_PUSHS(cctx, &key); } return r; } /* * For a LHS with an index, load the variable to be indexed. */ static int compile_load_lhs( lhs_T *lhs, char_u *var_start, type_T *rhs_type, cctx_T *cctx) { if (lhs->lhs_dest == dest_expr) { size_t varlen = lhs->lhs_varlen; int c = var_start[varlen]; int lines_len = cctx->ctx_ufunc->uf_lines.ga_len; char_u *p = var_start; garray_T *stack = &cctx->ctx_type_stack; int res; // Evaluate "ll[expr]" of "ll[expr][idx]". End the line with a NUL and // limit the lines array length to avoid skipping to a following line. var_start[varlen] = NUL; cctx->ctx_ufunc->uf_lines.ga_len = cctx->ctx_lnum + 1; res = compile_expr0(&p, cctx); var_start[varlen] = c; cctx->ctx_ufunc->uf_lines.ga_len = lines_len; if (res == FAIL || p != var_start + varlen) { // this should not happen if (res != FAIL) emsg(_(e_missing_closing_square_brace)); return FAIL; } lhs->lhs_type = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; // now we can properly check the type if (rhs_type != NULL && lhs->lhs_type->tt_member != NULL && rhs_type != &t_void && need_type(rhs_type, lhs->lhs_type->tt_member, -2, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } else generate_loadvar(cctx, lhs->lhs_dest, lhs->lhs_name, lhs->lhs_lvar, lhs->lhs_type); return OK; } /* * Produce code for loading "lhs" and also take care of an index. * Return OK/FAIL. 
*/ int compile_load_lhs_with_index(lhs_T *lhs, char_u *var_start, cctx_T *cctx) { compile_load_lhs(lhs, var_start, NULL, cctx); if (lhs->lhs_has_index) { int range = FALSE; // Get member from list or dict. First compile the // index value. if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (range) { semsg(_(e_cannot_use_range_with_assignment_operator_str), var_start); return FAIL; } // Get the member. if (compile_member(FALSE, NULL, cctx) == FAIL) return FAIL; } return OK; } /* * Assignment to a list or dict member, or ":unlet" for the item, using the * information in "lhs". * Returns OK or FAIL. */ int compile_assign_unlet( char_u *var_start, lhs_T *lhs, int is_assign, type_T *rhs_type, cctx_T *cctx) { vartype_T dest_type; garray_T *stack = &cctx->ctx_type_stack; int range = FALSE; if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (is_assign && range && lhs->lhs_type->tt_type != VAR_LIST && lhs->lhs_type != &t_blob && lhs->lhs_type != &t_any) { semsg(_(e_cannot_use_range_with_assignment_str), var_start); return FAIL; } if (lhs->lhs_type == &t_any) { // Index on variable of unknown type: check at runtime. dest_type = VAR_ANY; } else { dest_type = lhs->lhs_type->tt_type; if (dest_type == VAR_DICT && range) { emsg(e_cannot_use_range_with_dictionary); return FAIL; } if (dest_type == VAR_DICT && may_generate_2STRING(-1, FALSE, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_BLOB) { type_T *type; if (range) { type = ((type_T **)stack->ga_data)[stack->ga_len - 2]; if (need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; if ((dest_type != VAR_BLOB && type != &t_special) && need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } } // Load the dict or list. On the stack we then have: // - value (for assignment, not for :unlet) // - index // - for [a : b] second index // - variable if (compile_load_lhs(lhs, var_start, rhs_type, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_DICT || dest_type == VAR_BLOB || dest_type == VAR_ANY) { if (is_assign) { if (range) { if (generate_instr_drop(cctx, ISN_STORERANGE, 4) == NULL) return FAIL; } else { isn_T *isn = generate_instr_drop(cctx, ISN_STOREINDEX, 3); if (isn == NULL) return FAIL; isn->isn_arg.vartype = dest_type; } } else if (range) { if (generate_instr_drop(cctx, ISN_UNLETRANGE, 3) == NULL) return FAIL; } else { if (generate_instr_drop(cctx, ISN_UNLETINDEX, 2) == NULL) return FAIL; } } else { emsg(_(e_indexable_type_required)); return FAIL; } return OK; } /* * Compile declaration and assignment: * "let name" * "var name = expr" * "final name = expr" * "const name = expr" * "name = expr" * "arg" points to "name". * "++arg" and "--arg" * Return NULL for an error. * Return "arg" if it does not look like a variable list. */ static char_u * compile_assignment(char_u *arg, exarg_T *eap, cmdidx_T cmdidx, cctx_T *cctx) { char_u *var_start; char_u *p; char_u *end = arg; char_u *ret = NULL; int var_count = 0; int var_idx; int semicolon = 0; int did_generate_slice = FALSE; garray_T *instr = &cctx->ctx_instr; garray_T *stack = &cctx->ctx_type_stack; char_u *op; int oplen = 0; int heredoc = FALSE; int incdec = FALSE; type_T *rhs_type = &t_any; char_u *sp; int is_decl = is_decl_command(cmdidx); lhs_T lhs; long start_lnum = SOURCING_LNUM; // Skip over the "var" or "[var, var]" to get to any "=". 
p = skip_var_list(arg, TRUE, &var_count, &semicolon, TRUE); if (p == NULL) return *arg == '[' ? arg : NULL; lhs.lhs_name = NULL; sp = p; p = skipwhite(p); op = p; oplen = assignment_len(p, &heredoc); if (var_count > 0 && oplen == 0) // can be something like "[1, 2]->func()" return arg; if (oplen > 0 && (!VIM_ISWHITE(*sp) || !IS_WHITE_OR_NUL(op[oplen]))) { error_white_both(op, oplen); return NULL; } if (eap->cmdidx == CMD_increment || eap->cmdidx == CMD_decrement) { if (VIM_ISWHITE(eap->cmd[2])) { semsg(_(e_no_white_space_allowed_after_str_str), eap->cmdidx == CMD_increment ? "++" : "--", eap->cmd); return NULL; } op = (char_u *)(eap->cmdidx == CMD_increment ? "+=" : "-="); oplen = 2; incdec = TRUE; } if (heredoc) { list_T *l; listitem_T *li; // [let] varname =<< [trim] {end} eap->getline = exarg_getline; eap->cookie = cctx; l = heredoc_get(eap, op + 3, FALSE); if (l == NULL) return NULL; if (cctx->ctx_skip != SKIP_YES) { // Push each line and the create the list. FOR_ALL_LIST_ITEMS(l, li) { generate_PUSHS(cctx, &li->li_tv.vval.v_string); li->li_tv.vval.v_string = NULL; } generate_NEWLIST(cctx, l->lv_len); } list_free(l); p += STRLEN(p); end = p; } else if (var_count > 0) { char_u *wp; // for "[var, var] = expr" evaluate the expression here, loop over the // list of variables below. // A line break may follow the "=". wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) return FAIL; if (compile_expr0(&p, cctx) == FAIL) return NULL; end = p; if (cctx->ctx_skip != SKIP_YES) { type_T *stacktype; int needed_list_len; int did_check = FALSE; stacktype = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; if (stacktype->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } if (need_type(stacktype, &t_list_any, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; // If a constant list was used we can check the length right here. needed_list_len = semicolon ? var_count - 1 : var_count; if (instr->ga_len > 0) { isn_T *isn = ((isn_T *)instr->ga_data) + instr->ga_len - 1; if (isn->isn_type == ISN_NEWLIST) { did_check = TRUE; if (semicolon ? isn->isn_arg.number < needed_list_len : isn->isn_arg.number != needed_list_len) { semsg(_(e_expected_nr_items_but_got_nr), needed_list_len, isn->isn_arg.number); goto theend; } } } if (!did_check) generate_CHECKLEN(cctx, needed_list_len, semicolon); if (stacktype->tt_member != NULL) rhs_type = stacktype->tt_member; } } /* * Loop over variables in "[var, var] = expr". * For "var = expr" and "let var: type" this is done only once. */ if (var_count > 0) var_start = skipwhite(arg + 1); // skip over the "[" else var_start = arg; for (var_idx = 0; var_idx == 0 || var_idx < var_count; var_idx++) { int instr_count = -1; int save_lnum; int skip_store = FALSE; if (var_start[0] == '_' && !eval_isnamec(var_start[1])) { // Ignore underscore in "[a, _, b] = list". if (var_count > 0) { var_start = skipwhite(var_start + 2); continue; } emsg(_(e_cannot_use_underscore_here)); goto theend; } vim_free(lhs.lhs_name); /* * Figure out the LHS type and other properties. 
*/ if (compile_assign_lhs(var_start, &lhs, cmdidx, is_decl, heredoc, oplen, cctx) == FAIL) goto theend; if (heredoc) { SOURCING_LNUM = start_lnum; if (lhs.lhs_has_type && need_type(&t_list_string, lhs.lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } else { if (cctx->ctx_skip == SKIP_YES) { if (oplen > 0 && var_count == 0) { // skip over the "=" and the expression p = skipwhite(op + oplen); (void)compile_expr0(&p, cctx); } } else if (oplen > 0) { int is_const = FALSE; char_u *wp; // for "+=", "*=", "..=" etc. first load the current value if (*op != '=' && compile_load_lhs_with_index(&lhs, var_start, cctx) == FAIL) goto theend; // For "var = expr" evaluate the expression. if (var_count == 0) { int r; // Compile the expression. instr_count = instr->ga_len; if (incdec) { r = generate_PUSHNR(cctx, 1); } else { // Temporarily hide the new local variable here, it is // not available to this expression. if (lhs.lhs_new_local) --cctx->ctx_locals.ga_len; wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) { if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; goto theend; } r = compile_expr0_ext(&p, cctx, &is_const); if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; if (r == FAIL) goto theend; } } else if (semicolon && var_idx == var_count - 1) { // For "[var; var] = expr" get the rest of the list did_generate_slice = TRUE; if (generate_SLICE(cctx, var_count - 1) == FAIL) goto theend; } else { // For "[var, var] = expr" get the "var_idx" item from the // list. if (generate_GETITEM(cctx, var_idx, *op != '=') == FAIL) goto theend; } rhs_type = stack->ga_len == 0 ? &t_void : ((type_T **)stack->ga_data)[stack->ga_len - 1]; if (lhs.lhs_lvar != NULL && (is_decl || !lhs.lhs_has_type)) { if ((rhs_type->tt_type == VAR_FUNC || rhs_type->tt_type == VAR_PARTIAL) && !lhs.lhs_has_index && var_wrong_func_name(lhs.lhs_name, TRUE)) goto theend; if (lhs.lhs_new_local && !lhs.lhs_has_type) { if (rhs_type->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } else { // An empty list or dict has a &t_unknown member, // for a variable that implies &t_any. if (rhs_type == &t_list_empty) lhs.lhs_lvar->lv_type = &t_list_any; else if (rhs_type == &t_dict_empty) lhs.lhs_lvar->lv_type = &t_dict_any; else if (rhs_type == &t_unknown) lhs.lhs_lvar->lv_type = &t_any; else lhs.lhs_lvar->lv_type = rhs_type; } } else if (*op == '=') { type_T *use_type = lhs.lhs_lvar->lv_type; where_T where = WHERE_INIT; // Without operator check type here, otherwise below. // Use the line number of the assignment. SOURCING_LNUM = start_lnum; where.wt_index = var_count > 0 ? var_idx + 1 : 0; where.wt_variable = var_count > 0; // If assigning to a list or dict member, use the // member type. Not for "list[:] =". if (lhs.lhs_has_index && !has_list_index(var_start + lhs.lhs_varlen, cctx)) use_type = lhs.lhs_member_type; if (need_type_where(rhs_type, use_type, -1, where, cctx, FALSE, is_const) == FAIL) goto theend; } } else { type_T *lhs_type = lhs.lhs_member_type; // Special case: assigning to @# can use a number or a // string. // Also: can assign a number to a float. 
if ((lhs_type == &t_number_or_string || lhs_type == &t_float) && rhs_type->tt_type == VAR_NUMBER) lhs_type = &t_number; if (*p != '=' && need_type(rhs_type, lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } } else if (cmdidx == CMD_final) { emsg(_(e_final_requires_a_value)); goto theend; } else if (cmdidx == CMD_const) { emsg(_(e_const_requires_a_value)); goto theend; } else if (!lhs.lhs_has_type || lhs.lhs_dest == dest_option || lhs.lhs_dest == dest_func_option) { emsg(_(e_type_or_initialization_required)); goto theend; } else { // variables are always initialized if (GA_GROW_FAILS(instr, 1)) goto theend; switch (lhs.lhs_member_type->tt_type) { case VAR_BOOL: generate_PUSHBOOL(cctx, VVAL_FALSE); break; case VAR_FLOAT: #ifdef FEAT_FLOAT generate_PUSHF(cctx, 0.0); #endif break; case VAR_STRING: generate_PUSHS(cctx, NULL); break; case VAR_BLOB: generate_PUSHBLOB(cctx, blob_alloc()); break; case VAR_FUNC: generate_PUSHFUNC(cctx, NULL, &t_func_void); break; case VAR_LIST: generate_NEWLIST(cctx, 0); break; case VAR_DICT: generate_NEWDICT(cctx, 0); break; case VAR_JOB: generate_PUSHJOB(cctx, NULL); break; case VAR_CHANNEL: generate_PUSHCHANNEL(cctx, NULL); break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_PARTIAL: case VAR_VOID: case VAR_INSTR: case VAR_SPECIAL: // cannot happen // This is skipped for local variables, they are // always initialized to zero. if (lhs.lhs_dest == dest_local) skip_store = TRUE; else generate_PUSHNR(cctx, 0); break; } } if (var_count == 0) end = p; } // no need to parse more when skipping if (cctx->ctx_skip == SKIP_YES) break; if (oplen > 0 && *op != '=') { type_T *expected; type_T *stacktype = NULL; if (*op == '.') { if (may_generate_2STRING(-1, FALSE, cctx) == FAIL) goto theend; } else { expected = lhs.lhs_member_type; stacktype = ((type_T **)stack->ga_data)[stack->ga_len - 1]; if ( #ifdef FEAT_FLOAT // If variable is float operation with number is OK. !(expected == &t_float && (stacktype == &t_number || stacktype == &t_number_bool)) && #endif need_type(stacktype, expected, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } if (*op == '.') { if (generate_instr_drop(cctx, ISN_CONCAT, 1) == NULL) goto theend; } else if (*op == '+') { if (generate_add_instr(cctx, operator_type(lhs.lhs_member_type, stacktype), lhs.lhs_member_type, stacktype, EXPR_APPEND) == FAIL) goto theend; } else if (generate_two_op(cctx, op) == FAIL) goto theend; } // Use the line number of the assignment for store instruction. save_lnum = cctx->ctx_lnum; cctx->ctx_lnum = start_lnum - 1; if (lhs.lhs_has_index) { // Use the info in "lhs" to store the value at the index in the // list or dict. if (compile_assign_unlet(var_start, &lhs, TRUE, rhs_type, cctx) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } else { if (is_decl && cmdidx == CMD_const && (lhs.lhs_dest == dest_script || lhs.lhs_dest == dest_global || lhs.lhs_dest == dest_local)) // ":const var": lock the value, but not referenced variables generate_LOCKCONST(cctx); if (is_decl && (lhs.lhs_type->tt_type == VAR_DICT || lhs.lhs_type->tt_type == VAR_LIST) && lhs.lhs_type->tt_member != NULL && !(lhs.lhs_type->tt_member == &t_any && oplen > 0 && rhs_type != NULL && rhs_type->tt_type == lhs.lhs_type->tt_type && rhs_type->tt_member != &t_unknown) && lhs.lhs_type->tt_member != &t_unknown) // Set the type in the list or dict, so that it can be checked, // also in legacy script. Not for "list<any> = val", then the // type of "val" is used. 
generate_SETTYPE(cctx, lhs.lhs_type); if (!skip_store && generate_store_lhs(cctx, &lhs, instr_count, is_decl) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } cctx->ctx_lnum = save_lnum; if (var_idx + 1 < var_count) var_start = skipwhite(lhs.lhs_end + 1); } // For "[var, var] = expr" drop the "expr" value. // Also for "[var, var; _] = expr". if (var_count > 0 && (!semicolon || !did_generate_slice)) { if (generate_instr_drop(cctx, ISN_DROP, 1) == NULL) goto theend; } ret = skipwhite(end); theend: vim_free(lhs.lhs_name); return ret; } /* * Check for an assignment at "eap->cmd", compile it if found. * Return NOTDONE if there is none, FAIL for failure, OK if done. */ static int may_compile_assignment(exarg_T *eap, char_u **line, cctx_T *cctx) { char_u *pskip; char_u *p; // Assuming the command starts with a variable or function name, // find what follows. // Skip over "var.member", "var[idx]" and the like. // Also "&opt = val", "$ENV = val" and "@r = val". pskip = (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@') ? eap->cmd + 1 : eap->cmd; p = to_name_end(pskip, TRUE); if (p > eap->cmd && *p != NUL) { char_u *var_end; int oplen; int heredoc; if (eap->cmd[0] == '@') var_end = eap->cmd + 2; else var_end = find_name_end(pskip, NULL, NULL, FNE_CHECK_START | FNE_INCL_BR); oplen = assignment_len(skipwhite(var_end), &heredoc); if (oplen > 0) { size_t len = p - eap->cmd; // Recognize an assignment if we recognize the variable // name: // "g:var = expr" // "local = expr" where "local" is a local var. // "script = expr" where "script" is a script-local var. // "import = expr" where "import" is an imported var // "&opt = expr" // "$ENV = expr" // "@r = expr" if (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@' || ((len) > 2 && eap->cmd[1] == ':') || variable_exists(eap->cmd, len, cctx)) { *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL || *line == eap->cmd) return FAIL; return OK; } } } if (*eap->cmd == '[') { // [var, var] = expr *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL) return FAIL; if (*line != eap->cmd) return OK; } return NOTDONE; } /* * Add a function to the list of :def functions. * This sets "ufunc->uf_dfunc_idx" but the function isn't compiled yet. */ static int add_def_function(ufunc_T *ufunc) { dfunc_T *dfunc; if (def_functions.ga_len == 0) { // The first position is not used, so that a zero uf_dfunc_idx means it // wasn't set. if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; ++def_functions.ga_len; } // Add the function to "def_functions". if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; dfunc = ((dfunc_T *)def_functions.ga_data) + def_functions.ga_len; CLEAR_POINTER(dfunc); dfunc->df_idx = def_functions.ga_len; ufunc->uf_dfunc_idx = dfunc->df_idx; dfunc->df_ufunc = ufunc; dfunc->df_name = vim_strsave(ufunc->uf_name); ga_init2(&dfunc->df_var_names, sizeof(char_u *), 10); ++dfunc->df_refcount; ++def_functions.ga_len; return OK; } /* * After ex_function() has collected all the function lines: parse and compile * the lines into instructions. * Adds the function to "def_functions". * When "check_return_type" is set then set ufunc->uf_ret_type to the type of * the return statement (used for lambda). When uf_ret_type is already set * then check that it matches. * When "profiling" is true add ISN_PROF_START instructions. * "outer_cctx" is set for a nested function. * This can be used recursively through compile_lambda(), which may reallocate * "def_functions". * Returns OK or FAIL. 
*/ int compile_def_function( ufunc_T *ufunc, int check_return_type, compiletype_T compile_type, cctx_T *outer_cctx) { char_u *line = NULL; char_u *line_to_free = NULL; char_u *p; char *errormsg = NULL; // error message cctx_T cctx; garray_T *instr; int did_emsg_before = did_emsg; int did_emsg_silent_before = did_emsg_silent; int ret = FAIL; sctx_T save_current_sctx = current_sctx; int save_estack_compiling = estack_compiling; int save_cmod_flags = cmdmod.cmod_flags; int do_estack_push; int new_def_function = FALSE; #ifdef FEAT_PROFILE int prof_lnum = -1; #endif int debug_lnum = -1; // When using a function that was compiled before: Free old instructions. // The index is reused. Otherwise add a new entry in "def_functions". if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; isn_T *instr_dest = NULL; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE instr_dest = dfunc->df_instr_prof; break; #endif case CT_NONE: instr_dest = dfunc->df_instr; break; case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break; } if (instr_dest != NULL) // Was compiled in this mode before: Free old instructions. delete_def_function_contents(dfunc, FALSE); ga_clear_strings(&dfunc->df_var_names); } else { if (add_def_function(ufunc) == FAIL) return FAIL; new_def_function = TRUE; } ufunc->uf_def_status = UF_COMPILING; CLEAR_FIELD(cctx); cctx.ctx_compile_type = compile_type; cctx.ctx_ufunc = ufunc; cctx.ctx_lnum = -1; cctx.ctx_outer = outer_cctx; ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10); ga_init2(&cctx.ctx_type_stack, sizeof(type_T *), 50); ga_init2(&cctx.ctx_imports, sizeof(imported_T), 10); cctx.ctx_type_list = &ufunc->uf_type_list; ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50); instr = &cctx.ctx_instr; // Set the context to the function, it may be compiled when called from // another script. Set the script version to the most modern one. // The line number will be set in next_line_from_context(). current_sctx = ufunc->uf_script_ctx; current_sctx.sc_version = SCRIPT_VERSION_VIM9; // Don't use the flag from ":legacy" here. cmdmod.cmod_flags &= ~CMOD_LEGACY; // Make sure error messages are OK. do_estack_push = !estack_top_is_ufunc(ufunc, 1); if (do_estack_push) estack_push_ufunc(ufunc, 1); estack_compiling = TRUE; if (ufunc->uf_def_args.ga_len > 0) { int count = ufunc->uf_def_args.ga_len; int first_def_arg = ufunc->uf_args.ga_len - count; int i; char_u *arg; int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0); int did_set_arg_type = FALSE; // Produce instructions for the default values of optional arguments. SOURCING_LNUM = 0; // line number unknown for (i = 0; i < count; ++i) { garray_T *stack = &cctx.ctx_type_stack; type_T *val_type; int arg_idx = first_def_arg + i; where_T where = WHERE_INIT; int r; int jump_instr_idx = instr->ga_len; isn_T *isn; // Use a JUMP_IF_ARG_SET instruction to skip if the value was given. if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL) goto erret; // Make sure later arguments are not found. ufunc->uf_args_visible = arg_idx; arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i]; r = compile_expr0(&arg, &cctx); if (r == FAIL) goto erret; // If no type specified use the type of the default value. // Otherwise check that the default value type matches the // specified type. 
val_type = ((type_T **)stack->ga_data)[stack->ga_len - 1]; where.wt_index = arg_idx + 1; if (ufunc->uf_arg_types[arg_idx] == &t_unknown) { did_set_arg_type = TRUE; ufunc->uf_arg_types[arg_idx] = val_type; } else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx], -1, where, &cctx, FALSE, FALSE) == FAIL) goto erret; if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL) goto erret; // set instruction index in JUMP_IF_ARG_SET to here isn = ((isn_T *)instr->ga_data) + jump_instr_idx; isn->isn_arg.jumparg.jump_where = instr->ga_len; } if (did_set_arg_type) set_function_type(ufunc); } ufunc->uf_args_visible = ufunc->uf_args.ga_len; /* * Loop over all the lines of the function and generate instructions. */ for (;;) { exarg_T ea; int starts_with_colon = FALSE; char_u *cmd; cmdmod_T local_cmdmod; // Bail out on the first error to avoid a flood of errors and report // the right line number when inside try/catch. if (did_emsg_before != did_emsg) goto erret; if (line != NULL && *line == '|') // the line continues after a '|' ++line; else if (line != NULL && *skipwhite(line) != NUL && !(*line == '#' && (line == cctx.ctx_line_start || VIM_ISWHITE(line[-1])))) { semsg(_(e_trailing_arg), line); goto erret; } else if (line != NULL && vim9_bad_comment(skipwhite(line))) goto erret; else { line = next_line_from_context(&cctx, FALSE); if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len) { // beyond the last line #ifdef FEAT_PROFILE if (cctx.ctx_skip != SKIP_YES) may_generate_prof_end(&cctx, prof_lnum); #endif break; } // Make a copy, splitting off nextcmd and removing trailing spaces // may change it. if (line != NULL) { line = vim_strsave(line); vim_free(line_to_free); line_to_free = line; } } CLEAR_FIELD(ea); ea.cmdlinep = &line; ea.cmd = skipwhite(line); if (*ea.cmd == '#') { // "#" starts a comment line = (char_u *)""; continue; } #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum && cctx.ctx_skip != SKIP_YES) { may_generate_prof_end(&cctx, prof_lnum); prof_lnum = cctx.ctx_lnum; generate_instr(&cctx, ISN_PROF_START); } #endif if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum && cctx.ctx_skip != SKIP_YES) { debug_lnum = cctx.ctx_lnum; generate_instr_debug(&cctx); } cctx.ctx_prev_lnum = cctx.ctx_lnum + 1; // Some things can be recognized by the first character. switch (*ea.cmd) { case '}': { // "}" ends a block scope scopetype_T stype = cctx.ctx_scope == NULL ? NO_SCOPE : cctx.ctx_scope->se_type; if (stype == BLOCK_SCOPE) { compile_endblock(&cctx); line = ea.cmd; } else { emsg(_(e_using_rcurly_outside_if_block_scope)); goto erret; } if (line != NULL) line = skipwhite(ea.cmd + 1); continue; } case '{': // "{" starts a block scope // "{'a': 1}->func() is something else if (ends_excmd(*skipwhite(ea.cmd + 1))) { line = compile_block(ea.cmd, &cctx); continue; } break; } /* * COMMAND MODIFIERS */ cctx.ctx_has_cmdmod = FALSE; if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE) == FAIL) { if (errormsg != NULL) goto erret; // empty line or comment line = (char_u *)""; continue; } generate_cmdmods(&cctx, &local_cmdmod); undo_cmdmod(&local_cmdmod); // Check if there was a colon after the last command modifier or before // the current position. 
for (p = ea.cmd; p >= line; --p) { if (*p == ':') starts_with_colon = TRUE; if (p < ea.cmd && !VIM_ISWHITE(*p)) break; } // Skip ":call" to get to the function name, unless using :legacy p = ea.cmd; if (!(local_cmdmod.cmod_flags & CMOD_LEGACY)) { if (checkforcmd(&ea.cmd, "call", 3)) { if (*ea.cmd == '(') // not for "call()" ea.cmd = p; else ea.cmd = skipwhite(ea.cmd); } if (!starts_with_colon) { int assign; // Check for assignment after command modifiers. assign = may_compile_assignment(&ea, &line, &cctx); if (assign == OK) goto nextline; if (assign == FAIL) goto erret; } } /* * COMMAND after range * 'text'->func() should not be confused with 'a mark * "++nr" and "--nr" are eval commands * in "$ENV->func()" the "$" is not a range */ cmd = ea.cmd; if ((*cmd != '$' || starts_with_colon) && (starts_with_colon || !(*cmd == '\'' || (cmd[0] == cmd[1] && (*cmd == '+' || *cmd == '-'))))) { ea.cmd = skip_range(ea.cmd, TRUE, NULL); if (ea.cmd > cmd) { if (!starts_with_colon && !(local_cmdmod.cmod_flags & CMOD_LEGACY)) { semsg(_(e_colon_required_before_range_str), cmd); goto erret; } ea.addr_count = 1; if (ends_excmd2(line, ea.cmd)) { // A range without a command: jump to the line. generate_EXEC(&cctx, ISN_EXECRANGE, vim_strnsave(cmd, ea.cmd - cmd)); line = ea.cmd; goto nextline; } } } p = find_ex_command(&ea, NULL, starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY) ? NULL : item_exists, &cctx); if (p == NULL) { if (cctx.ctx_skip != SKIP_YES) emsg(_(e_ambiguous_use_of_user_defined_command)); goto erret; } // When using ":legacy cmd" always use compile_exec(). if (local_cmdmod.cmod_flags & CMOD_LEGACY) { char_u *start = ea.cmd; switch (ea.cmdidx) { case CMD_if: case CMD_elseif: case CMD_else: case CMD_endif: case CMD_for: case CMD_endfor: case CMD_continue: case CMD_break: case CMD_while: case CMD_endwhile: case CMD_try: case CMD_catch: case CMD_finally: case CMD_endtry: semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd); goto erret; default: break; } // ":legacy return expr" needs to be handled differently. if (checkforcmd(&start, "return", 4)) ea.cmdidx = CMD_return; else ea.cmdidx = CMD_legacy; } if (p == ea.cmd && ea.cmdidx != CMD_SIZE) { if (cctx.ctx_skip == SKIP_YES && ea.cmdidx != CMD_eval) { line += STRLEN(line); goto nextline; } else if (ea.cmdidx != CMD_eval) { // CMD_var cannot happen, compile_assignment() above would be // used. Most likely an assignment to a non-existing variable. 
semsg(_(e_command_not_recognized_str), ea.cmd); goto erret; } } if (cctx.ctx_had_return && ea.cmdidx != CMD_elseif && ea.cmdidx != CMD_else && ea.cmdidx != CMD_endif && ea.cmdidx != CMD_endfor && ea.cmdidx != CMD_endwhile && ea.cmdidx != CMD_catch && ea.cmdidx != CMD_finally && ea.cmdidx != CMD_endtry) { emsg(_(e_unreachable_code_after_return)); goto erret; } p = skipwhite(p); if (ea.cmdidx != CMD_SIZE && ea.cmdidx != CMD_write && ea.cmdidx != CMD_read) { if (ea.cmdidx >= 0) ea.argt = excmd_get_argt(ea.cmdidx); if ((ea.argt & EX_BANG) && *p == '!') { ea.forceit = TRUE; p = skipwhite(p + 1); } } switch (ea.cmdidx) { case CMD_def: case CMD_function: ea.arg = p; line = compile_nested_function(&ea, &cctx, &line_to_free); break; case CMD_return: line = compile_return(p, check_return_type, local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx); cctx.ctx_had_return = TRUE; break; case CMD_let: emsg(_(e_cannot_use_let_in_vim9_script)); break; case CMD_var: case CMD_final: case CMD_const: case CMD_increment: case CMD_decrement: line = compile_assignment(p, &ea, ea.cmdidx, &cctx); if (line == p) line = NULL; break; case CMD_unlet: case CMD_unlockvar: case CMD_lockvar: line = compile_unletlock(p, &ea, &cctx); break; case CMD_import: emsg(_(e_import_can_only_be_used_in_script)); line = NULL; break; case CMD_if: line = compile_if(p, &cctx); break; case CMD_elseif: line = compile_elseif(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_else: line = compile_else(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endif: line = compile_endif(p, &cctx); break; case CMD_while: line = compile_while(p, &cctx); break; case CMD_endwhile: line = compile_endwhile(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_for: line = compile_for(p, &cctx); break; case CMD_endfor: line = compile_endfor(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_continue: line = compile_continue(p, &cctx); break; case CMD_break: line = compile_break(p, &cctx); break; case CMD_try: line = compile_try(p, &cctx); break; case CMD_catch: line = compile_catch(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_finally: line = compile_finally(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endtry: line = compile_endtry(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_throw: line = compile_throw(p, &cctx); break; case CMD_eval: line = compile_eval(p, &cctx); break; case CMD_echo: case CMD_echon: case CMD_execute: case CMD_echomsg: case CMD_echoerr: case CMD_echoconsole: line = compile_mult_expr(p, ea.cmdidx, &cctx); break; case CMD_put: ea.cmd = cmd; line = compile_put(p, &ea, &cctx); break; case CMD_substitute: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; if (cctx.ctx_skip == SKIP_YES) line = (char_u *)""; else { ea.arg = p; line = compile_substitute(line, &ea, &cctx); } break; case CMD_redir: ea.arg = p; line = compile_redir(line, &ea, &cctx); break; case CMD_cexpr: case CMD_lexpr: case CMD_caddexpr: case CMD_laddexpr: case CMD_cgetexpr: case CMD_lgetexpr: #ifdef FEAT_QUICKFIX ea.arg = p; line = compile_cexpr(line, &ea, &cctx); #else ex_ni(&ea); line = NULL; #endif break; case CMD_append: case CMD_change: case CMD_insert: case CMD_k: case CMD_t: case CMD_xit: not_in_vim9(&ea); goto erret; case CMD_SIZE: if (cctx.ctx_skip != SKIP_YES) { semsg(_(e_invalid_command_str), ea.cmd); goto erret; } // We don't check for a next command here. 
line = (char_u *)""; break; case CMD_lua: case CMD_mzscheme: case CMD_perl: case CMD_py3: case CMD_python3: case CMD_python: case CMD_pythonx: case CMD_ruby: case CMD_tcl: ea.arg = p; if (vim_strchr(line, '\n') == NULL) line = compile_exec(line, &ea, &cctx); else // heredoc lines have been concatenated with NL // characters in get_function_body() line = compile_script(line, &cctx); break; case CMD_global: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; // FALLTHROUGH default: // Not recognized, execute with do_cmdline_cmd(). ea.arg = p; line = compile_exec(line, &ea, &cctx); break; } nextline: if (line == NULL) goto erret; line = skipwhite(line); // Undo any command modifiers. generate_undo_cmdmods(&cctx); if (cctx.ctx_type_stack.ga_len < 0) { iemsg("Type stack underflow"); goto erret; } } if (cctx.ctx_scope != NULL) { if (cctx.ctx_scope->se_type == IF_SCOPE) emsg(_(e_endif)); else if (cctx.ctx_scope->se_type == WHILE_SCOPE) emsg(_(e_endwhile)); else if (cctx.ctx_scope->se_type == FOR_SCOPE) emsg(_(e_endfor)); else emsg(_(e_missing_rcurly)); goto erret; } if (!cctx.ctx_had_return) { if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN) ufunc->uf_ret_type = &t_void; else if (ufunc->uf_ret_type->tt_type != VAR_VOID) { emsg(_(e_missing_return_statement)); goto erret; } // Return void if there is no return at the end. generate_instr(&cctx, ISN_RETURN_VOID); } // When compiled with ":silent!" and there was an error don't consider the // function compiled. if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; dfunc->df_deleted = FALSE; dfunc->df_script_seq = current_sctx.sc_seq; #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE) { dfunc->df_instr_prof = instr->ga_data; dfunc->df_instr_prof_count = instr->ga_len; } else #endif if (cctx.ctx_compile_type == CT_DEBUG) { dfunc->df_instr_debug = instr->ga_data; dfunc->df_instr_debug_count = instr->ga_len; } else { dfunc->df_instr = instr->ga_data; dfunc->df_instr_count = instr->ga_len; } dfunc->df_varcount = dfunc->df_var_names.ga_len; dfunc->df_has_closure = cctx.ctx_has_closure; if (cctx.ctx_outer_used) ufunc->uf_flags |= FC_CLOSURE; ufunc->uf_def_status = UF_COMPILED; } ret = OK; erret: if (ufunc->uf_def_status == UF_COMPILING) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; // Compiling aborted, free the generated instructions. clear_instr_ga(instr); VIM_CLEAR(dfunc->df_name); ga_clear_strings(&dfunc->df_var_names); // If using the last entry in the table and it was added above, we // might as well remove it. 
if (!dfunc->df_deleted && new_def_function && ufunc->uf_dfunc_idx == def_functions.ga_len - 1) { --def_functions.ga_len; ufunc->uf_dfunc_idx = 0; } ufunc->uf_def_status = UF_COMPILE_ERROR; while (cctx.ctx_scope != NULL) drop_scope(&cctx); if (errormsg != NULL) emsg(errormsg); else if (did_emsg == did_emsg_before) emsg(_(e_compiling_def_function_failed)); } if (cctx.ctx_redir_lhs.lhs_name != NULL) { if (ret == OK) { emsg(_(e_missing_redir_end)); ret = FAIL; } vim_free(cctx.ctx_redir_lhs.lhs_name); vim_free(cctx.ctx_redir_lhs.lhs_whole); } current_sctx = save_current_sctx; estack_compiling = save_estack_compiling; cmdmod.cmod_flags = save_cmod_flags; if (do_estack_push) estack_pop(); vim_free(line_to_free); free_imported(&cctx); free_locals(&cctx); ga_clear(&cctx.ctx_type_stack); return ret; } void set_function_type(ufunc_T *ufunc) { int varargs = ufunc->uf_va_name != NULL; int argcount = ufunc->uf_args.ga_len; // Create a type for the function, with the return type and any // argument types. // A vararg is included in uf_args.ga_len but not in uf_arg_types. // The type is included in "tt_args". if (argcount > 0 || varargs) { if (ufunc->uf_type_list.ga_itemsize == 0) ga_init2(&ufunc->uf_type_list, sizeof(type_T *), 10); ufunc->uf_func_type = alloc_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); // Add argument types to the function type. if (func_type_add_arg_types(ufunc->uf_func_type, argcount + varargs, &ufunc->uf_type_list) == FAIL) return; ufunc->uf_func_type->tt_argcount = argcount + varargs; ufunc->uf_func_type->tt_min_argcount = argcount - ufunc->uf_def_args.ga_len; if (ufunc->uf_arg_types == NULL) { int i; // lambda does not have argument types. for (i = 0; i < argcount; ++i) ufunc->uf_func_type->tt_args[i] = &t_any; } else mch_memmove(ufunc->uf_func_type->tt_args, ufunc->uf_arg_types, sizeof(type_T *) * argcount); if (varargs) { ufunc->uf_func_type->tt_args[argcount] = ufunc->uf_va_type == NULL ? &t_list_any : ufunc->uf_va_type; ufunc->uf_func_type->tt_flags = TTFLAG_VARARGS; } } else // No arguments, can use a predefined type. ufunc->uf_func_type = get_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); } /* * Free all instructions for "dfunc" except df_name. */ static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted) { int idx; ga_clear(&dfunc->df_def_args_isn); ga_clear_strings(&dfunc->df_var_names); if (dfunc->df_instr != NULL) { for (idx = 0; idx < dfunc->df_instr_count; ++idx) delete_instr(dfunc->df_instr + idx); VIM_CLEAR(dfunc->df_instr); dfunc->df_instr = NULL; } if (dfunc->df_instr_debug != NULL) { for (idx = 0; idx < dfunc->df_instr_debug_count; ++idx) delete_instr(dfunc->df_instr_debug + idx); VIM_CLEAR(dfunc->df_instr_debug); dfunc->df_instr_debug = NULL; } #ifdef FEAT_PROFILE if (dfunc->df_instr_prof != NULL) { for (idx = 0; idx < dfunc->df_instr_prof_count; ++idx) delete_instr(dfunc->df_instr_prof + idx); VIM_CLEAR(dfunc->df_instr_prof); dfunc->df_instr_prof = NULL; } #endif if (mark_deleted) dfunc->df_deleted = TRUE; if (dfunc->df_ufunc != NULL) dfunc->df_ufunc->uf_def_status = UF_NOT_COMPILED; } /* * When a user function is deleted, clear the contents of any associated def * function, unless another user function still uses it. * The position in def_functions can be re-used. 
*/ void unlink_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; if (--dfunc->df_refcount <= 0) delete_def_function_contents(dfunc, TRUE); ufunc->uf_def_status = UF_NOT_COMPILED; ufunc->uf_dfunc_idx = 0; if (dfunc->df_ufunc == ufunc) dfunc->df_ufunc = NULL; } } /* * Used when a user function refers to an existing dfunc. */ void link_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; ++dfunc->df_refcount; } } #if defined(EXITFREE) || defined(PROTO) /* * Free all functions defined with ":def". */ void free_def_functions(void) { int idx; for (idx = 0; idx < def_functions.ga_len; ++idx) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + idx; delete_def_function_contents(dfunc, TRUE); vim_free(dfunc->df_name); } ga_clear(&def_functions); } #endif #endif // FEAT_EVAL
compile_nested_function(exarg_T *eap, cctx_T *cctx) { int is_global = *eap->arg == 'g' && eap->arg[1] == ':'; char_u *name_start = eap->arg; char_u *name_end = to_name_end(eap->arg, TRUE); char_u *lambda_name; ufunc_T *ufunc; int r = FAIL; compiletype_T compile_type; if (eap->forceit) { emsg(_(e_cannot_use_bang_with_nested_def)); return NULL; } if (*name_start == '/') { name_end = skip_regexp(name_start + 1, '/', TRUE); if (*name_end == '/') ++name_end; set_nextcmd(eap, name_end); } if (name_end == name_start || *skipwhite(name_end) != '(') { if (!ends_excmd2(name_start, name_end)) { semsg(_(e_invalid_command_str), eap->cmd); return NULL; } // "def" or "def Name": list functions if (generate_DEF(cctx, name_start, name_end - name_start) == FAIL) return NULL; return eap->nextcmd == NULL ? (char_u *)"" : eap->nextcmd; } // Only g:Func() can use a namespace. if (name_start[1] == ':' && !is_global) { semsg(_(e_namespace_not_supported_str), name_start); return NULL; } if (check_defined(name_start, name_end - name_start, cctx, FALSE) == FAIL) return NULL; eap->arg = name_end; fill_exarg_from_cctx(eap, cctx); eap->forceit = FALSE; // We use the special <Lamba>99 name, but it's not really a lambda. lambda_name = vim_strsave(get_lambda_name()); if (lambda_name == NULL) return NULL; ufunc = define_function(eap, lambda_name); if (ufunc == NULL) { r = eap->skip ? OK : FAIL; goto theend; } // copy over the block scope IDs before compiling if (!is_global && cctx->ctx_ufunc->uf_block_depth > 0) { int block_depth = cctx->ctx_ufunc->uf_block_depth; ufunc->uf_block_ids = ALLOC_MULT(int, block_depth); if (ufunc->uf_block_ids != NULL) { mch_memmove(ufunc->uf_block_ids, cctx->ctx_ufunc->uf_block_ids, sizeof(int) * block_depth); ufunc->uf_block_depth = block_depth; } } compile_type = COMPILE_TYPE(ufunc); #ifdef FEAT_PROFILE // If the outer function is profiled, also compile the nested function for // profiling. if (cctx->ctx_compile_type == CT_PROFILE) compile_type = CT_PROFILE; #endif if (func_needs_compiling(ufunc, compile_type) && compile_def_function(ufunc, TRUE, compile_type, cctx) == FAIL) { func_ptr_unref(ufunc); goto theend; } #ifdef FEAT_PROFILE // When the outer function is compiled for profiling, the nested function // may be called without profiling. Compile it here in the right context. if (compile_type == CT_PROFILE && func_needs_compiling(ufunc, CT_NONE)) compile_def_function(ufunc, FALSE, CT_NONE, cctx); #endif if (is_global) { char_u *func_name = vim_strnsave(name_start + 2, name_end - name_start - 2); if (func_name == NULL) r = FAIL; else { r = generate_NEWFUNC(cctx, lambda_name, func_name); lambda_name = NULL; } } else { // Define a local variable for the function reference. lvar_T *lvar = reserve_local(cctx, name_start, name_end - name_start, TRUE, ufunc->uf_func_type); if (lvar == NULL) goto theend; if (generate_FUNCREF(cctx, ufunc) == FAIL) goto theend; r = generate_STORE(cctx, ISN_STORE, lvar->lv_idx, NULL); } theend: vim_free(lambda_name); return r == FAIL ? NULL : (char_u *)""; }
compile_nested_function(exarg_T *eap, cctx_T *cctx, char_u **line_to_free) { int is_global = *eap->arg == 'g' && eap->arg[1] == ':'; char_u *name_start = eap->arg; char_u *name_end = to_name_end(eap->arg, TRUE); int off; char_u *func_name; char_u *lambda_name; ufunc_T *ufunc; int r = FAIL; compiletype_T compile_type; if (eap->forceit) { emsg(_(e_cannot_use_bang_with_nested_def)); return NULL; } if (*name_start == '/') { name_end = skip_regexp(name_start + 1, '/', TRUE); if (*name_end == '/') ++name_end; set_nextcmd(eap, name_end); } if (name_end == name_start || *skipwhite(name_end) != '(') { if (!ends_excmd2(name_start, name_end)) { semsg(_(e_invalid_command_str), eap->cmd); return NULL; } // "def" or "def Name": list functions if (generate_DEF(cctx, name_start, name_end - name_start) == FAIL) return NULL; return eap->nextcmd == NULL ? (char_u *)"" : eap->nextcmd; } // Only g:Func() can use a namespace. if (name_start[1] == ':' && !is_global) { semsg(_(e_namespace_not_supported_str), name_start); return NULL; } if (check_defined(name_start, name_end - name_start, cctx, FALSE) == FAIL) return NULL; eap->arg = name_end; fill_exarg_from_cctx(eap, cctx); eap->forceit = FALSE; // We use the special <Lamba>99 name, but it's not really a lambda. lambda_name = vim_strsave(get_lambda_name()); if (lambda_name == NULL) return NULL; // This may free the current line, make a copy of the name. off = is_global ? 2 : 0; func_name = vim_strnsave(name_start + off, name_end - name_start - off); if (func_name == NULL) { r = FAIL; goto theend; } ufunc = define_function(eap, lambda_name, line_to_free); if (ufunc == NULL) { r = eap->skip ? OK : FAIL; goto theend; } // copy over the block scope IDs before compiling if (!is_global && cctx->ctx_ufunc->uf_block_depth > 0) { int block_depth = cctx->ctx_ufunc->uf_block_depth; ufunc->uf_block_ids = ALLOC_MULT(int, block_depth); if (ufunc->uf_block_ids != NULL) { mch_memmove(ufunc->uf_block_ids, cctx->ctx_ufunc->uf_block_ids, sizeof(int) * block_depth); ufunc->uf_block_depth = block_depth; } } compile_type = COMPILE_TYPE(ufunc); #ifdef FEAT_PROFILE // If the outer function is profiled, also compile the nested function for // profiling. if (cctx->ctx_compile_type == CT_PROFILE) compile_type = CT_PROFILE; #endif if (func_needs_compiling(ufunc, compile_type) && compile_def_function(ufunc, TRUE, compile_type, cctx) == FAIL) { func_ptr_unref(ufunc); goto theend; } #ifdef FEAT_PROFILE // When the outer function is compiled for profiling, the nested function // may be called without profiling. Compile it here in the right context. if (compile_type == CT_PROFILE && func_needs_compiling(ufunc, CT_NONE)) compile_def_function(ufunc, FALSE, CT_NONE, cctx); #endif if (is_global) { r = generate_NEWFUNC(cctx, lambda_name, func_name); func_name = NULL; lambda_name = NULL; } else { // Define a local variable for the function reference. lvar_T *lvar = reserve_local(cctx, func_name, name_end - name_start, TRUE, ufunc->uf_func_type); if (lvar == NULL) goto theend; if (generate_FUNCREF(cctx, ufunc) == FAIL) goto theend; r = generate_STORE(cctx, ISN_STORE, lvar->lv_idx, NULL); } theend: vim_free(lambda_name); vim_free(func_name); return r == FAIL ? NULL : (char_u *)""; }
{'added': [(815, 'compile_nested_function(exarg_T *eap, cctx_T *cctx, char_u **line_to_free)'), (820, ' int\t\toff;'), (821, ' char_u\t*func_name;'), (871, ''), (872, ' // This may free the current line, make a copy of the name.'), (873, ' off = is_global ? 2 : 0;'), (874, ' func_name = vim_strnsave(name_start + off, name_end - name_start - off);'), (875, ' if (func_name == NULL)'), (876, ' {'), (877, '\tr = FAIL;'), (878, '\tgoto theend;'), (879, ' }'), (880, ''), (881, ' ufunc = define_function(eap, lambda_name, line_to_free);'), (926, '\tr = generate_NEWFUNC(cctx, lambda_name, func_name);'), (927, '\tfunc_name = NULL;'), (928, '\tlambda_name = NULL;'), (933, '\tlvar_T\t*lvar = reserve_local(cctx, func_name, name_end - name_start,'), (945, ' vim_free(func_name);'), (2870, '\t\t line = compile_nested_function(&ea, &cctx, &line_to_free);')], 'deleted': [(815, 'compile_nested_function(exarg_T *eap, cctx_T *cctx)'), (869, ' ufunc = define_function(eap, lambda_name);'), (914, '\tchar_u *func_name = vim_strnsave(name_start + 2,'), (915, '\t\t\t\t\t\t name_end - name_start - 2);'), (916, ''), (917, '\tif (func_name == NULL)'), (918, '\t r = FAIL;'), (919, '\telse'), (920, '\t{'), (921, '\t r = generate_NEWFUNC(cctx, lambda_name, func_name);'), (922, '\t lambda_name = NULL;'), (923, '\t}'), (928, '\tlvar_T\t*lvar = reserve_local(cctx, name_start, name_end - name_start,'), (2864, '\t\t line = compile_nested_function(&ea, &cctx);')]}
20
14
2545
14097
https://github.com/vim/vim
CVE-2021-4173
['CWE-416']
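The diff above (CVE-2021-4173, CWE-416) resolves a use-after-free by duplicating the function name before calling define_function(), which may free the line that the name pointer refers into. The following is a minimal standalone sketch of that pattern, not taken from the Vim sources; may_free_line() is a hypothetical stand-in for a callee that frees and replaces the caller's current line.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for a callee (like define_function() in the diff
 * above) that may free and replace the caller's current line buffer. */
static void may_free_line(char **line)
{
    free(*line);
    *line = malloc(32);
    strcpy(*line, "replacement line");
}

int main(void)
{
    char *line = malloc(32);
    strcpy(line, "def Name()");

    char *name_start = line + 4;        /* points into "line" at "Name()" */
    size_t name_len = 4;                /* length of "Name" */

    /* Fixed pattern: copy the name out of "line" before the risky call. */
    char *func_name = malloc(name_len + 1);
    memcpy(func_name, name_start, name_len);
    func_name[name_len] = '\0';

    may_free_line(&line);               /* the old "line" buffer is gone now */

    /* Reading name_start here would be a use-after-free; func_name is safe. */
    printf("%s\n", func_name);

    free(func_name);
    free(line);
    return 0;
}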
sndfile.c
sf_open
/* ** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #define SNDFILE_MAGICK 0x1234C0DE #ifdef __APPLE__ /* ** Detect if a compile for a universal binary is being attempted and barf if it is. ** See the URL below for the rationale. */ #ifdef __BIG_ENDIAN__ #if (CPU_IS_LITTLE_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #ifdef __LITTLE_ENDIAN__ #if (CPU_IS_BIG_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #endif typedef struct { int error ; const char *str ; } ErrorStruct ; static ErrorStruct SndfileErrors [] = { /* Public error values and their associated strings. */ { SF_ERR_NO_ERROR , "No Error." }, { SF_ERR_UNRECOGNISED_FORMAT , "Format not recognised." }, { SF_ERR_SYSTEM , "System error." /* Often replaced. */ }, { SF_ERR_MALFORMED_FILE , "Supported file format but file is malformed." }, { SF_ERR_UNSUPPORTED_ENCODING , "Supported file format but unsupported encoding." }, /* Private error values and their associated strings. */ { SFE_ZERO_MAJOR_FORMAT , "Error : major format is 0." }, { SFE_ZERO_MINOR_FORMAT , "Error : minor format is 0." }, { SFE_BAD_FILE , "File does not exist or is not a regular file (possibly a pipe?)." }, { SFE_BAD_FILE_READ , "File exists but no data could be read." }, { SFE_OPEN_FAILED , "Could not open file." }, { SFE_BAD_SNDFILE_PTR , "Not a valid SNDFILE* pointer." }, { SFE_BAD_SF_INFO_PTR , "NULL SF_INFO pointer passed to libsndfile." }, { SFE_BAD_SF_INCOMPLETE , "SF_PRIVATE struct incomplete and end of header parsing." }, { SFE_BAD_FILE_PTR , "Bad FILE pointer." }, { SFE_BAD_INT_PTR , "Internal error, Bad pointer." }, { SFE_BAD_STAT_SIZE , "Error : software was misconfigured at compile time (sizeof statbuf.st_size)." }, { SFE_NO_TEMP_DIR , "Error : Could not file temp dir." }, { SFE_MALLOC_FAILED , "Internal malloc () failed." }, { SFE_UNIMPLEMENTED , "File contains data in an unimplemented format." }, { SFE_BAD_READ_ALIGN , "Attempt to read a non-integer number of channels." }, { SFE_BAD_WRITE_ALIGN , "Attempt to write a non-integer number of channels." }, { SFE_UNKNOWN_FORMAT , "File contains data in an unknown format." }, { SFE_NOT_READMODE , "Read attempted on file currently open for write." }, { SFE_NOT_WRITEMODE , "Write attempted on file currently open for read." }, { SFE_BAD_MODE_RW , "Error : This file format does not support read/write mode." }, { SFE_BAD_SF_INFO , "Internal error : SF_INFO struct incomplete." }, { SFE_BAD_OFFSET , "Error : supplied offset beyond end of file." 
}, { SFE_NO_EMBED_SUPPORT , "Error : embedding not supported for this file format." }, { SFE_NO_EMBEDDED_RDWR , "Error : cannot open embedded file read/write." }, { SFE_NO_PIPE_WRITE , "Error : this file format does not support pipe write." }, { SFE_BAD_VIRTUAL_IO , "Error : bad pointer on SF_VIRTUAL_IO struct." }, { SFE_BAD_BROADCAST_INFO_SIZE , "Error : bad coding_history_size in SF_BROADCAST_INFO struct." }, { SFE_BAD_BROADCAST_INFO_TOO_BIG , "Error : SF_BROADCAST_INFO struct too large." }, { SFE_BAD_CART_INFO_SIZE , "Error: SF_CART_INFO struct too large." }, { SFE_BAD_CART_INFO_TOO_BIG , "Error: bag tag_text_size in SF_CART_INFO struct." }, { SFE_INTERLEAVE_MODE , "Attempt to write to file with non-interleaved data." }, { SFE_INTERLEAVE_SEEK , "Bad karma in seek during interleave read operation." }, { SFE_INTERLEAVE_READ , "Bad karma in read during interleave read operation." }, { SFE_INTERNAL , "Unspecified internal error." }, { SFE_BAD_COMMAND_PARAM , "Bad parameter passed to function sf_command." }, { SFE_BAD_ENDIAN , "Bad endian-ness. Try default endian-ness" }, { SFE_CHANNEL_COUNT_ZERO , "Channel count is zero." }, { SFE_CHANNEL_COUNT , "Too many channels specified." }, { SFE_CHANNEL_COUNT_BAD , "Bad channel count." }, { SFE_BAD_SEEK , "Internal psf_fseek() failed." }, { SFE_NOT_SEEKABLE , "Seek attempted on unseekable file type." }, { SFE_AMBIGUOUS_SEEK , "Error : combination of file open mode and seek command is ambiguous." }, { SFE_WRONG_SEEK , "Error : invalid seek parameters." }, { SFE_SEEK_FAILED , "Error : parameters OK, but psf_seek() failed." }, { SFE_BAD_OPEN_MODE , "Error : bad mode parameter for file open." }, { SFE_OPEN_PIPE_RDWR , "Error : attempt to open a pipe in read/write mode." }, { SFE_RDWR_POSITION , "Error on RDWR position (cryptic)." }, { SFE_RDWR_BAD_HEADER , "Error : Cannot open file in read/write mode due to string data in header." }, { SFE_CMD_HAS_DATA , "Error : Command fails because file already has audio data." }, { SFE_STR_NO_SUPPORT , "Error : File type does not support string data." }, { SFE_STR_NOT_WRITE , "Error : Trying to set a string when file is not in write mode." }, { SFE_STR_MAX_DATA , "Error : Maximum string data storage reached." }, { SFE_STR_MAX_COUNT , "Error : Maximum string data count reached." }, { SFE_STR_BAD_TYPE , "Error : Bad string data type." }, { SFE_STR_NO_ADD_END , "Error : file type does not support strings added at end of file." }, { SFE_STR_BAD_STRING , "Error : bad string." }, { SFE_STR_WEIRD , "Error : Weird string error." }, { SFE_WAV_NO_RIFF , "Error in WAV file. No 'RIFF' chunk marker." }, { SFE_WAV_NO_WAVE , "Error in WAV file. No 'WAVE' chunk marker." }, { SFE_WAV_NO_FMT , "Error in WAV/W64/RF64 file. No 'fmt ' chunk marker." }, { SFE_WAV_BAD_FMT , "Error in WAV/W64/RF64 file. Malformed 'fmt ' chunk." }, { SFE_WAV_FMT_SHORT , "Error in WAV/W64/RF64 file. Short 'fmt ' chunk." }, { SFE_WAV_BAD_FACT , "Error in WAV file. 'fact' chunk out of place." }, { SFE_WAV_BAD_PEAK , "Error in WAV file. Bad 'PEAK' chunk." }, { SFE_WAV_PEAK_B4_FMT , "Error in WAV file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_WAV_BAD_FORMAT , "Error in WAV file. Errors in 'fmt ' chunk." }, { SFE_WAV_BAD_BLOCKALIGN , "Error in WAV file. Block alignment in 'fmt ' chunk is incorrect." }, { SFE_WAV_NO_DATA , "Error in WAV file. No 'data' chunk marker." }, { SFE_WAV_BAD_LIST , "Error in WAV file. Malformed LIST chunk." }, { SFE_WAV_UNKNOWN_CHUNK , "Error in WAV file. File contains an unknown chunk marker." 
}, { SFE_WAV_WVPK_DATA , "Error in WAV file. Data is in WAVPACK format." }, { SFE_WAV_ADPCM_NOT4BIT , "Error in ADPCM WAV file. Invalid bit width." }, { SFE_WAV_ADPCM_CHANNELS , "Error in ADPCM WAV file. Invalid number of channels." }, { SFE_WAV_ADPCM_SAMPLES , "Error in ADPCM WAV file. Invalid number of samples per block." }, { SFE_WAV_GSM610_FORMAT , "Error in GSM610 WAV file. Invalid format chunk." }, { SFE_AIFF_NO_FORM , "Error in AIFF file, bad 'FORM' marker." }, { SFE_AIFF_AIFF_NO_FORM , "Error in AIFF file, 'AIFF' marker without 'FORM'." }, { SFE_AIFF_COMM_NO_FORM , "Error in AIFF file, 'COMM' marker without 'FORM'." }, { SFE_AIFF_SSND_NO_COMM , "Error in AIFF file, 'SSND' marker without 'COMM'." }, { SFE_AIFF_UNKNOWN_CHUNK , "Error in AIFF file, unknown chunk." }, { SFE_AIFF_COMM_CHUNK_SIZE, "Error in AIFF file, bad 'COMM' chunk size." }, { SFE_AIFF_BAD_COMM_CHUNK , "Error in AIFF file, bad 'COMM' chunk." }, { SFE_AIFF_PEAK_B4_COMM , "Error in AIFF file. 'PEAK' chunk found before 'COMM' chunk." }, { SFE_AIFF_BAD_PEAK , "Error in AIFF file. Bad 'PEAK' chunk." }, { SFE_AIFF_NO_SSND , "Error in AIFF file, bad 'SSND' chunk." }, { SFE_AIFF_NO_DATA , "Error in AIFF file, no sound data." }, { SFE_AIFF_RW_SSND_NOT_LAST, "Error in AIFF file, RDWR only possible if SSND chunk at end of file." }, { SFE_AU_UNKNOWN_FORMAT , "Error in AU file, unknown format." }, { SFE_AU_NO_DOTSND , "Error in AU file, missing '.snd' or 'dns.' marker." }, { SFE_AU_EMBED_BAD_LEN , "Embedded AU file with unknown length." }, { SFE_RAW_READ_BAD_SPEC , "Error while opening RAW file for read. Must specify format and channels.\n" "Possibly trying to open unsupported format." }, { SFE_RAW_BAD_BITWIDTH , "Error. RAW file bitwidth must be a multiple of 8." }, { SFE_RAW_BAD_FORMAT , "Error. Bad format field in SF_INFO struct when opening a RAW file for read." }, { SFE_PAF_NO_MARKER , "Error in PAF file, no marker." }, { SFE_PAF_VERSION , "Error in PAF file, bad version." }, { SFE_PAF_UNKNOWN_FORMAT , "Error in PAF file, unknown format." }, { SFE_PAF_SHORT_HEADER , "Error in PAF file. File shorter than minimal header." }, { SFE_PAF_BAD_CHANNELS , "Error in PAF file. Bad channel count." }, { SFE_SVX_NO_FORM , "Error in 8SVX / 16SV file, no 'FORM' marker." }, { SFE_SVX_NO_BODY , "Error in 8SVX / 16SV file, no 'BODY' marker." }, { SFE_SVX_NO_DATA , "Error in 8SVX / 16SV file, no sound data." }, { SFE_SVX_BAD_COMP , "Error in 8SVX / 16SV file, unsupported compression format." }, { SFE_SVX_BAD_NAME_LENGTH , "Error in 8SVX / 16SV file, NAME chunk too long." }, { SFE_NIST_BAD_HEADER , "Error in NIST file, bad header." }, { SFE_NIST_CRLF_CONVERISON, "Error : NIST file damaged by Windows CR -> CRLF conversion process." }, { SFE_NIST_BAD_ENCODING , "Error in NIST file, unsupported compression format." }, { SFE_VOC_NO_CREATIVE , "Error in VOC file, no 'Creative Voice File' marker." }, { SFE_VOC_BAD_FORMAT , "Error in VOC file, bad format." }, { SFE_VOC_BAD_VERSION , "Error in VOC file, bad version number." }, { SFE_VOC_BAD_MARKER , "Error in VOC file, bad marker in file." }, { SFE_VOC_BAD_SECTIONS , "Error in VOC file, incompatible VOC sections." }, { SFE_VOC_MULTI_SAMPLERATE, "Error in VOC file, more than one sample rate defined." }, { SFE_VOC_MULTI_SECTION , "Unimplemented VOC file feature, file contains multiple sound sections." }, { SFE_VOC_MULTI_PARAM , "Error in VOC file, file contains multiple bit or channel widths." }, { SFE_VOC_SECTION_COUNT , "Error in VOC file, too many sections." 
}, { SFE_VOC_NO_PIPE , "Error : not able to operate on VOC files over a pipe." }, { SFE_IRCAM_NO_MARKER , "Error in IRCAM file, bad IRCAM marker." }, { SFE_IRCAM_BAD_CHANNELS , "Error in IRCAM file, bad channel count." }, { SFE_IRCAM_UNKNOWN_FORMAT, "Error in IRCAM file, unknown encoding format." }, { SFE_W64_64_BIT , "Error in W64 file, file contains 64 bit offset." }, { SFE_W64_NO_RIFF , "Error in W64 file. No 'riff' chunk marker." }, { SFE_W64_NO_WAVE , "Error in W64 file. No 'wave' chunk marker." }, { SFE_W64_NO_DATA , "Error in W64 file. No 'data' chunk marker." }, { SFE_W64_ADPCM_NOT4BIT , "Error in ADPCM W64 file. Invalid bit width." }, { SFE_W64_ADPCM_CHANNELS , "Error in ADPCM W64 file. Invalid number of channels." }, { SFE_W64_GSM610_FORMAT , "Error in GSM610 W64 file. Invalid format chunk." }, { SFE_MAT4_BAD_NAME , "Error in MAT4 file. No variable name." }, { SFE_MAT4_NO_SAMPLERATE , "Error in MAT4 file. No sample rate." }, { SFE_MAT5_BAD_ENDIAN , "Error in MAT5 file. Not able to determine endian-ness." }, { SFE_MAT5_NO_BLOCK , "Error in MAT5 file. Bad block structure." }, { SFE_MAT5_SAMPLE_RATE , "Error in MAT5 file. Not able to determine sample rate." }, { SFE_PVF_NO_PVF1 , "Error in PVF file. No PVF1 marker." }, { SFE_PVF_BAD_HEADER , "Error in PVF file. Bad header." }, { SFE_PVF_BAD_BITWIDTH , "Error in PVF file. Bad bit width." }, { SFE_XI_BAD_HEADER , "Error in XI file. Bad header." }, { SFE_XI_EXCESS_SAMPLES , "Error in XI file. Excess samples in file." }, { SFE_XI_NO_PIPE , "Error : not able to operate on XI files over a pipe." }, { SFE_HTK_NO_PIPE , "Error : not able to operate on HTK files over a pipe." }, { SFE_SDS_NOT_SDS , "Error : not an SDS file." }, { SFE_SDS_BAD_BIT_WIDTH , "Error : bad bit width for SDS file." }, { SFE_SD2_FD_DISALLOWED , "Error : cannot open SD2 file without a file name." }, { SFE_SD2_BAD_DATA_OFFSET , "Error : bad data offset." }, { SFE_SD2_BAD_MAP_OFFSET , "Error : bad map offset." }, { SFE_SD2_BAD_DATA_LENGTH , "Error : bad data length." }, { SFE_SD2_BAD_MAP_LENGTH , "Error : bad map length." }, { SFE_SD2_BAD_RSRC , "Error : bad resource fork." }, { SFE_SD2_BAD_SAMPLE_SIZE , "Error : bad sample size." }, { SFE_FLAC_BAD_HEADER , "Error : bad flac header." }, { SFE_FLAC_NEW_DECODER , "Error : problem while creating flac decoder." }, { SFE_FLAC_INIT_DECODER , "Error : problem while initialization of the flac decoder." }, { SFE_FLAC_LOST_SYNC , "Error : flac decoder lost sync." }, { SFE_FLAC_BAD_SAMPLE_RATE, "Error : flac does not support this sample rate." }, { SFE_FLAC_UNKOWN_ERROR , "Error : unknown error in flac decoder." }, { SFE_WVE_NOT_WVE , "Error : not a WVE file." }, { SFE_WVE_NO_PIPE , "Error : not able to operate on WVE files over a pipe." }, { SFE_DWVW_BAD_BITWIDTH , "Error : Bad bit width for DWVW encoding. Must be 12, 16 or 24." }, { SFE_G72X_NOT_MONO , "Error : G72x encoding does not support more than 1 channel." }, { SFE_VORBIS_ENCODER_BUG , "Error : Sample rate chosen is known to trigger a Vorbis encoder bug on this CPU." }, { SFE_RF64_NOT_RF64 , "Error : Not an RF64 file." }, { SFE_RF64_PEAK_B4_FMT , "Error in RF64 file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_RF64_NO_DATA , "Error in RF64 file. No 'data' chunk marker." }, { SFE_ALAC_FAIL_TMPFILE , "Error : Failed to open tmp file for ALAC encoding." }, { SFE_BAD_CHUNK_PTR , "Error : Bad SF_CHUNK_INFO pointer." }, { SFE_UNKNOWN_CHUNK , "Error : Unknown chunk marker." }, { SFE_BAD_CHUNK_FORMAT , "Error : Reading/writing chunks from this file format is not supported." 
}, { SFE_BAD_CHUNK_MARKER , "Error : Bad chunk marker." }, { SFE_BAD_CHUNK_DATA_PTR , "Error : Bad data pointer in SF_CHUNK_INFO struct." }, { SFE_FILENAME_TOO_LONG , "Error : Supplied filename too long." }, { SFE_MAX_ERROR , "Maximum error number." }, { SFE_MAX_ERROR + 1 , NULL } } ; /*------------------------------------------------------------------------------ */ static int format_from_extension (SF_PRIVATE *psf) ; static int guess_file_type (SF_PRIVATE *psf) ; static int validate_sfinfo (SF_INFO *sfinfo) ; static int validate_psf (SF_PRIVATE *psf) ; static void save_header_info (SF_PRIVATE *psf) ; static int copy_filename (SF_PRIVATE *psf, const char *path) ; static int psf_close (SF_PRIVATE *psf) ; static int try_resource_fork (SF_PRIVATE * psf) ; /*------------------------------------------------------------------------------ ** Private (static) variables. */ int sf_errno = 0 ; static char sf_parselog [SF_BUFFER_LEN] = { 0 } ; static char sf_syserr [SF_SYSERR_LEN] = { 0 } ; /*------------------------------------------------------------------------------ */ #define VALIDATE_SNDFILE_AND_ASSIGN_PSF(a, b, c) \ { if ((a) == NULL) \ { sf_errno = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ (b) = (SF_PRIVATE*) (a) ; \ if ((b)->virtual_io == SF_FALSE && \ psf_file_valid (b) == 0) \ { (b)->error = SFE_BAD_FILE_PTR ; \ return 0 ; \ } ; \ if ((b)->Magick != SNDFILE_MAGICK) \ { (b)->error = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ if (c) (b)->error = 0 ; \ } /*------------------------------------------------------------------------------ ** Public functions. */ SNDFILE* sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */ SNDFILE* sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */ SNDFILE* sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. 
*/ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */ int sf_close (SNDFILE *sndfile) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_close (psf) ; } /* sf_close */ void sf_write_sync (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE *) sndfile) == NULL) return ; psf_fsync (psf) ; return ; } /* sf_write_sync */ /*============================================================================== */ const char* sf_error_number (int errnum) { static const char *bad_errnum = "No error defined for this error number. This is a bug in libsndfile." ; int k ; if (errnum == SFE_MAX_ERROR) return SndfileErrors [0].str ; if (errnum < 0 || errnum > SFE_MAX_ERROR) { /* This really shouldn't happen in release versions. */ printf ("Not a valid error number (%d).\n", errnum) ; return bad_errnum ; } ; for (k = 0 ; SndfileErrors [k].str ; k++) if (errnum == SndfileErrors [k].error) return SndfileErrors [k].str ; return bad_errnum ; } /* sf_error_number */ const char* sf_strerror (SNDFILE *sndfile) { SF_PRIVATE *psf = NULL ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; if (errnum == SFE_SYSTEM && sf_syserr [0]) return sf_syserr ; } else { psf = (SF_PRIVATE *) sndfile ; if (psf->Magick != SNDFILE_MAGICK) return "sf_strerror : Bad magic number." 
; errnum = psf->error ; if (errnum == SFE_SYSTEM && psf->syserr [0]) return psf->syserr ; } ; return sf_error_number (errnum) ; } /* sf_strerror */ /*------------------------------------------------------------------------------ */ int sf_error (SNDFILE *sndfile) { SF_PRIVATE *psf ; if (sndfile == NULL) return sf_errno ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; if (psf->error) return psf->error ; return 0 ; } /* sf_error */ /*------------------------------------------------------------------------------ */ int sf_perror (SNDFILE *sndfile) { SF_PRIVATE *psf ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; } else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; fprintf (stderr, "%s\n", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_perror */ /*------------------------------------------------------------------------------ */ int sf_error_str (SNDFILE *sndfile, char *str, size_t maxlen) { SF_PRIVATE *psf ; int errnum ; if (str == NULL) return SFE_INTERNAL ; if (sndfile == NULL) errnum = sf_errno ; else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; snprintf (str, maxlen, "%s", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_error_str */ /*============================================================================== */ int sf_format_check (const SF_INFO *info) { int subformat, endian ; subformat = SF_CODEC (info->format) ; endian = SF_ENDIAN (info->format) ; /* This is the place where each file format can check if the supplied ** SF_INFO struct is valid. ** Return 0 on failure, 1 on success. */ if (info->channels < 1 || info->channels > SF_MAX_CHANNELS) return 0 ; if (info->samplerate < 0) return 0 ; switch (SF_CONTAINER (info->format)) { case SF_FORMAT_WAV : /* WAV now allows both endian, RIFF or RIFX (little or big respectively) */ if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_WAVEX : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_AIFF : /* AIFF does allow both endian-nesses for PCM data.*/ if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; /* For other encodings reject any endian-ness setting.
*/ if (endian != 0) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_IMA_ADPCM && (info->channels == 1 || info->channels == 2)) return 1 ; break ; case SF_FORMAT_AU : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_24 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_40 && info->channels == 1) return 1 ; break ; case SF_FORMAT_CAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_ALAC_16 || subformat == SF_FORMAT_ALAC_20) return 1 ; if (subformat == SF_FORMAT_ALAC_24 || subformat == SF_FORMAT_ALAC_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_RAW : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_ULAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_VOX_ADPCM && info->channels == 1) return 1 ; break ; case SF_FORMAT_PAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SVX : /* SVX only supports writing mono SVX files. */ if (info->channels > 1) return 0 ; /* Always big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_NIST : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_IRCAM : if (info->channels > 256) return 0 ; if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_FLOAT) return 1 ; break ; case SF_FORMAT_VOC : if (info->channels > 2) return 0 ; /* VOC is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_W64 : /* W64 is strictly little endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT4 : if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT5 : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_PVF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_XI : if (info->channels != 1) return 0 ; if (subformat == SF_FORMAT_DPCM_8 || subformat == SF_FORMAT_DPCM_16) return 1 ; break ; case SF_FORMAT_HTK : if (info->channels != 1) return 0 ; /* HTK is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_SDS : if (info->channels != 1) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_AVR : if (info->channels > 2) return 0 ; /* AVR is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_FLAC : /* FLAC can't do more than 8 channels. */ if (info->channels > 8) return 0 ; if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SD2 : /* SD2 is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_WVE : if (info->channels > 1) return 0 ; /* WVE is strictly big endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_OGG : if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_VORBIS) return 1 ; break ; case SF_FORMAT_MPC2K : if (info->channels > 2) return 0 ; /* MPC2000 is strictly little endian.
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_RF64 : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; default : break ; } ; return 0 ; } /* sf_format_check */ /*------------------------------------------------------------------------------ */ const char * sf_version_string (void) { #if ENABLE_EXPERIMENTAL_CODE return PACKAGE_NAME "-" PACKAGE_VERSION "-exp" ; #else return PACKAGE_NAME "-" PACKAGE_VERSION ; #endif } /*------------------------------------------------------------------------------ */ int sf_command (SNDFILE *sndfile, int command, void *data, int datasize) { SF_PRIVATE *psf = (SF_PRIVATE *) sndfile ; double quality ; int old_value ; /* This set of commands do not need the sndfile parameter. */ switch (command) { case SFC_GET_LIB_VERSION : if (data == NULL) { if (psf) psf->error = SFE_BAD_COMMAND_PARAM ; return SFE_BAD_COMMAND_PARAM ; } ; snprintf (data, datasize, "%s", sf_version_string ()) ; return strlen (data) ; case SFC_GET_SIMPLE_FORMAT_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_simple_count () ; return 0 ; case SFC_GET_SIMPLE_FORMAT : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_simple (data) ; case SFC_GET_FORMAT_MAJOR_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_major_count () ; return 0 ; case SFC_GET_FORMAT_MAJOR : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_major (data) ; case SFC_GET_FORMAT_SUBTYPE_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_subtype_count () ; return 0 ; case SFC_GET_FORMAT_SUBTYPE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_subtype (data) ; case SFC_GET_FORMAT_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_info (data) ; } ; if (sndfile == NULL && command == SFC_GET_LOG_INFO) { if (data == NULL) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; snprintf (data, datasize, "%s", sf_parselog) ; return strlen (data) ; } ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; switch (command) { case SFC_SET_NORM_FLOAT : old_value = psf->norm_float ; psf->norm_float = (datasize) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_CURRENT_SF_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; memcpy (data, &psf->sf, sizeof (SF_INFO)) ; break ; case SFC_SET_NORM_DOUBLE : old_value = psf->norm_double ; psf->norm_double = (datasize) ? 
SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_NORM_FLOAT : return psf->norm_float ; case SFC_GET_NORM_DOUBLE : return psf->norm_double ; case SFC_SET_SCALE_FLOAT_INT_READ : old_value = psf->float_int_mult ; psf->float_int_mult = (datasize != 0) ? SF_TRUE : SF_FALSE ; if (psf->float_int_mult && psf->float_max < 0.0) /* Scale to prevent wrap-around distortion. */ psf->float_max = (32768.0 / 32767.0) * psf_calc_signal_max (psf, SF_FALSE) ; return old_value ; case SFC_SET_SCALE_INT_FLOAT_WRITE : old_value = psf->scale_int_float ; psf->scale_int_float = (datasize != 0) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_SET_ADD_PEAK_CHUNK : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and AIFF support the PEAK chunk. */ switch (format) { case SF_FORMAT_AIFF : case SF_FORMAT_CAF : case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_RF64 : break ; default : return SF_FALSE ; } ; format = SF_CODEC (psf->sf.format) ; /* Only files containg the following data types support the PEAK chunk. */ if (format != SF_FORMAT_FLOAT && format != SF_FORMAT_DOUBLE) return SF_FALSE ; } ; /* Can only do this is in SFM_WRITE mode. */ if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; /* Everything seems OK, so set psf->has_peak and re-write header. */ if (datasize == SF_FALSE && psf->peak_info != NULL) { free (psf->peak_info) ; psf->peak_info = NULL ; } else if (psf->peak_info == NULL) { psf->peak_info = peak_info_calloc (psf->sf.channels) ; if (psf->peak_info != NULL) psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return datasize ; case SFC_SET_ADD_HEADER_PAD_CHUNK : return SF_FALSE ; case SFC_GET_LOG_INFO : if (data == NULL) return SFE_BAD_COMMAND_PARAM ; snprintf (data, datasize, "%s", psf->parselog.buf) ; break ; case SFC_CALC_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_FALSE) ; break ; case SFC_CALC_NORM_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_TRUE) ; break ; case SFC_CALC_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_FALSE) ; case SFC_CALC_NORM_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_TRUE) ; case SFC_GET_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_signal_max (psf, (double *) data) ; case SFC_GET_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_max_all_channels (psf, (double*) data) ; case SFC_UPDATE_HEADER_NOW : if (psf->write_header) psf->write_header (psf, SF_TRUE) ; break ; case SFC_SET_UPDATE_HEADER_AUTO : psf->auto_header = datasize ? SF_TRUE : SF_FALSE ; return psf->auto_header ; break ; case SFC_SET_ADD_DITHER_ON_WRITE : case SFC_SET_ADD_DITHER_ON_READ : /* ** FIXME ! ** These are obsolete. Just return. 
** Remove some time after version 1.0.8. */ break ; case SFC_SET_DITHER_ON_WRITE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->write_dither, data, sizeof (psf->write_dither)) ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_WRITE) ; break ; case SFC_SET_DITHER_ON_READ : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->read_dither, data, sizeof (psf->read_dither)) ; if (psf->file.mode == SFM_READ || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_READ) ; break ; case SFC_FILE_TRUNCATE : if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_TRUE ; if (datasize != sizeof (sf_count_t)) return SF_TRUE ; if (data == NULL || datasize != sizeof (sf_count_t)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } else { sf_count_t position ; position = *((sf_count_t*) data) ; if (sf_seek (sndfile, position, SEEK_SET) != position) return SF_TRUE ; psf->sf.frames = position ; position = psf_fseek (psf, 0, SEEK_CUR) ; return psf_ftruncate (psf, position) ; } ; break ; case SFC_SET_RAW_START_OFFSET : if (data == NULL || datasize != sizeof (sf_count_t)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) return (psf->error = SFE_BAD_COMMAND_PARAM) ; psf->dataoffset = *((sf_count_t*) data) ; sf_seek (sndfile, 0, SEEK_CUR) ; break ; case SFC_GET_EMBED_FILE_INFO : if (data == NULL || datasize != sizeof (SF_EMBED_FILE_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; ((SF_EMBED_FILE_INFO*) data)->offset = psf->fileoffset ; ((SF_EMBED_FILE_INFO*) data)->length = psf->filelength ; break ; /* Lite remove start */ case SFC_TEST_IEEE_FLOAT_REPLACE : psf->ieee_replace = (datasize) ? SF_TRUE : SF_FALSE ; if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_FLOAT) float32_init (psf) ; else if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_DOUBLE) double64_init (psf) ; else return (psf->error = SFE_BAD_COMMAND_PARAM) ; break ; /* Lite remove end */ case SFC_SET_CLIPPING : psf->add_clipping = (datasize) ? SF_TRUE : SF_FALSE ; return psf->add_clipping ; case SFC_GET_CLIPPING : return psf->add_clipping ; case SFC_GET_LOOP_INFO : if (datasize != sizeof (SF_LOOP_INFO) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->loop_info == NULL) return SF_FALSE ; memcpy (data, psf->loop_info, sizeof (SF_LOOP_INFO)) ; return SF_TRUE ; case SFC_SET_BROADCAST_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 supports the BEXT (Broadcast) chunk. */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_WAVEX && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode. */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. 
*/ if (psf->broadcast_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (broadcast_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_BROADCAST_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return broadcast_var_get (psf, data, datasize) ; case SFC_SET_CART_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 support cart chunk format */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->cart_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (cart_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_CART_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return cart_var_get (psf, data, datasize) ; case SFC_GET_CUE_COUNT : if (datasize != sizeof (uint32_t) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues != NULL) { *((uint32_t *) data) = psf->cues->cue_count ; return SF_TRUE ; } ; return SF_FALSE ; case SFC_GET_CUE : if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL) return SF_FALSE ; psf_get_cues (psf, data, datasize) ; return SF_TRUE ; case SFC_SET_CUE : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL && (psf->cues = psf_cues_dup (data)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; return SF_TRUE ; case SFC_GET_INSTRUMENT : if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL) return SF_FALSE ; memcpy (data, psf->instrument, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_SET_INSTRUMENT : /* If data has already been written this must fail. 
*/ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->instrument, data, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_RAW_DATA_NEEDS_ENDSWAP : return psf->data_endswap ; case SFC_GET_CHANNEL_MAP_INFO : if (psf->channel_map == NULL) return SF_FALSE ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; memcpy (data, psf->channel_map, datasize) ; return SF_TRUE ; case SFC_SET_CHANNEL_MAP_INFO : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; { int *iptr ; for (iptr = data ; iptr < (int*) data + psf->sf.channels ; iptr++) { if (*iptr <= SF_CHANNEL_MAP_INVALID || *iptr >= SF_CHANNEL_MAP_MAX) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; } ; } ; free (psf->channel_map) ; if ((psf->channel_map = malloc (datasize)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->channel_map, data, datasize) ; /* ** Pass the command down to the container's command handler. ** Don't pass user data, use validated psf->channel_map data instead. */ if (psf->command) return psf->command (psf, command, NULL, 0) ; return SF_FALSE ; case SFC_SET_VBR_ENCODING_QUALITY : if (data == NULL || datasize != sizeof (double)) return SF_FALSE ; quality = *((double *) data) ; quality = 1.0 - SF_MAX (0.0, SF_MIN (1.0, quality)) ; return sf_command (sndfile, SFC_SET_COMPRESSION_LEVEL, &quality, sizeof (quality)) ; default : /* Must be a file specific command. Pass it on. */ if (psf->command) return psf->command (psf, command, data, datasize) ; psf_log_printf (psf, "*** sf_command : cmd = 0x%X\n", command) ; return (psf->error = SFE_BAD_COMMAND_PARAM) ; } ; return 0 ; } /* sf_command */ /*------------------------------------------------------------------------------ */ sf_count_t sf_seek (SNDFILE *sndfile, sf_count_t offset, int whence) { SF_PRIVATE *psf ; sf_count_t seek_from_start = 0, retval ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (! psf->sf.seekable) { psf->error = SFE_NOT_SEEKABLE ; return PSF_SEEK_ERROR ; } ; /* If the whence parameter has a mode ORed in, check to see that ** it makes sense. */ if (((whence & SFM_MASK) == SFM_WRITE && psf->file.mode == SFM_READ) || ((whence & SFM_MASK) == SFM_READ && psf->file.mode == SFM_WRITE)) { psf->error = SFE_WRONG_SEEK ; return PSF_SEEK_ERROR ; } ; /* Convert all SEEK_CUR and SEEK_END into seek_from_start to be ** used with SEEK_SET. */ switch (whence) { /* The SEEK_SET behaviour is independant of mode. */ case SEEK_SET : case SEEK_SET | SFM_READ : case SEEK_SET | SFM_WRITE : case SEEK_SET | SFM_RDWR : seek_from_start = offset ; break ; /* The SEEK_CUR is a little more tricky. 
*/ case SEEK_CUR : if (offset == 0) { if (psf->file.mode == SFM_READ) return psf->read_current ; if (psf->file.mode == SFM_WRITE) return psf->write_current ; } ; if (psf->file.mode == SFM_READ) seek_from_start = psf->read_current + offset ; else if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) seek_from_start = psf->write_current + offset ; else psf->error = SFE_AMBIGUOUS_SEEK ; break ; case SEEK_CUR | SFM_READ : if (offset == 0) return psf->read_current ; seek_from_start = psf->read_current + offset ; break ; case SEEK_CUR | SFM_WRITE : if (offset == 0) return psf->write_current ; seek_from_start = psf->write_current + offset ; break ; /* The SEEK_END */ case SEEK_END : case SEEK_END | SFM_READ : case SEEK_END | SFM_WRITE : seek_from_start = psf->sf.frames + offset ; break ; default : psf->error = SFE_BAD_SEEK ; break ; } ; if (psf->error) return PSF_SEEK_ERROR ; if (psf->file.mode == SFM_RDWR || psf->file.mode == SFM_WRITE) { if (seek_from_start < 0) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; } else if (seek_from_start < 0 || seek_from_start > psf->sf.frames) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; if (psf->seek) { int new_mode = (whence & SFM_MASK) ? (whence & SFM_MASK) : psf->file.mode ; retval = psf->seek (psf, new_mode, seek_from_start) ; switch (new_mode) { case SFM_READ : psf->read_current = retval ; break ; case SFM_WRITE : psf->write_current = retval ; break ; case SFM_RDWR : psf->read_current = retval ; psf->write_current = retval ; new_mode = SFM_READ ; break ; } ; psf->last_op = new_mode ; return retval ; } ; psf->error = SFE_AMBIGUOUS_SEEK ; return PSF_SEEK_ERROR ; } /* sf_seek */ /*------------------------------------------------------------------------------ */ const char* sf_get_string (SNDFILE *sndfile, int str_type) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return NULL ; if (psf->Magick != SNDFILE_MAGICK) return NULL ; return psf_get_string (psf, str_type) ; } /* sf_get_string */ int sf_set_string (SNDFILE *sndfile, int str_type, const char* str) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_set_string (psf, str_type, str) ; } /* sf_get_string */ /*------------------------------------------------------------------------------ */ int sf_current_byterate (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return -1 ; if (psf->Magick != SNDFILE_MAGICK) return -1 ; /* This should cover all PCM and floating point formats. */ if (psf->bytewidth) return psf->sf.samplerate * psf->sf.channels * psf->bytewidth ; if (psf->byterate) return psf->byterate (psf) ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_IMA_ADPCM : case SF_FORMAT_MS_ADPCM : case SF_FORMAT_VOX_ADPCM : return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_GSM610 : return (psf->sf.samplerate * psf->sf.channels * 13000) / 8000 ; case SF_FORMAT_G721_32 : /* 32kbs G721 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_G723_24 : /* 24kbs G723 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels * 3) / 8 ; case SF_FORMAT_G723_40 : /* 40kbs G723 ADPCM encoding. 
*/ return (psf->sf.samplerate * psf->sf.channels * 5) / 8 ; default : break ; } ; return -1 ; } /* sf_current_byterate */ /*============================================================================== */ sf_count_t sf_read_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) { SF_PRIVATE *psf ; sf_count_t count, extra ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (bytes < 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, bytes) ; return 0 ; } ; if (bytes % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf_fread (ptr, 1, bytes, psf) ; if (psf->read_current + count / blockwidth <= psf->sf.frames) psf->read_current += count / blockwidth ; else { count = (psf->sf.frames - psf->read_current) * blockwidth ; extra = bytes - count ; psf_memset (((char *) ptr) + count, 0, extra) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_short (SNDFILE *sndfile, short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (short)) ; return 0 ; /* End of file. */ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_short */ sf_count_t sf_readf_short (SNDFILE *sndfile, short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (short)) ; return 0 ; /* End of file. 
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_int */ sf_count_t sf_readf_int (SNDFILE *sndfile, int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, 
psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_float */ sf_count_t sf_readf_float (SNDFILE *sndfile, float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_double (SNDFILE *sndfile, double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_double */ sf_count_t sf_readf_double (SNDFILE *sndfile, double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; 
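/*
** A minimal usage sketch (not part of the library itself) showing how the
** sf_readf_* frame readers are typically driven from application code.
** The file name and buffer size below are illustrative assumptions only.
**
**    #include <stdio.h>
**    #include <stdlib.h>
**    #include <sndfile.h>
**
**    int main (void)
**    {   SF_INFO info = { 0 } ;          // must be zeroed before a read-mode sf_open()
**        SNDFILE *file = sf_open ("input.wav", SFM_READ, &info) ;   // "input.wav" is a placeholder
**
**        if (file == NULL)
**        {   fprintf (stderr, "%s\n", sf_strerror (NULL)) ;
**            return 1 ;
**        } ;
**
**        double *buf = malloc (4096 * info.channels * sizeof (double)) ;
**        sf_count_t frames ;
**
**        if (buf == NULL)
**        {   sf_close (file) ;
**            return 1 ;
**        } ;
**
**        // sf_readf_double() returns the number of frames actually read; a short
**        // count (or zero) signals end of file, as implemented above.
**        while ((frames = sf_readf_double (file, buf, 4096)) > 0)
**            ;   // process "frames" frames of interleaved samples here
**
**        free (buf) ;
**        sf_close (file) ;
**        return 0 ;
**    }
*/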
psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_double */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_raw (SNDFILE *sndfile, const void *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf_fwrite (ptr, 1, len, psf) ; psf->write_current += count / blockwidth ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_short (SNDFILE *sndfile, const short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_short */ sf_count_t sf_writef_short (SNDFILE *sndfile, const short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_int (SNDFILE *sndfile, const int *ptr, sf_count_t len) { SF_PRIVATE 
*psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_int */ sf_count_t sf_writef_int (SNDFILE *sndfile, const int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_float (SNDFILE *sndfile, const float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_float (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_float */ sf_count_t sf_writef_float (SNDFILE *sndfile, const float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; 
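/*
** A minimal usage sketch (not part of the library itself) showing how the
** sf_writef_* / sf_write_* functions above are normally used together with
** sf_format_check().  The output file name, format and generated sine data
** are illustrative assumptions only.
**
**    #include <math.h>
**    #include <stdio.h>
**    #include <sndfile.h>
**
**    int main (void)
**    {   static float buf [44100] ;      // one second of mono audio at 44.1 kHz
**        SF_INFO info = { 0 } ;
**
**        info.samplerate = 44100 ;
**        info.channels   = 1 ;
**        info.format     = SF_FORMAT_WAV | SF_FORMAT_PCM_16 ;
**
**        if (! sf_format_check (&info))  // validate the container/codec combination first
**        {   fprintf (stderr, "Invalid format.\n") ;
**            return 1 ;
**        } ;
**
**        SNDFILE *file = sf_open ("output.wav", SFM_WRITE, &info) ;  // placeholder name
**        if (file == NULL)
**        {   fprintf (stderr, "%s\n", sf_strerror (NULL)) ;
**            return 1 ;
**        } ;
**
**        for (int k = 0 ; k < 44100 ; k++)       // 440 Hz test tone
**            buf [k] = 0.5f * sinf (2.0f * 3.14159265f * 440.0f * k / 44100.0f) ;
**
**        // sf_writef_float() takes a frame count and returns the frames written.
**        sf_writef_float (file, buf, 44100) ;
**        sf_close (file) ;                       // flushes data and rewrites the header
**        return 0 ;
**    }
*/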
count = psf->write_float (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_double (SNDFILE *sndfile, const double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_double */ sf_count_t sf_writef_double (SNDFILE *sndfile, const double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_double */ /*========================================================================= ** Private functions. */ static int try_resource_fork (SF_PRIVATE * psf) { int old_error = psf->error ; /* Set READ mode now, to see if resource fork exists. */ psf->rsrc.mode = SFM_READ ; if (psf_open_rsrc (psf) != 0) { psf->error = old_error ; return 0 ; } ; /* More checking here. */ psf_log_printf (psf, "Resource fork : %s\n", psf->rsrc.path.c) ; return SF_FORMAT_SD2 ; } /* try_resource_fork */ static int format_from_extension (SF_PRIVATE *psf) { char *cptr ; char buffer [16] ; int format = 0 ; if ((cptr = strrchr (psf->file.name.c, '.')) == NULL) return 0 ; cptr ++ ; if (strlen (cptr) > sizeof (buffer) - 1) return 0 ; psf_strlcpy (buffer, sizeof (buffer), cptr) ; buffer [sizeof (buffer) - 1] = 0 ; /* Convert everything in the buffer to lower case. 
*/ cptr = buffer ; while (*cptr) { *cptr = tolower (*cptr) ; cptr ++ ; } ; cptr = buffer ; if (strcmp (cptr, "au") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "snd") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "vox") == 0 || strcmp (cptr, "vox8") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "vox6") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 6000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "gsm") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_GSM610 ; } /* For RAW files, make sure the dataoffset if set correctly. */ if ((SF_CONTAINER (format)) == SF_FORMAT_RAW) psf->dataoffset = 0 ; return format ; } /* format_from_extension */ static int guess_file_type (SF_PRIVATE *psf) { uint32_t buffer [3], format ; if (psf_binheader_readf (psf, "b", &buffer, SIGNED_SIZEOF (buffer)) != SIGNED_SIZEOF (buffer)) { psf->error = SFE_BAD_FILE_READ ; return 0 ; } ; if ((buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'F') || buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'X')) && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_WAV ; if (buffer [0] == MAKE_MARKER ('F', 'O', 'R', 'M')) { if (buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'F') || buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'C')) return SF_FORMAT_AIFF ; if (buffer [2] == MAKE_MARKER ('8', 'S', 'V', 'X') || buffer [2] == MAKE_MARKER ('1', '6', 'S', 'V')) return SF_FORMAT_SVX ; return 0 ; } ; if (buffer [0] == MAKE_MARKER ('.', 's', 'n', 'd') || buffer [0] == MAKE_MARKER ('d', 'n', 's', '.')) return SF_FORMAT_AU ; if ((buffer [0] == MAKE_MARKER ('f', 'a', 'p', ' ') || buffer [0] == MAKE_MARKER (' ', 'p', 'a', 'f'))) return SF_FORMAT_PAF ; if (buffer [0] == MAKE_MARKER ('N', 'I', 'S', 'T')) return SF_FORMAT_NIST ; if (buffer [0] == MAKE_MARKER ('C', 'r', 'e', 'a') && buffer [1] == MAKE_MARKER ('t', 'i', 'v', 'e')) return SF_FORMAT_VOC ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0xF8, 0xFF)) == MAKE_MARKER (0x64, 0xA3, 0x00, 0x00) || (buffer [0] & MAKE_MARKER (0xFF, 0xF8, 0xFF, 0xFF)) == MAKE_MARKER (0x00, 0x00, 0xA3, 0x64)) return SF_FORMAT_IRCAM ; if (buffer [0] == MAKE_MARKER ('r', 'i', 'f', 'f')) return SF_FORMAT_W64 ; if (buffer [0] == MAKE_MARKER (0, 0, 0x03, 0xE8) && buffer [1] == MAKE_MARKER (0, 0, 0, 1) && buffer [2] == MAKE_MARKER (0, 0, 0, 1)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER (0, 0, 0, 0) && buffer [1] == MAKE_MARKER (1, 0, 0, 0) && buffer [2] == MAKE_MARKER (1, 0, 0, 0)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER ('M', 'A', 'T', 'L') && buffer [1] == MAKE_MARKER ('A', 'B', ' ', '5')) return SF_FORMAT_MAT5 ; if (buffer [0] == MAKE_MARKER ('P', 'V', 'F', '1')) return SF_FORMAT_PVF ; if (buffer [0] == MAKE_MARKER ('E', 'x', 't', 'e') && buffer [1] == MAKE_MARKER ('n', 'd', 'e', 'd') && buffer [2] == MAKE_MARKER (' ', 'I', 'n', 's')) return SF_FORMAT_XI ; if (buffer [0] == MAKE_MARKER ('c', 'a', 'f', 'f') && buffer [2] == MAKE_MARKER ('d', 'e', 's', 'c')) return SF_FORMAT_CAF ; if (buffer [0] == MAKE_MARKER ('O', 'g', 'g', 'S')) return SF_FORMAT_OGG ; if (buffer [0] == MAKE_MARKER ('A', 'L', 'a', 'w') && buffer [1] == MAKE_MARKER ('S', 'o', 'u', 'n') && buffer [2] == MAKE_MARKER ('d', 'F', 'i', 'l')) return SF_FORMAT_WVE ; if (buffer [0] == MAKE_MARKER ('D', 'i', 'a', 'm') && buffer 
[1] == MAKE_MARKER ('o', 'n', 'd', 'W') && buffer [2] == MAKE_MARKER ('a', 'r', 'e', ' ')) return SF_FORMAT_DWD ; if (buffer [0] == MAKE_MARKER ('L', 'M', '8', '9') || buffer [0] == MAKE_MARKER ('5', '3', 0, 0)) return SF_FORMAT_TXW ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0x80, 0xFF)) == MAKE_MARKER (0xF0, 0x7E, 0, 0x01)) return SF_FORMAT_SDS ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0, 0)) == MAKE_MARKER (1, 4, 0, 0)) return SF_FORMAT_MPC2K ; if (buffer [0] == MAKE_MARKER ('C', 'A', 'T', ' ') && buffer [2] == MAKE_MARKER ('R', 'E', 'X', '2')) return SF_FORMAT_REX2 ; if (buffer [0] == MAKE_MARKER (0x30, 0x26, 0xB2, 0x75) && buffer [1] == MAKE_MARKER (0x8E, 0x66, 0xCF, 0x11)) return 0 /*-SF_FORMAT_WMA-*/ ; /* HMM (Hidden Markov Model) Tool Kit. */ if (buffer [2] == MAKE_MARKER (0, 2, 0, 0) && 2 * ((int64_t) BE2H_32 (buffer [0])) + 12 == psf->filelength) return SF_FORMAT_HTK ; if (buffer [0] == MAKE_MARKER ('f', 'L', 'a', 'C')) return SF_FORMAT_FLAC ; if (buffer [0] == MAKE_MARKER ('2', 'B', 'I', 'T')) return SF_FORMAT_AVR ; if (buffer [0] == MAKE_MARKER ('R', 'F', '6', '4') && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_RF64 ; if (buffer [0] == MAKE_MARKER ('I', 'D', '3', 3)) { psf_log_printf (psf, "Found 'ID3' marker.\n") ; if (id3_skip (psf)) return guess_file_type (psf) ; return 0 ; } ; /* Turtle Beach SMP 16-bit */ if (buffer [0] == MAKE_MARKER ('S', 'O', 'U', 'N') && buffer [1] == MAKE_MARKER ('D', ' ', 'S', 'A')) return 0 ; /* Yamaha sampler format. */ if (buffer [0] == MAKE_MARKER ('S', 'Y', '8', '0') || buffer [0] == MAKE_MARKER ('S', 'Y', '8', '5')) return 0 ; if (buffer [0] == MAKE_MARKER ('a', 'j', 'k', 'g')) return 0 /*-SF_FORMAT_SHN-*/ ; /* This must be the last one. */ if (psf->filelength > 0 && (format = try_resource_fork (psf)) != 0) return format ; return 0 ; } /* guess_file_type */ static int validate_sfinfo (SF_INFO *sfinfo) { if (sfinfo->samplerate < 1) return 0 ; if (sfinfo->frames < 0) return 0 ; if (sfinfo->channels < 1) return 0 ; if ((SF_CONTAINER (sfinfo->format)) == 0) return 0 ; if ((SF_CODEC (sfinfo->format)) == 0) return 0 ; if (sfinfo->sections < 1) return 0 ; return 1 ; } /* validate_sfinfo */ static int validate_psf (SF_PRIVATE *psf) { if (psf->datalength < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : datalength == %D.\n", psf->datalength) ; return 0 ; } ; if (psf->dataoffset < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : dataoffset == %D.\n", psf->dataoffset) ; return 0 ; } ; if (psf->blockwidth && psf->blockwidth != psf->sf.channels * psf->bytewidth) { psf_log_printf (psf, "Invalid SF_PRIVATE field : channels * bytewidth == %d.\n", psf->sf.channels * psf->bytewidth) ; return 0 ; } ; return 1 ; } /* validate_psf */ static void save_header_info (SF_PRIVATE *psf) { snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; } /* save_header_info */ static int copy_filename (SF_PRIVATE *psf, const char *path) { const char *ccptr ; char *cptr ; if (strlen (path) > 1 && strlen (path) - 1 >= sizeof (psf->file.path.c)) { psf->error = SFE_FILENAME_TOO_LONG ; return psf->error ; } ; snprintf (psf->file.path.c, sizeof (psf->file.path.c), "%s", path) ; if ((ccptr = strrchr (path, '/')) || (ccptr = strrchr (path, '\\'))) ccptr ++ ; else ccptr = path ; snprintf (psf->file.name.c, sizeof (psf->file.name.c), "%s", ccptr) ; /* Now grab the directory. 
*/ snprintf (psf->file.dir.c, sizeof (psf->file.dir.c), "%s", path) ; if ((cptr = strrchr (psf->file.dir.c, '/')) || (cptr = strrchr (psf->file.dir.c, '\\'))) cptr [1] = 0 ; else psf->file.dir.c [0] = 0 ; return 0 ; } /* copy_filename */ /*============================================================================== */ static int psf_close (SF_PRIVATE *psf) { uint32_t k ; int error = 0 ; if (psf->codec_close) { error = psf->codec_close (psf) ; /* To prevent it being called in psf->container_close(). */ psf->codec_close = NULL ; } ; if (psf->container_close) error = psf->container_close (psf) ; error = psf_fclose (psf) ; psf_close_rsrc (psf) ; /* For an ISO C compliant implementation it is ok to free a NULL pointer. */ free (psf->container_data) ; free (psf->codec_data) ; free (psf->interleave) ; free (psf->dither) ; free (psf->peak_info) ; free (psf->broadcast_16k) ; free (psf->loop_info) ; free (psf->instrument) ; free (psf->cues) ; free (psf->channel_map) ; free (psf->format_desc) ; free (psf->strings.storage) ; if (psf->wchunks.chunks) for (k = 0 ; k < psf->wchunks.used ; k++) free (psf->wchunks.chunks [k].data) ; free (psf->rchunks.chunks) ; free (psf->wchunks.chunks) ; free (psf->iterator) ; free (psf->cart_16k) ; memset (psf, 0, sizeof (SF_PRIVATE)) ; free (psf) ; return error ; } /* psf_close */ SNDFILE * psf_open_file (SF_PRIVATE *psf, SF_INFO *sfinfo) { int error, format ; sf_errno = error = 0 ; sf_parselog [0] = 0 ; if (psf->error) { error = psf->error ; goto error_exit ; } ; if (psf->file.mode != SFM_READ && psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) { error = SFE_BAD_OPEN_MODE ; goto error_exit ; } ; if (sfinfo == NULL) { error = SFE_BAD_SF_INFO_PTR ; goto error_exit ; } ; if (psf->file.mode == SFM_READ) { if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_RAW) { if (sf_format_check (sfinfo) == 0) { error = SFE_RAW_BAD_FORMAT ; goto error_exit ; } ; } else memset (sfinfo, 0, sizeof (SF_INFO)) ; } ; memcpy (&psf->sf, sfinfo, sizeof (SF_INFO)) ; psf->Magick = SNDFILE_MAGICK ; psf->norm_float = SF_TRUE ; psf->norm_double = SF_TRUE ; psf->dataoffset = -1 ; psf->datalength = -1 ; psf->read_current = -1 ; psf->write_current = -1 ; psf->auto_header = SF_FALSE ; psf->rwf_endian = SF_ENDIAN_LITTLE ; psf->seek = psf_default_seek ; psf->float_int_mult = 0 ; psf->float_max = -1.0 ; /* An attempt at a per SF_PRIVATE unique id. */ psf->unique_id = psf_rand_int32 () ; psf->sf.sections = 1 ; psf->is_pipe = psf_is_pipe (psf) ; if (psf->is_pipe) { psf->sf.seekable = SF_FALSE ; psf->filelength = SF_COUNT_MAX ; } else { psf->sf.seekable = SF_TRUE ; /* File is open, so get the length. 
*/ psf->filelength = psf_get_filelen (psf) ; } ; if (psf->fileoffset > 0) { switch (psf->file.mode) { case SFM_READ : if (psf->filelength < 44) { psf_log_printf (psf, "Short filelength: %D (fileoffset: %D)\n", psf->filelength, psf->fileoffset) ; error = SFE_BAD_OFFSET ; goto error_exit ; } ; break ; case SFM_WRITE : psf->fileoffset = 0 ; psf_fseek (psf, 0, SEEK_END) ; psf->fileoffset = psf_ftell (psf) ; break ; case SFM_RDWR : error = SFE_NO_EMBEDDED_RDWR ; goto error_exit ; } ; psf_log_printf (psf, "Embedded file offset : %D\n", psf->fileoffset) ; } ; if (psf->filelength == SF_COUNT_MAX) psf_log_printf (psf, "Length : unknown\n") ; else psf_log_printf (psf, "Length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->filelength == 0)) { /* If the file is being opened for write or RDWR and the file is currently ** empty, then the SF_INFO struct must contain valid data. */ if ((SF_CONTAINER (psf->sf.format)) == 0) { error = SFE_ZERO_MAJOR_FORMAT ; goto error_exit ; } ; if ((SF_CODEC (psf->sf.format)) == 0) { error = SFE_ZERO_MINOR_FORMAT ; goto error_exit ; } ; if (sf_format_check (&psf->sf) == 0) { error = SFE_BAD_OPEN_FORMAT ; goto error_exit ; } ; } else if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) { /* If type RAW has not been specified then need to figure out file type. */ psf->sf.format = guess_file_type (psf) ; if (psf->sf.format == 0) psf->sf.format = format_from_extension (psf) ; } ; /* Prevent unnecessary seeks */ psf->last_op = psf->file.mode ; /* Set bytewidth if known. */ switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_S8 : case SF_FORMAT_PCM_U8 : case SF_FORMAT_ULAW : case SF_FORMAT_ALAW : case SF_FORMAT_DPCM_8 : psf->bytewidth = 1 ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_DPCM_16 : psf->bytewidth = 2 ; break ; case SF_FORMAT_PCM_24 : psf->bytewidth = 3 ; break ; case SF_FORMAT_PCM_32 : case SF_FORMAT_FLOAT : psf->bytewidth = 4 ; break ; case SF_FORMAT_DOUBLE : psf->bytewidth = 8 ; break ; } ; /* Call the initialisation function for the relevant file type. 
*/ switch (SF_CONTAINER (psf->sf.format)) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : error = wav_open (psf) ; break ; case SF_FORMAT_AIFF : error = aiff_open (psf) ; break ; case SF_FORMAT_AU : error = au_open (psf) ; break ; case SF_FORMAT_RAW : error = raw_open (psf) ; break ; case SF_FORMAT_W64 : error = w64_open (psf) ; break ; case SF_FORMAT_RF64 : error = rf64_open (psf) ; break ; /* Lite remove start */ case SF_FORMAT_PAF : error = paf_open (psf) ; break ; case SF_FORMAT_SVX : error = svx_open (psf) ; break ; case SF_FORMAT_NIST : error = nist_open (psf) ; break ; case SF_FORMAT_IRCAM : error = ircam_open (psf) ; break ; case SF_FORMAT_VOC : error = voc_open (psf) ; break ; case SF_FORMAT_SDS : error = sds_open (psf) ; break ; case SF_FORMAT_OGG : error = ogg_open (psf) ; break ; case SF_FORMAT_TXW : error = txw_open (psf) ; break ; case SF_FORMAT_WVE : error = wve_open (psf) ; break ; case SF_FORMAT_DWD : error = dwd_open (psf) ; break ; case SF_FORMAT_MAT4 : error = mat4_open (psf) ; break ; case SF_FORMAT_MAT5 : error = mat5_open (psf) ; break ; case SF_FORMAT_PVF : error = pvf_open (psf) ; break ; case SF_FORMAT_XI : error = xi_open (psf) ; break ; case SF_FORMAT_HTK : error = htk_open (psf) ; break ; case SF_FORMAT_SD2 : error = sd2_open (psf) ; break ; case SF_FORMAT_REX2 : error = rx2_open (psf) ; break ; case SF_FORMAT_AVR : error = avr_open (psf) ; break ; case SF_FORMAT_FLAC : error = flac_open (psf) ; break ; case SF_FORMAT_CAF : error = caf_open (psf) ; break ; case SF_FORMAT_MPC2K : error = mpc2k_open (psf) ; break ; /* Lite remove end */ default : error = SFE_UNKNOWN_FORMAT ; } ; if (error) goto error_exit ; /* For now, check whether embedding is supported. */ format = SF_CONTAINER (psf->sf.format) ; if (psf->fileoffset > 0) { switch (format) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_AIFF : case SF_FORMAT_AU : /* Actual embedded files. */ break ; case SF_FORMAT_FLAC : /* Flac with an ID3v2 header? */ break ; default : error = SFE_NO_EMBED_SUPPORT ; goto error_exit ; } ; } ; if (psf->fileoffset > 0) psf_log_printf (psf, "Embedded file length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_RDWR && sf_format_check (&psf->sf) == 0) { error = SFE_BAD_MODE_RW ; goto error_exit ; } ; if (validate_sfinfo (&psf->sf) == 0) { psf_log_SF_INFO (psf) ; save_header_info (psf) ; error = SFE_BAD_SF_INFO ; goto error_exit ; } ; if (validate_psf (psf) == 0) { save_header_info (psf) ; error = SFE_INTERNAL ; goto error_exit ; } ; psf->read_current = 0 ; psf->write_current = 0 ; if (psf->file.mode == SFM_RDWR) { psf->write_current = psf->sf.frames ; psf->have_written = psf->sf.frames > 0 ? SF_TRUE : SF_FALSE ; } ; memcpy (sfinfo, &psf->sf, sizeof (SF_INFO)) ; if (psf->file.mode == SFM_WRITE) { /* Zero out these fields. */ sfinfo->frames = 0 ; sfinfo->sections = 0 ; sfinfo->seekable = 0 ; } ; return (SNDFILE *) psf ; error_exit : sf_errno = error ; if (error == SFE_SYSTEM) snprintf (sf_syserr, sizeof (sf_syserr), "%s", psf->syserr) ; snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; switch (error) { case SF_ERR_SYSTEM : case SF_ERR_UNSUPPORTED_ENCODING : case SFE_UNIMPLEMENTED : break ; case SFE_RAW_BAD_FORMAT : break ; default : if (psf->file.mode == SFM_READ) { psf_log_printf (psf, "Parse error : %s\n", sf_error_number (error)) ; error = SF_ERR_MALFORMED_FILE ; } ; } ; psf_close (psf) ; return NULL ; } /* psf_open_file */ /*============================================================================== ** Chunk getting and setting. 
** This works for AIFF, CAF, RF64 and WAV. ** It doesn't work for W64 because W64 uses weird GUID style chunk markers. */ int sf_set_chunk (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->set_chunk) return psf->set_chunk (psf, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_set_chunk */ SF_CHUNK_ITERATOR * sf_get_chunk_iterator (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info) return psf_get_chunk_iterator (psf, chunk_info->id) ; return psf_get_chunk_iterator (psf, NULL) ; } /* sf_get_chunk_iterator */ SF_CHUNK_ITERATOR * sf_next_chunk_iterator (SF_CHUNK_ITERATOR * iterator) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->next_chunk_iterator) return psf->next_chunk_iterator (psf, iterator) ; return NULL ; } /* sf_next_chunk_iterator */ int sf_get_chunk_size (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_size) return psf->get_chunk_size (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_get_chunk_size */ int sf_get_chunk_data (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_data) return psf->get_chunk_data (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_get_chunk_data */
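/*
** Usage sketch (illustrative only, not part of libsndfile itself) : walking every
** chunk of a file with the iterator API above. The dump_chunks helper and its
** path argument are hypothetical, error handling is abbreviated, and the block
** is wrapped in #if 0 so it does not affect this translation unit. It assumes
** <stdio.h>, <stdlib.h>, <string.h> and "sndfile.h" are available.
*/
#if 0
static void
dump_chunks (const char *path)
{	SF_INFO info ;
	SNDFILE *file ;
	SF_CHUNK_ITERATOR *iterator ;
	SF_CHUNK_INFO chunk_info ;

	memset (&info, 0, sizeof (info)) ;
	if ((file = sf_open (path, SFM_READ, &info)) == NULL)
	{	puts (sf_strerror (NULL)) ;
		return ;
		} ;

	/* Passing a NULL SF_CHUNK_INFO pointer requests an iterator over every chunk. */
	for (iterator = sf_get_chunk_iterator (file, NULL) ; iterator != NULL ; iterator = sf_next_chunk_iterator (iterator))
	{	memset (&chunk_info, 0, sizeof (chunk_info)) ;

		/* Query the chunk size first, then allocate a buffer and fetch the data. */
		if (sf_get_chunk_size (iterator, &chunk_info) != SF_ERR_NO_ERROR || chunk_info.datalen == 0)
			continue ;
		if ((chunk_info.data = malloc (chunk_info.datalen)) == NULL)
			break ;
		if (sf_get_chunk_data (iterator, &chunk_info) == SF_ERR_NO_ERROR)
			printf ("chunk '%.*s' : %u bytes\n", (int) chunk_info.id_size, chunk_info.id, chunk_info.datalen) ;
		free (chunk_info.data) ;
		} ;

	sf_close (file) ;
} /* dump_chunks */
#endif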
/* ** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #define SNDFILE_MAGICK 0x1234C0DE #ifdef __APPLE__ /* ** Detect if a compile for a universal binary is being attempted and barf if it is. ** See the URL below for the rationale. */ #ifdef __BIG_ENDIAN__ #if (CPU_IS_LITTLE_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #ifdef __LITTLE_ENDIAN__ #if (CPU_IS_BIG_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #endif typedef struct { int error ; const char *str ; } ErrorStruct ; static ErrorStruct SndfileErrors [] = { /* Public error values and their associated strings. */ { SF_ERR_NO_ERROR , "No Error." }, { SF_ERR_UNRECOGNISED_FORMAT , "Format not recognised." }, { SF_ERR_SYSTEM , "System error." /* Often replaced. */ }, { SF_ERR_MALFORMED_FILE , "Supported file format but file is malformed." }, { SF_ERR_UNSUPPORTED_ENCODING , "Supported file format but unsupported encoding." }, /* Private error values and their associated strings. */ { SFE_ZERO_MAJOR_FORMAT , "Error : major format is 0." }, { SFE_ZERO_MINOR_FORMAT , "Error : minor format is 0." }, { SFE_BAD_FILE , "File does not exist or is not a regular file (possibly a pipe?)." }, { SFE_BAD_FILE_READ , "File exists but no data could be read." }, { SFE_OPEN_FAILED , "Could not open file." }, { SFE_BAD_SNDFILE_PTR , "Not a valid SNDFILE* pointer." }, { SFE_BAD_SF_INFO_PTR , "NULL SF_INFO pointer passed to libsndfile." }, { SFE_BAD_SF_INCOMPLETE , "SF_PRIVATE struct incomplete and end of header parsing." }, { SFE_BAD_FILE_PTR , "Bad FILE pointer." }, { SFE_BAD_INT_PTR , "Internal error, Bad pointer." }, { SFE_BAD_STAT_SIZE , "Error : software was misconfigured at compile time (sizeof statbuf.st_size)." }, { SFE_NO_TEMP_DIR , "Error : Could not file temp dir." }, { SFE_MALLOC_FAILED , "Internal malloc () failed." }, { SFE_UNIMPLEMENTED , "File contains data in an unimplemented format." }, { SFE_BAD_READ_ALIGN , "Attempt to read a non-integer number of channels." }, { SFE_BAD_WRITE_ALIGN , "Attempt to write a non-integer number of channels." }, { SFE_UNKNOWN_FORMAT , "File contains data in an unknown format." }, { SFE_NOT_READMODE , "Read attempted on file currently open for write." }, { SFE_NOT_WRITEMODE , "Write attempted on file currently open for read." }, { SFE_BAD_MODE_RW , "Error : This file format does not support read/write mode." }, { SFE_BAD_SF_INFO , "Internal error : SF_INFO struct incomplete." }, { SFE_BAD_OFFSET , "Error : supplied offset beyond end of file." 
}, { SFE_NO_EMBED_SUPPORT , "Error : embedding not supported for this file format." }, { SFE_NO_EMBEDDED_RDWR , "Error : cannot open embedded file read/write." }, { SFE_NO_PIPE_WRITE , "Error : this file format does not support pipe write." }, { SFE_BAD_VIRTUAL_IO , "Error : bad pointer on SF_VIRTUAL_IO struct." }, { SFE_BAD_BROADCAST_INFO_SIZE , "Error : bad coding_history_size in SF_BROADCAST_INFO struct." }, { SFE_BAD_BROADCAST_INFO_TOO_BIG , "Error : SF_BROADCAST_INFO struct too large." }, { SFE_BAD_CART_INFO_SIZE , "Error: SF_CART_INFO struct too large." }, { SFE_BAD_CART_INFO_TOO_BIG , "Error: bag tag_text_size in SF_CART_INFO struct." }, { SFE_INTERLEAVE_MODE , "Attempt to write to file with non-interleaved data." }, { SFE_INTERLEAVE_SEEK , "Bad karma in seek during interleave read operation." }, { SFE_INTERLEAVE_READ , "Bad karma in read during interleave read operation." }, { SFE_INTERNAL , "Unspecified internal error." }, { SFE_BAD_COMMAND_PARAM , "Bad parameter passed to function sf_command." }, { SFE_BAD_ENDIAN , "Bad endian-ness. Try default endian-ness" }, { SFE_CHANNEL_COUNT_ZERO , "Channel count is zero." }, { SFE_CHANNEL_COUNT , "Too many channels specified." }, { SFE_CHANNEL_COUNT_BAD , "Bad channel count." }, { SFE_BAD_SEEK , "Internal psf_fseek() failed." }, { SFE_NOT_SEEKABLE , "Seek attempted on unseekable file type." }, { SFE_AMBIGUOUS_SEEK , "Error : combination of file open mode and seek command is ambiguous." }, { SFE_WRONG_SEEK , "Error : invalid seek parameters." }, { SFE_SEEK_FAILED , "Error : parameters OK, but psf_seek() failed." }, { SFE_BAD_OPEN_MODE , "Error : bad mode parameter for file open." }, { SFE_OPEN_PIPE_RDWR , "Error : attempt to open a pipe in read/write mode." }, { SFE_RDWR_POSITION , "Error on RDWR position (cryptic)." }, { SFE_RDWR_BAD_HEADER , "Error : Cannot open file in read/write mode due to string data in header." }, { SFE_CMD_HAS_DATA , "Error : Command fails because file already has audio data." }, { SFE_STR_NO_SUPPORT , "Error : File type does not support string data." }, { SFE_STR_NOT_WRITE , "Error : Trying to set a string when file is not in write mode." }, { SFE_STR_MAX_DATA , "Error : Maximum string data storage reached." }, { SFE_STR_MAX_COUNT , "Error : Maximum string data count reached." }, { SFE_STR_BAD_TYPE , "Error : Bad string data type." }, { SFE_STR_NO_ADD_END , "Error : file type does not support strings added at end of file." }, { SFE_STR_BAD_STRING , "Error : bad string." }, { SFE_STR_WEIRD , "Error : Weird string error." }, { SFE_WAV_NO_RIFF , "Error in WAV file. No 'RIFF' chunk marker." }, { SFE_WAV_NO_WAVE , "Error in WAV file. No 'WAVE' chunk marker." }, { SFE_WAV_NO_FMT , "Error in WAV/W64/RF64 file. No 'fmt ' chunk marker." }, { SFE_WAV_BAD_FMT , "Error in WAV/W64/RF64 file. Malformed 'fmt ' chunk." }, { SFE_WAV_FMT_SHORT , "Error in WAV/W64/RF64 file. Short 'fmt ' chunk." }, { SFE_WAV_BAD_FACT , "Error in WAV file. 'fact' chunk out of place." }, { SFE_WAV_BAD_PEAK , "Error in WAV file. Bad 'PEAK' chunk." }, { SFE_WAV_PEAK_B4_FMT , "Error in WAV file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_WAV_BAD_FORMAT , "Error in WAV file. Errors in 'fmt ' chunk." }, { SFE_WAV_BAD_BLOCKALIGN , "Error in WAV file. Block alignment in 'fmt ' chunk is incorrect." }, { SFE_WAV_NO_DATA , "Error in WAV file. No 'data' chunk marker." }, { SFE_WAV_BAD_LIST , "Error in WAV file. Malformed LIST chunk." }, { SFE_WAV_UNKNOWN_CHUNK , "Error in WAV file. File contains an unknown chunk marker." 
}, { SFE_WAV_WVPK_DATA , "Error in WAV file. Data is in WAVPACK format." }, { SFE_WAV_ADPCM_NOT4BIT , "Error in ADPCM WAV file. Invalid bit width." }, { SFE_WAV_ADPCM_CHANNELS , "Error in ADPCM WAV file. Invalid number of channels." }, { SFE_WAV_ADPCM_SAMPLES , "Error in ADPCM WAV file. Invalid number of samples per block." }, { SFE_WAV_GSM610_FORMAT , "Error in GSM610 WAV file. Invalid format chunk." }, { SFE_AIFF_NO_FORM , "Error in AIFF file, bad 'FORM' marker." }, { SFE_AIFF_AIFF_NO_FORM , "Error in AIFF file, 'AIFF' marker without 'FORM'." }, { SFE_AIFF_COMM_NO_FORM , "Error in AIFF file, 'COMM' marker without 'FORM'." }, { SFE_AIFF_SSND_NO_COMM , "Error in AIFF file, 'SSND' marker without 'COMM'." }, { SFE_AIFF_UNKNOWN_CHUNK , "Error in AIFF file, unknown chunk." }, { SFE_AIFF_COMM_CHUNK_SIZE, "Error in AIFF file, bad 'COMM' chunk size." }, { SFE_AIFF_BAD_COMM_CHUNK , "Error in AIFF file, bad 'COMM' chunk." }, { SFE_AIFF_PEAK_B4_COMM , "Error in AIFF file. 'PEAK' chunk found before 'COMM' chunk." }, { SFE_AIFF_BAD_PEAK , "Error in AIFF file. Bad 'PEAK' chunk." }, { SFE_AIFF_NO_SSND , "Error in AIFF file, bad 'SSND' chunk." }, { SFE_AIFF_NO_DATA , "Error in AIFF file, no sound data." }, { SFE_AIFF_RW_SSND_NOT_LAST, "Error in AIFF file, RDWR only possible if SSND chunk at end of file." }, { SFE_AU_UNKNOWN_FORMAT , "Error in AU file, unknown format." }, { SFE_AU_NO_DOTSND , "Error in AU file, missing '.snd' or 'dns.' marker." }, { SFE_AU_EMBED_BAD_LEN , "Embedded AU file with unknown length." }, { SFE_RAW_READ_BAD_SPEC , "Error while opening RAW file for read. Must specify format and channels.\n" "Possibly trying to open unsupported format." }, { SFE_RAW_BAD_BITWIDTH , "Error. RAW file bitwidth must be a multiple of 8." }, { SFE_RAW_BAD_FORMAT , "Error. Bad format field in SF_INFO struct when opening a RAW file for read." }, { SFE_PAF_NO_MARKER , "Error in PAF file, no marker." }, { SFE_PAF_VERSION , "Error in PAF file, bad version." }, { SFE_PAF_UNKNOWN_FORMAT , "Error in PAF file, unknown format." }, { SFE_PAF_SHORT_HEADER , "Error in PAF file. File shorter than minimal header." }, { SFE_PAF_BAD_CHANNELS , "Error in PAF file. Bad channel count." }, { SFE_SVX_NO_FORM , "Error in 8SVX / 16SV file, no 'FORM' marker." }, { SFE_SVX_NO_BODY , "Error in 8SVX / 16SV file, no 'BODY' marker." }, { SFE_SVX_NO_DATA , "Error in 8SVX / 16SV file, no sound data." }, { SFE_SVX_BAD_COMP , "Error in 8SVX / 16SV file, unsupported compression format." }, { SFE_SVX_BAD_NAME_LENGTH , "Error in 8SVX / 16SV file, NAME chunk too long." }, { SFE_NIST_BAD_HEADER , "Error in NIST file, bad header." }, { SFE_NIST_CRLF_CONVERISON, "Error : NIST file damaged by Windows CR -> CRLF conversion process." }, { SFE_NIST_BAD_ENCODING , "Error in NIST file, unsupported compression format." }, { SFE_VOC_NO_CREATIVE , "Error in VOC file, no 'Creative Voice File' marker." }, { SFE_VOC_BAD_FORMAT , "Error in VOC file, bad format." }, { SFE_VOC_BAD_VERSION , "Error in VOC file, bad version number." }, { SFE_VOC_BAD_MARKER , "Error in VOC file, bad marker in file." }, { SFE_VOC_BAD_SECTIONS , "Error in VOC file, incompatible VOC sections." }, { SFE_VOC_MULTI_SAMPLERATE, "Error in VOC file, more than one sample rate defined." }, { SFE_VOC_MULTI_SECTION , "Unimplemented VOC file feature, file contains multiple sound sections." }, { SFE_VOC_MULTI_PARAM , "Error in VOC file, file contains multiple bit or channel widths." }, { SFE_VOC_SECTION_COUNT , "Error in VOC file, too many sections." 
}, { SFE_VOC_NO_PIPE , "Error : not able to operate on VOC files over a pipe." }, { SFE_IRCAM_NO_MARKER , "Error in IRCAM file, bad IRCAM marker." }, { SFE_IRCAM_BAD_CHANNELS , "Error in IRCAM file, bad channel count." }, { SFE_IRCAM_UNKNOWN_FORMAT, "Error in IRCAM file, unknown encoding format." }, { SFE_W64_64_BIT , "Error in W64 file, file contains 64 bit offset." }, { SFE_W64_NO_RIFF , "Error in W64 file. No 'riff' chunk marker." }, { SFE_W64_NO_WAVE , "Error in W64 file. No 'wave' chunk marker." }, { SFE_W64_NO_DATA , "Error in W64 file. No 'data' chunk marker." }, { SFE_W64_ADPCM_NOT4BIT , "Error in ADPCM W64 file. Invalid bit width." }, { SFE_W64_ADPCM_CHANNELS , "Error in ADPCM W64 file. Invalid number of channels." }, { SFE_W64_GSM610_FORMAT , "Error in GSM610 W64 file. Invalid format chunk." }, { SFE_MAT4_BAD_NAME , "Error in MAT4 file. No variable name." }, { SFE_MAT4_NO_SAMPLERATE , "Error in MAT4 file. No sample rate." }, { SFE_MAT5_BAD_ENDIAN , "Error in MAT5 file. Not able to determine endian-ness." }, { SFE_MAT5_NO_BLOCK , "Error in MAT5 file. Bad block structure." }, { SFE_MAT5_SAMPLE_RATE , "Error in MAT5 file. Not able to determine sample rate." }, { SFE_PVF_NO_PVF1 , "Error in PVF file. No PVF1 marker." }, { SFE_PVF_BAD_HEADER , "Error in PVF file. Bad header." }, { SFE_PVF_BAD_BITWIDTH , "Error in PVF file. Bad bit width." }, { SFE_XI_BAD_HEADER , "Error in XI file. Bad header." }, { SFE_XI_EXCESS_SAMPLES , "Error in XI file. Excess samples in file." }, { SFE_XI_NO_PIPE , "Error : not able to operate on XI files over a pipe." }, { SFE_HTK_NO_PIPE , "Error : not able to operate on HTK files over a pipe." }, { SFE_SDS_NOT_SDS , "Error : not an SDS file." }, { SFE_SDS_BAD_BIT_WIDTH , "Error : bad bit width for SDS file." }, { SFE_SD2_FD_DISALLOWED , "Error : cannot open SD2 file without a file name." }, { SFE_SD2_BAD_DATA_OFFSET , "Error : bad data offset." }, { SFE_SD2_BAD_MAP_OFFSET , "Error : bad map offset." }, { SFE_SD2_BAD_DATA_LENGTH , "Error : bad data length." }, { SFE_SD2_BAD_MAP_LENGTH , "Error : bad map length." }, { SFE_SD2_BAD_RSRC , "Error : bad resource fork." }, { SFE_SD2_BAD_SAMPLE_SIZE , "Error : bad sample size." }, { SFE_FLAC_BAD_HEADER , "Error : bad flac header." }, { SFE_FLAC_NEW_DECODER , "Error : problem while creating flac decoder." }, { SFE_FLAC_INIT_DECODER , "Error : problem while initialization of the flac decoder." }, { SFE_FLAC_LOST_SYNC , "Error : flac decoder lost sync." }, { SFE_FLAC_BAD_SAMPLE_RATE, "Error : flac does not support this sample rate." }, { SFE_FLAC_UNKOWN_ERROR , "Error : unknown error in flac decoder." }, { SFE_WVE_NOT_WVE , "Error : not a WVE file." }, { SFE_WVE_NO_PIPE , "Error : not able to operate on WVE files over a pipe." }, { SFE_DWVW_BAD_BITWIDTH , "Error : Bad bit width for DWVW encoding. Must be 12, 16 or 24." }, { SFE_G72X_NOT_MONO , "Error : G72x encoding does not support more than 1 channel." }, { SFE_VORBIS_ENCODER_BUG , "Error : Sample rate chosen is known to trigger a Vorbis encoder bug on this CPU." }, { SFE_RF64_NOT_RF64 , "Error : Not an RF64 file." }, { SFE_RF64_PEAK_B4_FMT , "Error in RF64 file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_RF64_NO_DATA , "Error in RF64 file. No 'data' chunk marker." }, { SFE_ALAC_FAIL_TMPFILE , "Error : Failed to open tmp file for ALAC encoding." }, { SFE_BAD_CHUNK_PTR , "Error : Bad SF_CHUNK_INFO pointer." }, { SFE_UNKNOWN_CHUNK , "Error : Unknown chunk marker." }, { SFE_BAD_CHUNK_FORMAT , "Error : Reading/writing chunks from this file format is not supported." 
}, { SFE_BAD_CHUNK_MARKER , "Error : Bad chunk marker." }, { SFE_BAD_CHUNK_DATA_PTR , "Error : Bad data pointer in SF_CHUNK_INFO struct." }, { SFE_FILENAME_TOO_LONG , "Error : Supplied filename too long." }, { SFE_BAD_HEADER_ALLOC , "Error : Required header allocation is too large." }, { SFE_MAX_ERROR , "Maximum error number." }, { SFE_MAX_ERROR + 1 , NULL } } ; /*------------------------------------------------------------------------------ */ static int format_from_extension (SF_PRIVATE *psf) ; static int guess_file_type (SF_PRIVATE *psf) ; static int validate_sfinfo (SF_INFO *sfinfo) ; static int validate_psf (SF_PRIVATE *psf) ; static void save_header_info (SF_PRIVATE *psf) ; static int copy_filename (SF_PRIVATE *psf, const char *path) ; static int psf_close (SF_PRIVATE *psf) ; static int try_resource_fork (SF_PRIVATE * psf) ; /*------------------------------------------------------------------------------ ** Private (static) variables. */ int sf_errno = 0 ; static char sf_parselog [SF_BUFFER_LEN] = { 0 } ; static char sf_syserr [SF_SYSERR_LEN] = { 0 } ; /*------------------------------------------------------------------------------ */ #define VALIDATE_SNDFILE_AND_ASSIGN_PSF(a, b, c) \ { if ((a) == NULL) \ { sf_errno = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ (b) = (SF_PRIVATE*) (a) ; \ if ((b)->virtual_io == SF_FALSE && \ psf_file_valid (b) == 0) \ { (b)->error = SFE_BAD_FILE_PTR ; \ return 0 ; \ } ; \ if ((b)->Magick != SNDFILE_MAGICK) \ { (b)->error = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ if (c) (b)->error = 0 ; \ } /*------------------------------------------------------------------------------ ** Public functions. */ SNDFILE* sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */ SNDFILE* sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */ SNDFILE* sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. 
*/ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */ int sf_close (SNDFILE *sndfile) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_close (psf) ; } /* sf_close */ void sf_write_sync (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE *) sndfile) == NULL) return ; psf_fsync (psf) ; return ; } /* sf_write_sync */ /*============================================================================== */ const char* sf_error_number (int errnum) { static const char *bad_errnum = "No error defined for this error number. This is a bug in libsndfile." ; int k ; if (errnum == SFE_MAX_ERROR) return SndfileErrors [0].str ; if (errnum < 0 || errnum > SFE_MAX_ERROR) { /* This really shouldn't happen in release versions. */ printf ("Not a valid error number (%d).\n", errnum) ; return bad_errnum ; } ; for (k = 0 ; SndfileErrors [k].str ; k++) if (errnum == SndfileErrors [k].error) return SndfileErrors [k].str ; return bad_errnum ; } /* sf_error_number */ const char* sf_strerror (SNDFILE *sndfile) { SF_PRIVATE *psf = NULL ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; if (errnum == SFE_SYSTEM && sf_syserr [0]) return sf_syserr ; } else { psf = (SF_PRIVATE *) sndfile ; if (psf->Magick != SNDFILE_MAGICK) return "sf_strerror : Bad magic number." 
; errnum = psf->error ; if (errnum == SFE_SYSTEM && psf->syserr [0]) return psf->syserr ; } ; return sf_error_number (errnum) ; } /* sf_strerror */ /*------------------------------------------------------------------------------ */ int sf_error (SNDFILE *sndfile) { SF_PRIVATE *psf ; if (sndfile == NULL) return sf_errno ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; if (psf->error) return psf->error ; return 0 ; } /* sf_error */ /*------------------------------------------------------------------------------ */ int sf_perror (SNDFILE *sndfile) { SF_PRIVATE *psf ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; } else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; fprintf (stderr, "%s\n", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_perror */ /*------------------------------------------------------------------------------ */ int sf_error_str (SNDFILE *sndfile, char *str, size_t maxlen) { SF_PRIVATE *psf ; int errnum ; if (str == NULL) return SFE_INTERNAL ; if (sndfile == NULL) errnum = sf_errno ; else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; snprintf (str, maxlen, "%s", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_error_str */ /*============================================================================== */ int sf_format_check (const SF_INFO *info) { int subformat, endian ; subformat = SF_CODEC (info->format) ; endian = SF_ENDIAN (info->format) ; /* This is the place where each file format can check if the suppiled ** SF_INFO struct is valid. ** Return 0 on failure, 1 ons success. */ if (info->channels < 1 || info->channels > SF_MAX_CHANNELS) return 0 ; if (info->samplerate < 0) return 0 ; switch (SF_CONTAINER (info->format)) { case SF_FORMAT_WAV : /* WAV now allows both endian, RIFF or RIFX (little or big respectively) */ if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_WAVEX : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_AIFF : /* AIFF does allow both endian-nesses for PCM data.*/ if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; /* For other encodings reject any endian-ness setting. 
*/ if (endian != 0) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_IMA_ADPCM && (info->channels == 1 || info->channels == 2)) return 1 ; break ; case SF_FORMAT_AU : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_24 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_40 && info->channels == 1) return 1 ; break ; case SF_FORMAT_CAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_ALAC_16 || subformat == SF_FORMAT_ALAC_20) return 1 ; if (subformat == SF_FORMAT_ALAC_24 || subformat == SF_FORMAT_ALAC_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_RAW : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_ULAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_VOX_ADPCM && info->channels == 1) return 1 ; break ; case SF_FORMAT_PAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SVX : /* SVX only supports writing mono SVX files. */ if (info->channels > 1) return 0 ; /* Always big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_NIST : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_IRCAM : if (info->channels > 256) return 0 ; if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_FLOAT) return 1 ; break ; case SF_FORMAT_VOC : if (info->channels > 2) return 0 ; /* VOC is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_W64 : /* W64 is strictly little endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT4 : if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT5 : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_PVF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_XI : if (info->channels != 1) return 0 ; if (subformat == SF_FORMAT_DPCM_8 || subformat == SF_FORMAT_DPCM_16) return 1 ; break ; case SF_FORMAT_HTK : if (info->channels != 1) return 0 ; /* HTK is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_SDS : if (info->channels != 1) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_AVR : if (info->channels > 2) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_FLAC : /* FLAC can't do more than 8 channels. */ if (info->channels > 8) return 0 ; if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SD2 : /* SD2 is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_WVE : if (info->channels > 1) return 0 ; /* WVE is strictly big endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_OGG : if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_VORBIS) return 1 ; break ; case SF_FORMAT_MPC2K : if (info->channels > 2) return 0 ; /* MPC2000 is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_RF64 : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; default : break ; } ; return 0 ; } /* sf_format_check */ /*------------------------------------------------------------------------------ */ const char * sf_version_string (void) { #if ENABLE_EXPERIMENTAL_CODE return PACKAGE_NAME "-" PACKAGE_VERSION "-exp" ; #else return PACKAGE_NAME "-" PACKAGE_VERSION ; #endif } /*------------------------------------------------------------------------------ */ int sf_command (SNDFILE *sndfile, int command, void *data, int datasize) { SF_PRIVATE *psf = (SF_PRIVATE *) sndfile ; double quality ; int old_value ; /* This set of commands do not need the sndfile parameter. */ switch (command) { case SFC_GET_LIB_VERSION : if (data == NULL) { if (psf) psf->error = SFE_BAD_COMMAND_PARAM ; return SFE_BAD_COMMAND_PARAM ; } ; snprintf (data, datasize, "%s", sf_version_string ()) ; return strlen (data) ; case SFC_GET_SIMPLE_FORMAT_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_simple_count () ; return 0 ; case SFC_GET_SIMPLE_FORMAT : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_simple (data) ; case SFC_GET_FORMAT_MAJOR_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_major_count () ; return 0 ; case SFC_GET_FORMAT_MAJOR : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_major (data) ; case SFC_GET_FORMAT_SUBTYPE_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_subtype_count () ; return 0 ; case SFC_GET_FORMAT_SUBTYPE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_subtype (data) ; case SFC_GET_FORMAT_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_info (data) ; } ; if (sndfile == NULL && command == SFC_GET_LOG_INFO) { if (data == NULL) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; snprintf (data, datasize, "%s", sf_parselog) ; return strlen (data) ; } ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; switch (command) { case SFC_SET_NORM_FLOAT : old_value = psf->norm_float ; psf->norm_float = (datasize) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_CURRENT_SF_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; memcpy (data, &psf->sf, sizeof (SF_INFO)) ; break ; case SFC_SET_NORM_DOUBLE : old_value = psf->norm_double ; psf->norm_double = (datasize) ? 
SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_NORM_FLOAT : return psf->norm_float ; case SFC_GET_NORM_DOUBLE : return psf->norm_double ; case SFC_SET_SCALE_FLOAT_INT_READ : old_value = psf->float_int_mult ; psf->float_int_mult = (datasize != 0) ? SF_TRUE : SF_FALSE ; if (psf->float_int_mult && psf->float_max < 0.0) /* Scale to prevent wrap-around distortion. */ psf->float_max = (32768.0 / 32767.0) * psf_calc_signal_max (psf, SF_FALSE) ; return old_value ; case SFC_SET_SCALE_INT_FLOAT_WRITE : old_value = psf->scale_int_float ; psf->scale_int_float = (datasize != 0) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_SET_ADD_PEAK_CHUNK : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and AIFF support the PEAK chunk. */ switch (format) { case SF_FORMAT_AIFF : case SF_FORMAT_CAF : case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_RF64 : break ; default : return SF_FALSE ; } ; format = SF_CODEC (psf->sf.format) ; /* Only files containg the following data types support the PEAK chunk. */ if (format != SF_FORMAT_FLOAT && format != SF_FORMAT_DOUBLE) return SF_FALSE ; } ; /* Can only do this is in SFM_WRITE mode. */ if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; /* Everything seems OK, so set psf->has_peak and re-write header. */ if (datasize == SF_FALSE && psf->peak_info != NULL) { free (psf->peak_info) ; psf->peak_info = NULL ; } else if (psf->peak_info == NULL) { psf->peak_info = peak_info_calloc (psf->sf.channels) ; if (psf->peak_info != NULL) psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return datasize ; case SFC_SET_ADD_HEADER_PAD_CHUNK : return SF_FALSE ; case SFC_GET_LOG_INFO : if (data == NULL) return SFE_BAD_COMMAND_PARAM ; snprintf (data, datasize, "%s", psf->parselog.buf) ; break ; case SFC_CALC_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_FALSE) ; break ; case SFC_CALC_NORM_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_TRUE) ; break ; case SFC_CALC_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_FALSE) ; case SFC_CALC_NORM_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_TRUE) ; case SFC_GET_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_signal_max (psf, (double *) data) ; case SFC_GET_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_max_all_channels (psf, (double*) data) ; case SFC_UPDATE_HEADER_NOW : if (psf->write_header) psf->write_header (psf, SF_TRUE) ; break ; case SFC_SET_UPDATE_HEADER_AUTO : psf->auto_header = datasize ? SF_TRUE : SF_FALSE ; return psf->auto_header ; break ; case SFC_SET_ADD_DITHER_ON_WRITE : case SFC_SET_ADD_DITHER_ON_READ : /* ** FIXME ! ** These are obsolete. Just return. 
** Remove some time after version 1.0.8. */ break ; case SFC_SET_DITHER_ON_WRITE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->write_dither, data, sizeof (psf->write_dither)) ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_WRITE) ; break ; case SFC_SET_DITHER_ON_READ : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->read_dither, data, sizeof (psf->read_dither)) ; if (psf->file.mode == SFM_READ || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_READ) ; break ; case SFC_FILE_TRUNCATE : if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_TRUE ; if (datasize != sizeof (sf_count_t)) return SF_TRUE ; if (data == NULL || datasize != sizeof (sf_count_t)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } else { sf_count_t position ; position = *((sf_count_t*) data) ; if (sf_seek (sndfile, position, SEEK_SET) != position) return SF_TRUE ; psf->sf.frames = position ; position = psf_fseek (psf, 0, SEEK_CUR) ; return psf_ftruncate (psf, position) ; } ; break ; case SFC_SET_RAW_START_OFFSET : if (data == NULL || datasize != sizeof (sf_count_t)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) return (psf->error = SFE_BAD_COMMAND_PARAM) ; psf->dataoffset = *((sf_count_t*) data) ; sf_seek (sndfile, 0, SEEK_CUR) ; break ; case SFC_GET_EMBED_FILE_INFO : if (data == NULL || datasize != sizeof (SF_EMBED_FILE_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; ((SF_EMBED_FILE_INFO*) data)->offset = psf->fileoffset ; ((SF_EMBED_FILE_INFO*) data)->length = psf->filelength ; break ; /* Lite remove start */ case SFC_TEST_IEEE_FLOAT_REPLACE : psf->ieee_replace = (datasize) ? SF_TRUE : SF_FALSE ; if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_FLOAT) float32_init (psf) ; else if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_DOUBLE) double64_init (psf) ; else return (psf->error = SFE_BAD_COMMAND_PARAM) ; break ; /* Lite remove end */ case SFC_SET_CLIPPING : psf->add_clipping = (datasize) ? SF_TRUE : SF_FALSE ; return psf->add_clipping ; case SFC_GET_CLIPPING : return psf->add_clipping ; case SFC_GET_LOOP_INFO : if (datasize != sizeof (SF_LOOP_INFO) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->loop_info == NULL) return SF_FALSE ; memcpy (data, psf->loop_info, sizeof (SF_LOOP_INFO)) ; return SF_TRUE ; case SFC_SET_BROADCAST_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 supports the BEXT (Broadcast) chunk. */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_WAVEX && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode. */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. 
*/ if (psf->broadcast_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (broadcast_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_BROADCAST_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return broadcast_var_get (psf, data, datasize) ; case SFC_SET_CART_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 support cart chunk format */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->cart_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (cart_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_CART_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return cart_var_get (psf, data, datasize) ; case SFC_GET_CUE_COUNT : if (datasize != sizeof (uint32_t) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues != NULL) { *((uint32_t *) data) = psf->cues->cue_count ; return SF_TRUE ; } ; return SF_FALSE ; case SFC_GET_CUE : if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL) return SF_FALSE ; psf_get_cues (psf, data, datasize) ; return SF_TRUE ; case SFC_SET_CUE : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL && (psf->cues = psf_cues_dup (data)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; return SF_TRUE ; case SFC_GET_INSTRUMENT : if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL) return SF_FALSE ; memcpy (data, psf->instrument, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_SET_INSTRUMENT : /* If data has already been written this must fail. 
*/ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->instrument, data, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_RAW_DATA_NEEDS_ENDSWAP : return psf->data_endswap ; case SFC_GET_CHANNEL_MAP_INFO : if (psf->channel_map == NULL) return SF_FALSE ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; memcpy (data, psf->channel_map, datasize) ; return SF_TRUE ; case SFC_SET_CHANNEL_MAP_INFO : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; { int *iptr ; for (iptr = data ; iptr < (int*) data + psf->sf.channels ; iptr++) { if (*iptr <= SF_CHANNEL_MAP_INVALID || *iptr >= SF_CHANNEL_MAP_MAX) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; } ; } ; free (psf->channel_map) ; if ((psf->channel_map = malloc (datasize)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->channel_map, data, datasize) ; /* ** Pass the command down to the container's command handler. ** Don't pass user data, use validated psf->channel_map data instead. */ if (psf->command) return psf->command (psf, command, NULL, 0) ; return SF_FALSE ; case SFC_SET_VBR_ENCODING_QUALITY : if (data == NULL || datasize != sizeof (double)) return SF_FALSE ; quality = *((double *) data) ; quality = 1.0 - SF_MAX (0.0, SF_MIN (1.0, quality)) ; return sf_command (sndfile, SFC_SET_COMPRESSION_LEVEL, &quality, sizeof (quality)) ; default : /* Must be a file specific command. Pass it on. */ if (psf->command) return psf->command (psf, command, data, datasize) ; psf_log_printf (psf, "*** sf_command : cmd = 0x%X\n", command) ; return (psf->error = SFE_BAD_COMMAND_PARAM) ; } ; return 0 ; } /* sf_command */ /*------------------------------------------------------------------------------ */ sf_count_t sf_seek (SNDFILE *sndfile, sf_count_t offset, int whence) { SF_PRIVATE *psf ; sf_count_t seek_from_start = 0, retval ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (! psf->sf.seekable) { psf->error = SFE_NOT_SEEKABLE ; return PSF_SEEK_ERROR ; } ; /* If the whence parameter has a mode ORed in, check to see that ** it makes sense. */ if (((whence & SFM_MASK) == SFM_WRITE && psf->file.mode == SFM_READ) || ((whence & SFM_MASK) == SFM_READ && psf->file.mode == SFM_WRITE)) { psf->error = SFE_WRONG_SEEK ; return PSF_SEEK_ERROR ; } ; /* Convert all SEEK_CUR and SEEK_END into seek_from_start to be ** used with SEEK_SET. */ switch (whence) { /* The SEEK_SET behaviour is independant of mode. */ case SEEK_SET : case SEEK_SET | SFM_READ : case SEEK_SET | SFM_WRITE : case SEEK_SET | SFM_RDWR : seek_from_start = offset ; break ; /* The SEEK_CUR is a little more tricky. 
*/ case SEEK_CUR : if (offset == 0) { if (psf->file.mode == SFM_READ) return psf->read_current ; if (psf->file.mode == SFM_WRITE) return psf->write_current ; } ; if (psf->file.mode == SFM_READ) seek_from_start = psf->read_current + offset ; else if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) seek_from_start = psf->write_current + offset ; else psf->error = SFE_AMBIGUOUS_SEEK ; break ; case SEEK_CUR | SFM_READ : if (offset == 0) return psf->read_current ; seek_from_start = psf->read_current + offset ; break ; case SEEK_CUR | SFM_WRITE : if (offset == 0) return psf->write_current ; seek_from_start = psf->write_current + offset ; break ; /* The SEEK_END */ case SEEK_END : case SEEK_END | SFM_READ : case SEEK_END | SFM_WRITE : seek_from_start = psf->sf.frames + offset ; break ; default : psf->error = SFE_BAD_SEEK ; break ; } ; if (psf->error) return PSF_SEEK_ERROR ; if (psf->file.mode == SFM_RDWR || psf->file.mode == SFM_WRITE) { if (seek_from_start < 0) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; } else if (seek_from_start < 0 || seek_from_start > psf->sf.frames) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; if (psf->seek) { int new_mode = (whence & SFM_MASK) ? (whence & SFM_MASK) : psf->file.mode ; retval = psf->seek (psf, new_mode, seek_from_start) ; switch (new_mode) { case SFM_READ : psf->read_current = retval ; break ; case SFM_WRITE : psf->write_current = retval ; break ; case SFM_RDWR : psf->read_current = retval ; psf->write_current = retval ; new_mode = SFM_READ ; break ; } ; psf->last_op = new_mode ; return retval ; } ; psf->error = SFE_AMBIGUOUS_SEEK ; return PSF_SEEK_ERROR ; } /* sf_seek */ /*------------------------------------------------------------------------------ */ const char* sf_get_string (SNDFILE *sndfile, int str_type) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return NULL ; if (psf->Magick != SNDFILE_MAGICK) return NULL ; return psf_get_string (psf, str_type) ; } /* sf_get_string */ int sf_set_string (SNDFILE *sndfile, int str_type, const char* str) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_set_string (psf, str_type, str) ; } /* sf_get_string */ /*------------------------------------------------------------------------------ */ int sf_current_byterate (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return -1 ; if (psf->Magick != SNDFILE_MAGICK) return -1 ; /* This should cover all PCM and floating point formats. */ if (psf->bytewidth) return psf->sf.samplerate * psf->sf.channels * psf->bytewidth ; if (psf->byterate) return psf->byterate (psf) ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_IMA_ADPCM : case SF_FORMAT_MS_ADPCM : case SF_FORMAT_VOX_ADPCM : return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_GSM610 : return (psf->sf.samplerate * psf->sf.channels * 13000) / 8000 ; case SF_FORMAT_G721_32 : /* 32kbs G721 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_G723_24 : /* 24kbs G723 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels * 3) / 8 ; case SF_FORMAT_G723_40 : /* 40kbs G723 ADPCM encoding. 
*/ return (psf->sf.samplerate * psf->sf.channels * 5) / 8 ; default : break ; } ; return -1 ; } /* sf_current_byterate */ /*============================================================================== */ sf_count_t sf_read_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) { SF_PRIVATE *psf ; sf_count_t count, extra ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (bytes < 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, bytes) ; return 0 ; } ; if (bytes % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf_fread (ptr, 1, bytes, psf) ; if (psf->read_current + count / blockwidth <= psf->sf.frames) psf->read_current += count / blockwidth ; else { count = (psf->sf.frames - psf->read_current) * blockwidth ; extra = bytes - count ; psf_memset (((char *) ptr) + count, 0, extra) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_short (SNDFILE *sndfile, short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (short)) ; return 0 ; /* End of file. */ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_short */ sf_count_t sf_readf_short (SNDFILE *sndfile, short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (short)) ; return 0 ; /* End of file. 
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_int */ sf_count_t sf_readf_int (SNDFILE *sndfile, int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, 
psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_float */ sf_count_t sf_readf_float (SNDFILE *sndfile, float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_double (SNDFILE *sndfile, double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_double */ sf_count_t sf_readf_double (SNDFILE *sndfile, double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; 
psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_double */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_raw (SNDFILE *sndfile, const void *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf_fwrite (ptr, 1, len, psf) ; psf->write_current += count / blockwidth ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_short (SNDFILE *sndfile, const short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_short */ sf_count_t sf_writef_short (SNDFILE *sndfile, const short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_int (SNDFILE *sndfile, const int *ptr, sf_count_t len) { SF_PRIVATE 
*psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_int */ sf_count_t sf_writef_int (SNDFILE *sndfile, const int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_float (SNDFILE *sndfile, const float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_float (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_float */ sf_count_t sf_writef_float (SNDFILE *sndfile, const float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; 
count = psf->write_float (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_double (SNDFILE *sndfile, const double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_double */ sf_count_t sf_writef_double (SNDFILE *sndfile, const double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_double */ /*========================================================================= ** Private functions. */ static int try_resource_fork (SF_PRIVATE * psf) { int old_error = psf->error ; /* Set READ mode now, to see if resource fork exists. */ psf->rsrc.mode = SFM_READ ; if (psf_open_rsrc (psf) != 0) { psf->error = old_error ; return 0 ; } ; /* More checking here. */ psf_log_printf (psf, "Resource fork : %s\n", psf->rsrc.path.c) ; return SF_FORMAT_SD2 ; } /* try_resource_fork */ static int format_from_extension (SF_PRIVATE *psf) { char *cptr ; char buffer [16] ; int format = 0 ; if ((cptr = strrchr (psf->file.name.c, '.')) == NULL) return 0 ; cptr ++ ; if (strlen (cptr) > sizeof (buffer) - 1) return 0 ; psf_strlcpy (buffer, sizeof (buffer), cptr) ; buffer [sizeof (buffer) - 1] = 0 ; /* Convert everything in the buffer to lower case. 
*/ cptr = buffer ; while (*cptr) { *cptr = tolower (*cptr) ; cptr ++ ; } ; cptr = buffer ; if (strcmp (cptr, "au") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "snd") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "vox") == 0 || strcmp (cptr, "vox8") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "vox6") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 6000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "gsm") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_GSM610 ; } /* For RAW files, make sure the dataoffset if set correctly. */ if ((SF_CONTAINER (format)) == SF_FORMAT_RAW) psf->dataoffset = 0 ; return format ; } /* format_from_extension */ static int guess_file_type (SF_PRIVATE *psf) { uint32_t buffer [3], format ; if (psf_binheader_readf (psf, "b", &buffer, SIGNED_SIZEOF (buffer)) != SIGNED_SIZEOF (buffer)) { psf->error = SFE_BAD_FILE_READ ; return 0 ; } ; if ((buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'F') || buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'X')) && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_WAV ; if (buffer [0] == MAKE_MARKER ('F', 'O', 'R', 'M')) { if (buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'F') || buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'C')) return SF_FORMAT_AIFF ; if (buffer [2] == MAKE_MARKER ('8', 'S', 'V', 'X') || buffer [2] == MAKE_MARKER ('1', '6', 'S', 'V')) return SF_FORMAT_SVX ; return 0 ; } ; if (buffer [0] == MAKE_MARKER ('.', 's', 'n', 'd') || buffer [0] == MAKE_MARKER ('d', 'n', 's', '.')) return SF_FORMAT_AU ; if ((buffer [0] == MAKE_MARKER ('f', 'a', 'p', ' ') || buffer [0] == MAKE_MARKER (' ', 'p', 'a', 'f'))) return SF_FORMAT_PAF ; if (buffer [0] == MAKE_MARKER ('N', 'I', 'S', 'T')) return SF_FORMAT_NIST ; if (buffer [0] == MAKE_MARKER ('C', 'r', 'e', 'a') && buffer [1] == MAKE_MARKER ('t', 'i', 'v', 'e')) return SF_FORMAT_VOC ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0xF8, 0xFF)) == MAKE_MARKER (0x64, 0xA3, 0x00, 0x00) || (buffer [0] & MAKE_MARKER (0xFF, 0xF8, 0xFF, 0xFF)) == MAKE_MARKER (0x00, 0x00, 0xA3, 0x64)) return SF_FORMAT_IRCAM ; if (buffer [0] == MAKE_MARKER ('r', 'i', 'f', 'f')) return SF_FORMAT_W64 ; if (buffer [0] == MAKE_MARKER (0, 0, 0x03, 0xE8) && buffer [1] == MAKE_MARKER (0, 0, 0, 1) && buffer [2] == MAKE_MARKER (0, 0, 0, 1)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER (0, 0, 0, 0) && buffer [1] == MAKE_MARKER (1, 0, 0, 0) && buffer [2] == MAKE_MARKER (1, 0, 0, 0)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER ('M', 'A', 'T', 'L') && buffer [1] == MAKE_MARKER ('A', 'B', ' ', '5')) return SF_FORMAT_MAT5 ; if (buffer [0] == MAKE_MARKER ('P', 'V', 'F', '1')) return SF_FORMAT_PVF ; if (buffer [0] == MAKE_MARKER ('E', 'x', 't', 'e') && buffer [1] == MAKE_MARKER ('n', 'd', 'e', 'd') && buffer [2] == MAKE_MARKER (' ', 'I', 'n', 's')) return SF_FORMAT_XI ; if (buffer [0] == MAKE_MARKER ('c', 'a', 'f', 'f') && buffer [2] == MAKE_MARKER ('d', 'e', 's', 'c')) return SF_FORMAT_CAF ; if (buffer [0] == MAKE_MARKER ('O', 'g', 'g', 'S')) return SF_FORMAT_OGG ; if (buffer [0] == MAKE_MARKER ('A', 'L', 'a', 'w') && buffer [1] == MAKE_MARKER ('S', 'o', 'u', 'n') && buffer [2] == MAKE_MARKER ('d', 'F', 'i', 'l')) return SF_FORMAT_WVE ; if (buffer [0] == MAKE_MARKER ('D', 'i', 'a', 'm') && buffer 
[1] == MAKE_MARKER ('o', 'n', 'd', 'W') && buffer [2] == MAKE_MARKER ('a', 'r', 'e', ' ')) return SF_FORMAT_DWD ; if (buffer [0] == MAKE_MARKER ('L', 'M', '8', '9') || buffer [0] == MAKE_MARKER ('5', '3', 0, 0)) return SF_FORMAT_TXW ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0x80, 0xFF)) == MAKE_MARKER (0xF0, 0x7E, 0, 0x01)) return SF_FORMAT_SDS ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0, 0)) == MAKE_MARKER (1, 4, 0, 0)) return SF_FORMAT_MPC2K ; if (buffer [0] == MAKE_MARKER ('C', 'A', 'T', ' ') && buffer [2] == MAKE_MARKER ('R', 'E', 'X', '2')) return SF_FORMAT_REX2 ; if (buffer [0] == MAKE_MARKER (0x30, 0x26, 0xB2, 0x75) && buffer [1] == MAKE_MARKER (0x8E, 0x66, 0xCF, 0x11)) return 0 /*-SF_FORMAT_WMA-*/ ; /* HMM (Hidden Markov Model) Tool Kit. */ if (buffer [2] == MAKE_MARKER (0, 2, 0, 0) && 2 * ((int64_t) BE2H_32 (buffer [0])) + 12 == psf->filelength) return SF_FORMAT_HTK ; if (buffer [0] == MAKE_MARKER ('f', 'L', 'a', 'C')) return SF_FORMAT_FLAC ; if (buffer [0] == MAKE_MARKER ('2', 'B', 'I', 'T')) return SF_FORMAT_AVR ; if (buffer [0] == MAKE_MARKER ('R', 'F', '6', '4') && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_RF64 ; if (buffer [0] == MAKE_MARKER ('I', 'D', '3', 3)) { psf_log_printf (psf, "Found 'ID3' marker.\n") ; if (id3_skip (psf)) return guess_file_type (psf) ; return 0 ; } ; /* Turtle Beach SMP 16-bit */ if (buffer [0] == MAKE_MARKER ('S', 'O', 'U', 'N') && buffer [1] == MAKE_MARKER ('D', ' ', 'S', 'A')) return 0 ; /* Yamaha sampler format. */ if (buffer [0] == MAKE_MARKER ('S', 'Y', '8', '0') || buffer [0] == MAKE_MARKER ('S', 'Y', '8', '5')) return 0 ; if (buffer [0] == MAKE_MARKER ('a', 'j', 'k', 'g')) return 0 /*-SF_FORMAT_SHN-*/ ; /* This must be the last one. */ if (psf->filelength > 0 && (format = try_resource_fork (psf)) != 0) return format ; return 0 ; } /* guess_file_type */ static int validate_sfinfo (SF_INFO *sfinfo) { if (sfinfo->samplerate < 1) return 0 ; if (sfinfo->frames < 0) return 0 ; if (sfinfo->channels < 1) return 0 ; if ((SF_CONTAINER (sfinfo->format)) == 0) return 0 ; if ((SF_CODEC (sfinfo->format)) == 0) return 0 ; if (sfinfo->sections < 1) return 0 ; return 1 ; } /* validate_sfinfo */ static int validate_psf (SF_PRIVATE *psf) { if (psf->datalength < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : datalength == %D.\n", psf->datalength) ; return 0 ; } ; if (psf->dataoffset < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : dataoffset == %D.\n", psf->dataoffset) ; return 0 ; } ; if (psf->blockwidth && psf->blockwidth != psf->sf.channels * psf->bytewidth) { psf_log_printf (psf, "Invalid SF_PRIVATE field : channels * bytewidth == %d.\n", psf->sf.channels * psf->bytewidth) ; return 0 ; } ; return 1 ; } /* validate_psf */ static void save_header_info (SF_PRIVATE *psf) { snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; } /* save_header_info */ static int copy_filename (SF_PRIVATE *psf, const char *path) { const char *ccptr ; char *cptr ; if (strlen (path) > 1 && strlen (path) - 1 >= sizeof (psf->file.path.c)) { psf->error = SFE_FILENAME_TOO_LONG ; return psf->error ; } ; snprintf (psf->file.path.c, sizeof (psf->file.path.c), "%s", path) ; if ((ccptr = strrchr (path, '/')) || (ccptr = strrchr (path, '\\'))) ccptr ++ ; else ccptr = path ; snprintf (psf->file.name.c, sizeof (psf->file.name.c), "%s", ccptr) ; /* Now grab the directory. 
*/ snprintf (psf->file.dir.c, sizeof (psf->file.dir.c), "%s", path) ; if ((cptr = strrchr (psf->file.dir.c, '/')) || (cptr = strrchr (psf->file.dir.c, '\\'))) cptr [1] = 0 ; else psf->file.dir.c [0] = 0 ; return 0 ; } /* copy_filename */ /*============================================================================== */ static int psf_close (SF_PRIVATE *psf) { uint32_t k ; int error = 0 ; if (psf->codec_close) { error = psf->codec_close (psf) ; /* To prevent it being called in psf->container_close(). */ psf->codec_close = NULL ; } ; if (psf->container_close) error = psf->container_close (psf) ; error = psf_fclose (psf) ; psf_close_rsrc (psf) ; /* For an ISO C compliant implementation it is ok to free a NULL pointer. */ free (psf->header.ptr) ; free (psf->container_data) ; free (psf->codec_data) ; free (psf->interleave) ; free (psf->dither) ; free (psf->peak_info) ; free (psf->broadcast_16k) ; free (psf->loop_info) ; free (psf->instrument) ; free (psf->cues) ; free (psf->channel_map) ; free (psf->format_desc) ; free (psf->strings.storage) ; if (psf->wchunks.chunks) for (k = 0 ; k < psf->wchunks.used ; k++) free (psf->wchunks.chunks [k].data) ; free (psf->rchunks.chunks) ; free (psf->wchunks.chunks) ; free (psf->iterator) ; free (psf->cart_16k) ; memset (psf, 0, sizeof (SF_PRIVATE)) ; free (psf) ; return error ; } /* psf_close */ SNDFILE * psf_open_file (SF_PRIVATE *psf, SF_INFO *sfinfo) { int error, format ; sf_errno = error = 0 ; sf_parselog [0] = 0 ; if (psf->error) { error = psf->error ; goto error_exit ; } ; if (psf->file.mode != SFM_READ && psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) { error = SFE_BAD_OPEN_MODE ; goto error_exit ; } ; if (sfinfo == NULL) { error = SFE_BAD_SF_INFO_PTR ; goto error_exit ; } ; if (psf->file.mode == SFM_READ) { if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_RAW) { if (sf_format_check (sfinfo) == 0) { error = SFE_RAW_BAD_FORMAT ; goto error_exit ; } ; } else memset (sfinfo, 0, sizeof (SF_INFO)) ; } ; memcpy (&psf->sf, sfinfo, sizeof (SF_INFO)) ; psf->Magick = SNDFILE_MAGICK ; psf->norm_float = SF_TRUE ; psf->norm_double = SF_TRUE ; psf->dataoffset = -1 ; psf->datalength = -1 ; psf->read_current = -1 ; psf->write_current = -1 ; psf->auto_header = SF_FALSE ; psf->rwf_endian = SF_ENDIAN_LITTLE ; psf->seek = psf_default_seek ; psf->float_int_mult = 0 ; psf->float_max = -1.0 ; /* An attempt at a per SF_PRIVATE unique id. */ psf->unique_id = psf_rand_int32 () ; psf->sf.sections = 1 ; psf->is_pipe = psf_is_pipe (psf) ; if (psf->is_pipe) { psf->sf.seekable = SF_FALSE ; psf->filelength = SF_COUNT_MAX ; } else { psf->sf.seekable = SF_TRUE ; /* File is open, so get the length. 
*/ psf->filelength = psf_get_filelen (psf) ; } ; if (psf->fileoffset > 0) { switch (psf->file.mode) { case SFM_READ : if (psf->filelength < 44) { psf_log_printf (psf, "Short filelength: %D (fileoffset: %D)\n", psf->filelength, psf->fileoffset) ; error = SFE_BAD_OFFSET ; goto error_exit ; } ; break ; case SFM_WRITE : psf->fileoffset = 0 ; psf_fseek (psf, 0, SEEK_END) ; psf->fileoffset = psf_ftell (psf) ; break ; case SFM_RDWR : error = SFE_NO_EMBEDDED_RDWR ; goto error_exit ; } ; psf_log_printf (psf, "Embedded file offset : %D\n", psf->fileoffset) ; } ; if (psf->filelength == SF_COUNT_MAX) psf_log_printf (psf, "Length : unknown\n") ; else psf_log_printf (psf, "Length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->filelength == 0)) { /* If the file is being opened for write or RDWR and the file is currently ** empty, then the SF_INFO struct must contain valid data. */ if ((SF_CONTAINER (psf->sf.format)) == 0) { error = SFE_ZERO_MAJOR_FORMAT ; goto error_exit ; } ; if ((SF_CODEC (psf->sf.format)) == 0) { error = SFE_ZERO_MINOR_FORMAT ; goto error_exit ; } ; if (sf_format_check (&psf->sf) == 0) { error = SFE_BAD_OPEN_FORMAT ; goto error_exit ; } ; } else if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) { /* If type RAW has not been specified then need to figure out file type. */ psf->sf.format = guess_file_type (psf) ; if (psf->sf.format == 0) psf->sf.format = format_from_extension (psf) ; } ; /* Prevent unnecessary seeks */ psf->last_op = psf->file.mode ; /* Set bytewidth if known. */ switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_S8 : case SF_FORMAT_PCM_U8 : case SF_FORMAT_ULAW : case SF_FORMAT_ALAW : case SF_FORMAT_DPCM_8 : psf->bytewidth = 1 ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_DPCM_16 : psf->bytewidth = 2 ; break ; case SF_FORMAT_PCM_24 : psf->bytewidth = 3 ; break ; case SF_FORMAT_PCM_32 : case SF_FORMAT_FLOAT : psf->bytewidth = 4 ; break ; case SF_FORMAT_DOUBLE : psf->bytewidth = 8 ; break ; } ; /* Call the initialisation function for the relevant file type. 
*/ switch (SF_CONTAINER (psf->sf.format)) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : error = wav_open (psf) ; break ; case SF_FORMAT_AIFF : error = aiff_open (psf) ; break ; case SF_FORMAT_AU : error = au_open (psf) ; break ; case SF_FORMAT_RAW : error = raw_open (psf) ; break ; case SF_FORMAT_W64 : error = w64_open (psf) ; break ; case SF_FORMAT_RF64 : error = rf64_open (psf) ; break ; /* Lite remove start */ case SF_FORMAT_PAF : error = paf_open (psf) ; break ; case SF_FORMAT_SVX : error = svx_open (psf) ; break ; case SF_FORMAT_NIST : error = nist_open (psf) ; break ; case SF_FORMAT_IRCAM : error = ircam_open (psf) ; break ; case SF_FORMAT_VOC : error = voc_open (psf) ; break ; case SF_FORMAT_SDS : error = sds_open (psf) ; break ; case SF_FORMAT_OGG : error = ogg_open (psf) ; break ; case SF_FORMAT_TXW : error = txw_open (psf) ; break ; case SF_FORMAT_WVE : error = wve_open (psf) ; break ; case SF_FORMAT_DWD : error = dwd_open (psf) ; break ; case SF_FORMAT_MAT4 : error = mat4_open (psf) ; break ; case SF_FORMAT_MAT5 : error = mat5_open (psf) ; break ; case SF_FORMAT_PVF : error = pvf_open (psf) ; break ; case SF_FORMAT_XI : error = xi_open (psf) ; break ; case SF_FORMAT_HTK : error = htk_open (psf) ; break ; case SF_FORMAT_SD2 : error = sd2_open (psf) ; break ; case SF_FORMAT_REX2 : error = rx2_open (psf) ; break ; case SF_FORMAT_AVR : error = avr_open (psf) ; break ; case SF_FORMAT_FLAC : error = flac_open (psf) ; break ; case SF_FORMAT_CAF : error = caf_open (psf) ; break ; case SF_FORMAT_MPC2K : error = mpc2k_open (psf) ; break ; /* Lite remove end */ default : error = SFE_UNKNOWN_FORMAT ; } ; if (error) goto error_exit ; /* For now, check whether embedding is supported. */ format = SF_CONTAINER (psf->sf.format) ; if (psf->fileoffset > 0) { switch (format) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_AIFF : case SF_FORMAT_AU : /* Actual embedded files. */ break ; case SF_FORMAT_FLAC : /* Flac with an ID3v2 header? */ break ; default : error = SFE_NO_EMBED_SUPPORT ; goto error_exit ; } ; } ; if (psf->fileoffset > 0) psf_log_printf (psf, "Embedded file length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_RDWR && sf_format_check (&psf->sf) == 0) { error = SFE_BAD_MODE_RW ; goto error_exit ; } ; if (validate_sfinfo (&psf->sf) == 0) { psf_log_SF_INFO (psf) ; save_header_info (psf) ; error = SFE_BAD_SF_INFO ; goto error_exit ; } ; if (validate_psf (psf) == 0) { save_header_info (psf) ; error = SFE_INTERNAL ; goto error_exit ; } ; psf->read_current = 0 ; psf->write_current = 0 ; if (psf->file.mode == SFM_RDWR) { psf->write_current = psf->sf.frames ; psf->have_written = psf->sf.frames > 0 ? SF_TRUE : SF_FALSE ; } ; memcpy (sfinfo, &psf->sf, sizeof (SF_INFO)) ; if (psf->file.mode == SFM_WRITE) { /* Zero out these fields. */ sfinfo->frames = 0 ; sfinfo->sections = 0 ; sfinfo->seekable = 0 ; } ; return (SNDFILE *) psf ; error_exit : sf_errno = error ; if (error == SFE_SYSTEM) snprintf (sf_syserr, sizeof (sf_syserr), "%s", psf->syserr) ; snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; switch (error) { case SF_ERR_SYSTEM : case SF_ERR_UNSUPPORTED_ENCODING : case SFE_UNIMPLEMENTED : break ; case SFE_RAW_BAD_FORMAT : break ; default : if (psf->file.mode == SFM_READ) { psf_log_printf (psf, "Parse error : %s\n", sf_error_number (error)) ; error = SF_ERR_MALFORMED_FILE ; } ; } ; psf_close (psf) ; return NULL ; } /* psf_open_file */ /*============================================================================== ** Chunk getting and setting. 
** This works for AIFF, CAF, RF64 and WAV. ** It doesn't work for W64 because W64 uses weird GUID style chunk markers. */ int sf_set_chunk (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->set_chunk) return psf->set_chunk (psf, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_set_chunk */ SF_CHUNK_ITERATOR * sf_get_chunk_iterator (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info) return psf_get_chunk_iterator (psf, chunk_info->id) ; return psf_get_chunk_iterator (psf, NULL) ; } /* sf_get_chunk_iterator */ SF_CHUNK_ITERATOR * sf_next_chunk_iterator (SF_CHUNK_ITERATOR * iterator) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->next_chunk_iterator) return psf->next_chunk_iterator (psf, iterator) ; return NULL ; } /* sf_get_chunk_iterator_next */ int sf_get_chunk_size (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_size) return psf->get_chunk_size (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; return 0 ; } /* sf_get_chunk_size */ int sf_get_chunk_data (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_data) return psf->get_chunk_data (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_get_chunk_data */
sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */
sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */
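Note: the func_before and func_after fields above differ only in how the SF_PRIVATE structure is obtained inside sf_open; expressed as a conventional two-line diff, the change is:

-	if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)
+	if ((psf = psf_allocate ()) == NULL)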
{'added': [(270, '\t{\tSFE_BAD_HEADER_ALLOC \t, "Error : Required header allocation is too large." },'), (329, '\tif ((psf = psf_allocate ()) == NULL)'), (361, '\tif ((psf = psf_allocate ()) == NULL)'), (403, '\tif ((psf = psf_allocate ()) == NULL)'), (2691, '\tfree (psf->header.ptr) ;')], 'deleted': [(270, ''), (329, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)'), (361, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)'), (403, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)')]}
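The diff field above additionally records a new SFE_BAD_HEADER_ALLOC error string and a free (psf->header.ptr) added to psf_close, which together point at the CWE-119 fix for CVE-2017-7586 listed below: the parsed file header appears to move into a separately allocated, size-checked buffer instead of a fixed-size array. The body of psf_allocate is not part of this record, so the following is only an illustrative sketch of that allocate/free pairing; the names SKETCH_PRIVATE, SKETCH_HEADER_INITIAL, sketch_allocate and sketch_close are invented for illustration and are not the actual libsndfile implementation.

#include <stdlib.h>

#define SKETCH_HEADER_INITIAL	4096	/* assumed initial header size, not taken from the record */

typedef struct
{	struct
	{	unsigned char	*ptr ;	/* dynamically allocated header buffer */
		size_t			len ;
	} header ;

	/* ... the real SF_PRIVATE carries many more fields ... */
} SKETCH_PRIVATE ;

static SKETCH_PRIVATE *
sketch_allocate (void)
{	SKETCH_PRIVATE *psf ;

	if ((psf = calloc (1, sizeof (SKETCH_PRIVATE))) == NULL)
		return NULL ;

	/* Keep the header in its own allocation so it can later be grown and
	** size-checked instead of living in a fixed-size array inside the struct.
	*/
	if ((psf->header.ptr = calloc (1, SKETCH_HEADER_INITIAL)) == NULL)
	{	free (psf) ;
		return NULL ;
		} ;

	psf->header.len = SKETCH_HEADER_INITIAL ;
	return psf ;
} /* sketch_allocate */

static void
sketch_close (SKETCH_PRIVATE *psf)
{	free (psf->header.ptr) ;	/* mirrors the free (psf->header.ptr) added to psf_close () */
	free (psf) ;
} /* sketch_close */

Callers would then use sketch_allocate () wherever the old code called calloc (1, sizeof (SF_PRIVATE)) directly, which is exactly the substitution the three 'added' lines in the diff field perform with psf_allocate () in sf_open, sf_open_fd and sf_open_virtual.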
5
4
2307
15935
https://github.com/erikd/libsndfile
CVE-2017-7586
['CWE-119']
sndfile.c
sf_open_fd
/* ** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #define SNDFILE_MAGICK 0x1234C0DE #ifdef __APPLE__ /* ** Detect if a compile for a universal binary is being attempted and barf if it is. ** See the URL below for the rationale. */ #ifdef __BIG_ENDIAN__ #if (CPU_IS_LITTLE_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #ifdef __LITTLE_ENDIAN__ #if (CPU_IS_BIG_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #endif typedef struct { int error ; const char *str ; } ErrorStruct ; static ErrorStruct SndfileErrors [] = { /* Public error values and their associated strings. */ { SF_ERR_NO_ERROR , "No Error." }, { SF_ERR_UNRECOGNISED_FORMAT , "Format not recognised." }, { SF_ERR_SYSTEM , "System error." /* Often replaced. */ }, { SF_ERR_MALFORMED_FILE , "Supported file format but file is malformed." }, { SF_ERR_UNSUPPORTED_ENCODING , "Supported file format but unsupported encoding." }, /* Private error values and their associated strings. */ { SFE_ZERO_MAJOR_FORMAT , "Error : major format is 0." }, { SFE_ZERO_MINOR_FORMAT , "Error : minor format is 0." }, { SFE_BAD_FILE , "File does not exist or is not a regular file (possibly a pipe?)." }, { SFE_BAD_FILE_READ , "File exists but no data could be read." }, { SFE_OPEN_FAILED , "Could not open file." }, { SFE_BAD_SNDFILE_PTR , "Not a valid SNDFILE* pointer." }, { SFE_BAD_SF_INFO_PTR , "NULL SF_INFO pointer passed to libsndfile." }, { SFE_BAD_SF_INCOMPLETE , "SF_PRIVATE struct incomplete and end of header parsing." }, { SFE_BAD_FILE_PTR , "Bad FILE pointer." }, { SFE_BAD_INT_PTR , "Internal error, Bad pointer." }, { SFE_BAD_STAT_SIZE , "Error : software was misconfigured at compile time (sizeof statbuf.st_size)." }, { SFE_NO_TEMP_DIR , "Error : Could not file temp dir." }, { SFE_MALLOC_FAILED , "Internal malloc () failed." }, { SFE_UNIMPLEMENTED , "File contains data in an unimplemented format." }, { SFE_BAD_READ_ALIGN , "Attempt to read a non-integer number of channels." }, { SFE_BAD_WRITE_ALIGN , "Attempt to write a non-integer number of channels." }, { SFE_UNKNOWN_FORMAT , "File contains data in an unknown format." }, { SFE_NOT_READMODE , "Read attempted on file currently open for write." }, { SFE_NOT_WRITEMODE , "Write attempted on file currently open for read." }, { SFE_BAD_MODE_RW , "Error : This file format does not support read/write mode." }, { SFE_BAD_SF_INFO , "Internal error : SF_INFO struct incomplete." }, { SFE_BAD_OFFSET , "Error : supplied offset beyond end of file." 
}, { SFE_NO_EMBED_SUPPORT , "Error : embedding not supported for this file format." }, { SFE_NO_EMBEDDED_RDWR , "Error : cannot open embedded file read/write." }, { SFE_NO_PIPE_WRITE , "Error : this file format does not support pipe write." }, { SFE_BAD_VIRTUAL_IO , "Error : bad pointer on SF_VIRTUAL_IO struct." }, { SFE_BAD_BROADCAST_INFO_SIZE , "Error : bad coding_history_size in SF_BROADCAST_INFO struct." }, { SFE_BAD_BROADCAST_INFO_TOO_BIG , "Error : SF_BROADCAST_INFO struct too large." }, { SFE_BAD_CART_INFO_SIZE , "Error: SF_CART_INFO struct too large." }, { SFE_BAD_CART_INFO_TOO_BIG , "Error: bag tag_text_size in SF_CART_INFO struct." }, { SFE_INTERLEAVE_MODE , "Attempt to write to file with non-interleaved data." }, { SFE_INTERLEAVE_SEEK , "Bad karma in seek during interleave read operation." }, { SFE_INTERLEAVE_READ , "Bad karma in read during interleave read operation." }, { SFE_INTERNAL , "Unspecified internal error." }, { SFE_BAD_COMMAND_PARAM , "Bad parameter passed to function sf_command." }, { SFE_BAD_ENDIAN , "Bad endian-ness. Try default endian-ness" }, { SFE_CHANNEL_COUNT_ZERO , "Channel count is zero." }, { SFE_CHANNEL_COUNT , "Too many channels specified." }, { SFE_CHANNEL_COUNT_BAD , "Bad channel count." }, { SFE_BAD_SEEK , "Internal psf_fseek() failed." }, { SFE_NOT_SEEKABLE , "Seek attempted on unseekable file type." }, { SFE_AMBIGUOUS_SEEK , "Error : combination of file open mode and seek command is ambiguous." }, { SFE_WRONG_SEEK , "Error : invalid seek parameters." }, { SFE_SEEK_FAILED , "Error : parameters OK, but psf_seek() failed." }, { SFE_BAD_OPEN_MODE , "Error : bad mode parameter for file open." }, { SFE_OPEN_PIPE_RDWR , "Error : attempt to open a pipe in read/write mode." }, { SFE_RDWR_POSITION , "Error on RDWR position (cryptic)." }, { SFE_RDWR_BAD_HEADER , "Error : Cannot open file in read/write mode due to string data in header." }, { SFE_CMD_HAS_DATA , "Error : Command fails because file already has audio data." }, { SFE_STR_NO_SUPPORT , "Error : File type does not support string data." }, { SFE_STR_NOT_WRITE , "Error : Trying to set a string when file is not in write mode." }, { SFE_STR_MAX_DATA , "Error : Maximum string data storage reached." }, { SFE_STR_MAX_COUNT , "Error : Maximum string data count reached." }, { SFE_STR_BAD_TYPE , "Error : Bad string data type." }, { SFE_STR_NO_ADD_END , "Error : file type does not support strings added at end of file." }, { SFE_STR_BAD_STRING , "Error : bad string." }, { SFE_STR_WEIRD , "Error : Weird string error." }, { SFE_WAV_NO_RIFF , "Error in WAV file. No 'RIFF' chunk marker." }, { SFE_WAV_NO_WAVE , "Error in WAV file. No 'WAVE' chunk marker." }, { SFE_WAV_NO_FMT , "Error in WAV/W64/RF64 file. No 'fmt ' chunk marker." }, { SFE_WAV_BAD_FMT , "Error in WAV/W64/RF64 file. Malformed 'fmt ' chunk." }, { SFE_WAV_FMT_SHORT , "Error in WAV/W64/RF64 file. Short 'fmt ' chunk." }, { SFE_WAV_BAD_FACT , "Error in WAV file. 'fact' chunk out of place." }, { SFE_WAV_BAD_PEAK , "Error in WAV file. Bad 'PEAK' chunk." }, { SFE_WAV_PEAK_B4_FMT , "Error in WAV file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_WAV_BAD_FORMAT , "Error in WAV file. Errors in 'fmt ' chunk." }, { SFE_WAV_BAD_BLOCKALIGN , "Error in WAV file. Block alignment in 'fmt ' chunk is incorrect." }, { SFE_WAV_NO_DATA , "Error in WAV file. No 'data' chunk marker." }, { SFE_WAV_BAD_LIST , "Error in WAV file. Malformed LIST chunk." }, { SFE_WAV_UNKNOWN_CHUNK , "Error in WAV file. File contains an unknown chunk marker." 
}, { SFE_WAV_WVPK_DATA , "Error in WAV file. Data is in WAVPACK format." }, { SFE_WAV_ADPCM_NOT4BIT , "Error in ADPCM WAV file. Invalid bit width." }, { SFE_WAV_ADPCM_CHANNELS , "Error in ADPCM WAV file. Invalid number of channels." }, { SFE_WAV_ADPCM_SAMPLES , "Error in ADPCM WAV file. Invalid number of samples per block." }, { SFE_WAV_GSM610_FORMAT , "Error in GSM610 WAV file. Invalid format chunk." }, { SFE_AIFF_NO_FORM , "Error in AIFF file, bad 'FORM' marker." }, { SFE_AIFF_AIFF_NO_FORM , "Error in AIFF file, 'AIFF' marker without 'FORM'." }, { SFE_AIFF_COMM_NO_FORM , "Error in AIFF file, 'COMM' marker without 'FORM'." }, { SFE_AIFF_SSND_NO_COMM , "Error in AIFF file, 'SSND' marker without 'COMM'." }, { SFE_AIFF_UNKNOWN_CHUNK , "Error in AIFF file, unknown chunk." }, { SFE_AIFF_COMM_CHUNK_SIZE, "Error in AIFF file, bad 'COMM' chunk size." }, { SFE_AIFF_BAD_COMM_CHUNK , "Error in AIFF file, bad 'COMM' chunk." }, { SFE_AIFF_PEAK_B4_COMM , "Error in AIFF file. 'PEAK' chunk found before 'COMM' chunk." }, { SFE_AIFF_BAD_PEAK , "Error in AIFF file. Bad 'PEAK' chunk." }, { SFE_AIFF_NO_SSND , "Error in AIFF file, bad 'SSND' chunk." }, { SFE_AIFF_NO_DATA , "Error in AIFF file, no sound data." }, { SFE_AIFF_RW_SSND_NOT_LAST, "Error in AIFF file, RDWR only possible if SSND chunk at end of file." }, { SFE_AU_UNKNOWN_FORMAT , "Error in AU file, unknown format." }, { SFE_AU_NO_DOTSND , "Error in AU file, missing '.snd' or 'dns.' marker." }, { SFE_AU_EMBED_BAD_LEN , "Embedded AU file with unknown length." }, { SFE_RAW_READ_BAD_SPEC , "Error while opening RAW file for read. Must specify format and channels.\n" "Possibly trying to open unsupported format." }, { SFE_RAW_BAD_BITWIDTH , "Error. RAW file bitwidth must be a multiple of 8." }, { SFE_RAW_BAD_FORMAT , "Error. Bad format field in SF_INFO struct when opening a RAW file for read." }, { SFE_PAF_NO_MARKER , "Error in PAF file, no marker." }, { SFE_PAF_VERSION , "Error in PAF file, bad version." }, { SFE_PAF_UNKNOWN_FORMAT , "Error in PAF file, unknown format." }, { SFE_PAF_SHORT_HEADER , "Error in PAF file. File shorter than minimal header." }, { SFE_PAF_BAD_CHANNELS , "Error in PAF file. Bad channel count." }, { SFE_SVX_NO_FORM , "Error in 8SVX / 16SV file, no 'FORM' marker." }, { SFE_SVX_NO_BODY , "Error in 8SVX / 16SV file, no 'BODY' marker." }, { SFE_SVX_NO_DATA , "Error in 8SVX / 16SV file, no sound data." }, { SFE_SVX_BAD_COMP , "Error in 8SVX / 16SV file, unsupported compression format." }, { SFE_SVX_BAD_NAME_LENGTH , "Error in 8SVX / 16SV file, NAME chunk too long." }, { SFE_NIST_BAD_HEADER , "Error in NIST file, bad header." }, { SFE_NIST_CRLF_CONVERISON, "Error : NIST file damaged by Windows CR -> CRLF conversion process." }, { SFE_NIST_BAD_ENCODING , "Error in NIST file, unsupported compression format." }, { SFE_VOC_NO_CREATIVE , "Error in VOC file, no 'Creative Voice File' marker." }, { SFE_VOC_BAD_FORMAT , "Error in VOC file, bad format." }, { SFE_VOC_BAD_VERSION , "Error in VOC file, bad version number." }, { SFE_VOC_BAD_MARKER , "Error in VOC file, bad marker in file." }, { SFE_VOC_BAD_SECTIONS , "Error in VOC file, incompatible VOC sections." }, { SFE_VOC_MULTI_SAMPLERATE, "Error in VOC file, more than one sample rate defined." }, { SFE_VOC_MULTI_SECTION , "Unimplemented VOC file feature, file contains multiple sound sections." }, { SFE_VOC_MULTI_PARAM , "Error in VOC file, file contains multiple bit or channel widths." }, { SFE_VOC_SECTION_COUNT , "Error in VOC file, too many sections." 
}, { SFE_VOC_NO_PIPE , "Error : not able to operate on VOC files over a pipe." }, { SFE_IRCAM_NO_MARKER , "Error in IRCAM file, bad IRCAM marker." }, { SFE_IRCAM_BAD_CHANNELS , "Error in IRCAM file, bad channel count." }, { SFE_IRCAM_UNKNOWN_FORMAT, "Error in IRCAM file, unknown encoding format." }, { SFE_W64_64_BIT , "Error in W64 file, file contains 64 bit offset." }, { SFE_W64_NO_RIFF , "Error in W64 file. No 'riff' chunk marker." }, { SFE_W64_NO_WAVE , "Error in W64 file. No 'wave' chunk marker." }, { SFE_W64_NO_DATA , "Error in W64 file. No 'data' chunk marker." }, { SFE_W64_ADPCM_NOT4BIT , "Error in ADPCM W64 file. Invalid bit width." }, { SFE_W64_ADPCM_CHANNELS , "Error in ADPCM W64 file. Invalid number of channels." }, { SFE_W64_GSM610_FORMAT , "Error in GSM610 W64 file. Invalid format chunk." }, { SFE_MAT4_BAD_NAME , "Error in MAT4 file. No variable name." }, { SFE_MAT4_NO_SAMPLERATE , "Error in MAT4 file. No sample rate." }, { SFE_MAT5_BAD_ENDIAN , "Error in MAT5 file. Not able to determine endian-ness." }, { SFE_MAT5_NO_BLOCK , "Error in MAT5 file. Bad block structure." }, { SFE_MAT5_SAMPLE_RATE , "Error in MAT5 file. Not able to determine sample rate." }, { SFE_PVF_NO_PVF1 , "Error in PVF file. No PVF1 marker." }, { SFE_PVF_BAD_HEADER , "Error in PVF file. Bad header." }, { SFE_PVF_BAD_BITWIDTH , "Error in PVF file. Bad bit width." }, { SFE_XI_BAD_HEADER , "Error in XI file. Bad header." }, { SFE_XI_EXCESS_SAMPLES , "Error in XI file. Excess samples in file." }, { SFE_XI_NO_PIPE , "Error : not able to operate on XI files over a pipe." }, { SFE_HTK_NO_PIPE , "Error : not able to operate on HTK files over a pipe." }, { SFE_SDS_NOT_SDS , "Error : not an SDS file." }, { SFE_SDS_BAD_BIT_WIDTH , "Error : bad bit width for SDS file." }, { SFE_SD2_FD_DISALLOWED , "Error : cannot open SD2 file without a file name." }, { SFE_SD2_BAD_DATA_OFFSET , "Error : bad data offset." }, { SFE_SD2_BAD_MAP_OFFSET , "Error : bad map offset." }, { SFE_SD2_BAD_DATA_LENGTH , "Error : bad data length." }, { SFE_SD2_BAD_MAP_LENGTH , "Error : bad map length." }, { SFE_SD2_BAD_RSRC , "Error : bad resource fork." }, { SFE_SD2_BAD_SAMPLE_SIZE , "Error : bad sample size." }, { SFE_FLAC_BAD_HEADER , "Error : bad flac header." }, { SFE_FLAC_NEW_DECODER , "Error : problem while creating flac decoder." }, { SFE_FLAC_INIT_DECODER , "Error : problem while initialization of the flac decoder." }, { SFE_FLAC_LOST_SYNC , "Error : flac decoder lost sync." }, { SFE_FLAC_BAD_SAMPLE_RATE, "Error : flac does not support this sample rate." }, { SFE_FLAC_UNKOWN_ERROR , "Error : unknown error in flac decoder." }, { SFE_WVE_NOT_WVE , "Error : not a WVE file." }, { SFE_WVE_NO_PIPE , "Error : not able to operate on WVE files over a pipe." }, { SFE_DWVW_BAD_BITWIDTH , "Error : Bad bit width for DWVW encoding. Must be 12, 16 or 24." }, { SFE_G72X_NOT_MONO , "Error : G72x encoding does not support more than 1 channel." }, { SFE_VORBIS_ENCODER_BUG , "Error : Sample rate chosen is known to trigger a Vorbis encoder bug on this CPU." }, { SFE_RF64_NOT_RF64 , "Error : Not an RF64 file." }, { SFE_RF64_PEAK_B4_FMT , "Error in RF64 file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_RF64_NO_DATA , "Error in RF64 file. No 'data' chunk marker." }, { SFE_ALAC_FAIL_TMPFILE , "Error : Failed to open tmp file for ALAC encoding." }, { SFE_BAD_CHUNK_PTR , "Error : Bad SF_CHUNK_INFO pointer." }, { SFE_UNKNOWN_CHUNK , "Error : Unknown chunk marker." }, { SFE_BAD_CHUNK_FORMAT , "Error : Reading/writing chunks from this file format is not supported." 
}, { SFE_BAD_CHUNK_MARKER , "Error : Bad chunk marker." }, { SFE_BAD_CHUNK_DATA_PTR , "Error : Bad data pointer in SF_CHUNK_INFO struct." }, { SFE_FILENAME_TOO_LONG , "Error : Supplied filename too long." }, { SFE_MAX_ERROR , "Maximum error number." }, { SFE_MAX_ERROR + 1 , NULL } } ; /*------------------------------------------------------------------------------ */ static int format_from_extension (SF_PRIVATE *psf) ; static int guess_file_type (SF_PRIVATE *psf) ; static int validate_sfinfo (SF_INFO *sfinfo) ; static int validate_psf (SF_PRIVATE *psf) ; static void save_header_info (SF_PRIVATE *psf) ; static int copy_filename (SF_PRIVATE *psf, const char *path) ; static int psf_close (SF_PRIVATE *psf) ; static int try_resource_fork (SF_PRIVATE * psf) ; /*------------------------------------------------------------------------------ ** Private (static) variables. */ int sf_errno = 0 ; static char sf_parselog [SF_BUFFER_LEN] = { 0 } ; static char sf_syserr [SF_SYSERR_LEN] = { 0 } ; /*------------------------------------------------------------------------------ */ #define VALIDATE_SNDFILE_AND_ASSIGN_PSF(a, b, c) \ { if ((a) == NULL) \ { sf_errno = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ (b) = (SF_PRIVATE*) (a) ; \ if ((b)->virtual_io == SF_FALSE && \ psf_file_valid (b) == 0) \ { (b)->error = SFE_BAD_FILE_PTR ; \ return 0 ; \ } ; \ if ((b)->Magick != SNDFILE_MAGICK) \ { (b)->error = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ if (c) (b)->error = 0 ; \ } /*------------------------------------------------------------------------------ ** Public functions. */ SNDFILE* sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */ SNDFILE* sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */ SNDFILE* sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. 
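**
** A minimal caller-side sketch of what this function expects ; the vio_*
** callback names and my_state are hypothetical and each callback must match
** the corresponding sf_vio_* typedef from <sndfile.h> (e.g.
** sf_count_t vio_read (void *ptr, sf_count_t count, void *user_data)) :
**
**     SF_VIRTUAL_IO vio ;
**     vio.get_filelen = vio_len ;
**     vio.seek        = vio_seek ;
**     vio.read        = vio_read ;
**     vio.write       = NULL ;      (may stay NULL for SFM_READ, see the checks below)
**     vio.tell        = vio_tell ;
**     file = sf_open_virtual (&vio, SFM_READ, &sfinfo, &my_state) ;
**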
*/ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */ int sf_close (SNDFILE *sndfile) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_close (psf) ; } /* sf_close */ void sf_write_sync (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE *) sndfile) == NULL) return ; psf_fsync (psf) ; return ; } /* sf_write_sync */ /*============================================================================== */ const char* sf_error_number (int errnum) { static const char *bad_errnum = "No error defined for this error number. This is a bug in libsndfile." ; int k ; if (errnum == SFE_MAX_ERROR) return SndfileErrors [0].str ; if (errnum < 0 || errnum > SFE_MAX_ERROR) { /* This really shouldn't happen in release versions. */ printf ("Not a valid error number (%d).\n", errnum) ; return bad_errnum ; } ; for (k = 0 ; SndfileErrors [k].str ; k++) if (errnum == SndfileErrors [k].error) return SndfileErrors [k].str ; return bad_errnum ; } /* sf_error_number */ const char* sf_strerror (SNDFILE *sndfile) { SF_PRIVATE *psf = NULL ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; if (errnum == SFE_SYSTEM && sf_syserr [0]) return sf_syserr ; } else { psf = (SF_PRIVATE *) sndfile ; if (psf->Magick != SNDFILE_MAGICK) return "sf_strerror : Bad magic number." 
; errnum = psf->error ; if (errnum == SFE_SYSTEM && psf->syserr [0]) return psf->syserr ; } ; return sf_error_number (errnum) ; } /* sf_strerror */ /*------------------------------------------------------------------------------ */ int sf_error (SNDFILE *sndfile) { SF_PRIVATE *psf ; if (sndfile == NULL) return sf_errno ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; if (psf->error) return psf->error ; return 0 ; } /* sf_error */ /*------------------------------------------------------------------------------ */ int sf_perror (SNDFILE *sndfile) { SF_PRIVATE *psf ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; } else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; fprintf (stderr, "%s\n", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_perror */ /*------------------------------------------------------------------------------ */ int sf_error_str (SNDFILE *sndfile, char *str, size_t maxlen) { SF_PRIVATE *psf ; int errnum ; if (str == NULL) return SFE_INTERNAL ; if (sndfile == NULL) errnum = sf_errno ; else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; snprintf (str, maxlen, "%s", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_error_str */ /*============================================================================== */ int sf_format_check (const SF_INFO *info) { int subformat, endian ; subformat = SF_CODEC (info->format) ; endian = SF_ENDIAN (info->format) ; /* This is the place where each file format can check if the supplied ** SF_INFO struct is valid. ** Return 0 on failure, 1 on success. */ if (info->channels < 1 || info->channels > SF_MAX_CHANNELS) return 0 ; if (info->samplerate < 0) return 0 ; switch (SF_CONTAINER (info->format)) { case SF_FORMAT_WAV : /* WAV now allows both endian, RIFF or RIFX (little or big respectively) */ if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_WAVEX : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_AIFF : /* AIFF does allow both endian-nesses for PCM data.*/ if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; /* For other encodings reject any endian-ness setting.
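** (Concretely, only the default SF_ENDIAN_FILE value, which is 0, is accepted
** for those encodings ; an explicit SF_ENDIAN_LITTLE, SF_ENDIAN_BIG or
** SF_ENDIAN_CPU makes the combination invalid.)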
*/ if (endian != 0) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_IMA_ADPCM && (info->channels == 1 || info->channels == 2)) return 1 ; break ; case SF_FORMAT_AU : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_24 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_40 && info->channels == 1) return 1 ; break ; case SF_FORMAT_CAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_ALAC_16 || subformat == SF_FORMAT_ALAC_20) return 1 ; if (subformat == SF_FORMAT_ALAC_24 || subformat == SF_FORMAT_ALAC_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_RAW : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_ULAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_VOX_ADPCM && info->channels == 1) return 1 ; break ; case SF_FORMAT_PAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SVX : /* SVX only supports writing mono SVX files. */ if (info->channels > 1) return 0 ; /* Always big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_NIST : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_IRCAM : if (info->channels > 256) return 0 ; if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_FLOAT) return 1 ; break ; case SF_FORMAT_VOC : if (info->channels > 2) return 0 ; /* VOC is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_W64 : /* W64 is strictly little endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT4 : if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT5 : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_PVF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_XI : if (info->channels != 1) return 0 ; if (subformat == SF_FORMAT_DPCM_8 || subformat == SF_FORMAT_DPCM_16) return 1 ; break ; case SF_FORMAT_HTK : if (info->channels != 1) return 0 ; /* HTK is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_SDS : if (info->channels != 1) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_AVR : if (info->channels > 2) return 0 ; /* AVR is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_FLAC : /* FLAC can't do more than 8 channels. */ if (info->channels > 8) return 0 ; if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SD2 : /* SD2 is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_WVE : if (info->channels > 1) return 0 ; /* WVE is strictly big endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_OGG : if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_VORBIS) return 1 ; break ; case SF_FORMAT_MPC2K : if (info->channels > 2) return 0 ; /* MPC2000 is strictly little endian.
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_RF64 : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; default : break ; } ; return 0 ; } /* sf_format_check */ /*------------------------------------------------------------------------------ */ const char * sf_version_string (void) { #if ENABLE_EXPERIMENTAL_CODE return PACKAGE_NAME "-" PACKAGE_VERSION "-exp" ; #else return PACKAGE_NAME "-" PACKAGE_VERSION ; #endif } /*------------------------------------------------------------------------------ */ int sf_command (SNDFILE *sndfile, int command, void *data, int datasize) { SF_PRIVATE *psf = (SF_PRIVATE *) sndfile ; double quality ; int old_value ; /* This set of commands do not need the sndfile parameter. */ switch (command) { case SFC_GET_LIB_VERSION : if (data == NULL) { if (psf) psf->error = SFE_BAD_COMMAND_PARAM ; return SFE_BAD_COMMAND_PARAM ; } ; snprintf (data, datasize, "%s", sf_version_string ()) ; return strlen (data) ; case SFC_GET_SIMPLE_FORMAT_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_simple_count () ; return 0 ; case SFC_GET_SIMPLE_FORMAT : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_simple (data) ; case SFC_GET_FORMAT_MAJOR_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_major_count () ; return 0 ; case SFC_GET_FORMAT_MAJOR : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_major (data) ; case SFC_GET_FORMAT_SUBTYPE_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_subtype_count () ; return 0 ; case SFC_GET_FORMAT_SUBTYPE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_subtype (data) ; case SFC_GET_FORMAT_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_info (data) ; } ; if (sndfile == NULL && command == SFC_GET_LOG_INFO) { if (data == NULL) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; snprintf (data, datasize, "%s", sf_parselog) ; return strlen (data) ; } ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; switch (command) { case SFC_SET_NORM_FLOAT : old_value = psf->norm_float ; psf->norm_float = (datasize) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_CURRENT_SF_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; memcpy (data, &psf->sf, sizeof (SF_INFO)) ; break ; case SFC_SET_NORM_DOUBLE : old_value = psf->norm_double ; psf->norm_double = (datasize) ? 
SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_NORM_FLOAT : return psf->norm_float ; case SFC_GET_NORM_DOUBLE : return psf->norm_double ; case SFC_SET_SCALE_FLOAT_INT_READ : old_value = psf->float_int_mult ; psf->float_int_mult = (datasize != 0) ? SF_TRUE : SF_FALSE ; if (psf->float_int_mult && psf->float_max < 0.0) /* Scale to prevent wrap-around distortion. */ psf->float_max = (32768.0 / 32767.0) * psf_calc_signal_max (psf, SF_FALSE) ; return old_value ; case SFC_SET_SCALE_INT_FLOAT_WRITE : old_value = psf->scale_int_float ; psf->scale_int_float = (datasize != 0) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_SET_ADD_PEAK_CHUNK : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and AIFF support the PEAK chunk. */ switch (format) { case SF_FORMAT_AIFF : case SF_FORMAT_CAF : case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_RF64 : break ; default : return SF_FALSE ; } ; format = SF_CODEC (psf->sf.format) ; /* Only files containg the following data types support the PEAK chunk. */ if (format != SF_FORMAT_FLOAT && format != SF_FORMAT_DOUBLE) return SF_FALSE ; } ; /* Can only do this is in SFM_WRITE mode. */ if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; /* Everything seems OK, so set psf->has_peak and re-write header. */ if (datasize == SF_FALSE && psf->peak_info != NULL) { free (psf->peak_info) ; psf->peak_info = NULL ; } else if (psf->peak_info == NULL) { psf->peak_info = peak_info_calloc (psf->sf.channels) ; if (psf->peak_info != NULL) psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return datasize ; case SFC_SET_ADD_HEADER_PAD_CHUNK : return SF_FALSE ; case SFC_GET_LOG_INFO : if (data == NULL) return SFE_BAD_COMMAND_PARAM ; snprintf (data, datasize, "%s", psf->parselog.buf) ; break ; case SFC_CALC_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_FALSE) ; break ; case SFC_CALC_NORM_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_TRUE) ; break ; case SFC_CALC_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_FALSE) ; case SFC_CALC_NORM_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_TRUE) ; case SFC_GET_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_signal_max (psf, (double *) data) ; case SFC_GET_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_max_all_channels (psf, (double*) data) ; case SFC_UPDATE_HEADER_NOW : if (psf->write_header) psf->write_header (psf, SF_TRUE) ; break ; case SFC_SET_UPDATE_HEADER_AUTO : psf->auto_header = datasize ? SF_TRUE : SF_FALSE ; return psf->auto_header ; break ; case SFC_SET_ADD_DITHER_ON_WRITE : case SFC_SET_ADD_DITHER_ON_READ : /* ** FIXME ! ** These are obsolete. Just return. 
** Remove some time after version 1.0.8. */ break ; case SFC_SET_DITHER_ON_WRITE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->write_dither, data, sizeof (psf->write_dither)) ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_WRITE) ; break ; case SFC_SET_DITHER_ON_READ : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->read_dither, data, sizeof (psf->read_dither)) ; if (psf->file.mode == SFM_READ || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_READ) ; break ; case SFC_FILE_TRUNCATE : if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_TRUE ; if (datasize != sizeof (sf_count_t)) return SF_TRUE ; if (data == NULL || datasize != sizeof (sf_count_t)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } else { sf_count_t position ; position = *((sf_count_t*) data) ; if (sf_seek (sndfile, position, SEEK_SET) != position) return SF_TRUE ; psf->sf.frames = position ; position = psf_fseek (psf, 0, SEEK_CUR) ; return psf_ftruncate (psf, position) ; } ; break ; case SFC_SET_RAW_START_OFFSET : if (data == NULL || datasize != sizeof (sf_count_t)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) return (psf->error = SFE_BAD_COMMAND_PARAM) ; psf->dataoffset = *((sf_count_t*) data) ; sf_seek (sndfile, 0, SEEK_CUR) ; break ; case SFC_GET_EMBED_FILE_INFO : if (data == NULL || datasize != sizeof (SF_EMBED_FILE_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; ((SF_EMBED_FILE_INFO*) data)->offset = psf->fileoffset ; ((SF_EMBED_FILE_INFO*) data)->length = psf->filelength ; break ; /* Lite remove start */ case SFC_TEST_IEEE_FLOAT_REPLACE : psf->ieee_replace = (datasize) ? SF_TRUE : SF_FALSE ; if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_FLOAT) float32_init (psf) ; else if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_DOUBLE) double64_init (psf) ; else return (psf->error = SFE_BAD_COMMAND_PARAM) ; break ; /* Lite remove end */ case SFC_SET_CLIPPING : psf->add_clipping = (datasize) ? SF_TRUE : SF_FALSE ; return psf->add_clipping ; case SFC_GET_CLIPPING : return psf->add_clipping ; case SFC_GET_LOOP_INFO : if (datasize != sizeof (SF_LOOP_INFO) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->loop_info == NULL) return SF_FALSE ; memcpy (data, psf->loop_info, sizeof (SF_LOOP_INFO)) ; return SF_TRUE ; case SFC_SET_BROADCAST_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 supports the BEXT (Broadcast) chunk. */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_WAVEX && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode. */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. 
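**
** Caller-side sketch (a typical-usage guess, not part of this file) for a
** WAV or RF64 destination ; the command has to be issued after sf_open ()
** and before the first write :
**
**     SF_BROADCAST_INFO binfo ;
**     memset (&binfo, 0, sizeof (binfo)) ;
**     snprintf (binfo.description, sizeof (binfo.description), "take 1") ;
**     sf_command (file, SFC_SET_BROADCAST_INFO, &binfo, sizeof (binfo)) ;
**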
*/ if (psf->broadcast_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (broadcast_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_BROADCAST_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return broadcast_var_get (psf, data, datasize) ; case SFC_SET_CART_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 support cart chunk format */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->cart_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (cart_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_CART_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return cart_var_get (psf, data, datasize) ; case SFC_GET_CUE_COUNT : if (datasize != sizeof (uint32_t) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues != NULL) { *((uint32_t *) data) = psf->cues->cue_count ; return SF_TRUE ; } ; return SF_FALSE ; case SFC_GET_CUE : if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL) return SF_FALSE ; psf_get_cues (psf, data, datasize) ; return SF_TRUE ; case SFC_SET_CUE : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL && (psf->cues = psf_cues_dup (data)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; return SF_TRUE ; case SFC_GET_INSTRUMENT : if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL) return SF_FALSE ; memcpy (data, psf->instrument, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_SET_INSTRUMENT : /* If data has already been written this must fail. 
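**
** Caller-side sketch with hypothetical values ; as with the broadcast chunk
** above, this only succeeds before any audio data has been written :
**
**     SF_INSTRUMENT inst ;
**     memset (&inst, 0, sizeof (inst)) ;
**     inst.basenote = 60 ;
**     inst.velocity_lo = 0 ;
**     inst.velocity_hi = 127 ;
**     sf_command (file, SFC_SET_INSTRUMENT, &inst, sizeof (inst)) ;
**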
*/ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->instrument, data, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_RAW_DATA_NEEDS_ENDSWAP : return psf->data_endswap ; case SFC_GET_CHANNEL_MAP_INFO : if (psf->channel_map == NULL) return SF_FALSE ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; memcpy (data, psf->channel_map, datasize) ; return SF_TRUE ; case SFC_SET_CHANNEL_MAP_INFO : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; { int *iptr ; for (iptr = data ; iptr < (int*) data + psf->sf.channels ; iptr++) { if (*iptr <= SF_CHANNEL_MAP_INVALID || *iptr >= SF_CHANNEL_MAP_MAX) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; } ; } ; free (psf->channel_map) ; if ((psf->channel_map = malloc (datasize)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->channel_map, data, datasize) ; /* ** Pass the command down to the container's command handler. ** Don't pass user data, use validated psf->channel_map data instead. */ if (psf->command) return psf->command (psf, command, NULL, 0) ; return SF_FALSE ; case SFC_SET_VBR_ENCODING_QUALITY : if (data == NULL || datasize != sizeof (double)) return SF_FALSE ; quality = *((double *) data) ; quality = 1.0 - SF_MAX (0.0, SF_MIN (1.0, quality)) ; return sf_command (sndfile, SFC_SET_COMPRESSION_LEVEL, &quality, sizeof (quality)) ; default : /* Must be a file specific command. Pass it on. */ if (psf->command) return psf->command (psf, command, data, datasize) ; psf_log_printf (psf, "*** sf_command : cmd = 0x%X\n", command) ; return (psf->error = SFE_BAD_COMMAND_PARAM) ; } ; return 0 ; } /* sf_command */ /*------------------------------------------------------------------------------ */ sf_count_t sf_seek (SNDFILE *sndfile, sf_count_t offset, int whence) { SF_PRIVATE *psf ; sf_count_t seek_from_start = 0, retval ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (! psf->sf.seekable) { psf->error = SFE_NOT_SEEKABLE ; return PSF_SEEK_ERROR ; } ; /* If the whence parameter has a mode ORed in, check to see that ** it makes sense. */ if (((whence & SFM_MASK) == SFM_WRITE && psf->file.mode == SFM_READ) || ((whence & SFM_MASK) == SFM_READ && psf->file.mode == SFM_WRITE)) { psf->error = SFE_WRONG_SEEK ; return PSF_SEEK_ERROR ; } ; /* Convert all SEEK_CUR and SEEK_END into seek_from_start to be ** used with SEEK_SET. */ switch (whence) { /* The SEEK_SET behaviour is independant of mode. */ case SEEK_SET : case SEEK_SET | SFM_READ : case SEEK_SET | SFM_WRITE : case SEEK_SET | SFM_RDWR : seek_from_start = offset ; break ; /* The SEEK_CUR is a little more tricky. 
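**
** In SFM_RDWR mode the read and write positions move independently, so a
** caller can OR one of SFM_READ / SFM_WRITE into whence to say which pointer
** a relative seek applies to ; e.g. (caller-side sketch)
** sf_seek (file, 100, SEEK_CUR | SFM_READ) advances only the read head, while
** sf_seek (file, 0, SEEK_CUR | SFM_WRITE) just reports the write position.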
*/ case SEEK_CUR : if (offset == 0) { if (psf->file.mode == SFM_READ) return psf->read_current ; if (psf->file.mode == SFM_WRITE) return psf->write_current ; } ; if (psf->file.mode == SFM_READ) seek_from_start = psf->read_current + offset ; else if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) seek_from_start = psf->write_current + offset ; else psf->error = SFE_AMBIGUOUS_SEEK ; break ; case SEEK_CUR | SFM_READ : if (offset == 0) return psf->read_current ; seek_from_start = psf->read_current + offset ; break ; case SEEK_CUR | SFM_WRITE : if (offset == 0) return psf->write_current ; seek_from_start = psf->write_current + offset ; break ; /* The SEEK_END */ case SEEK_END : case SEEK_END | SFM_READ : case SEEK_END | SFM_WRITE : seek_from_start = psf->sf.frames + offset ; break ; default : psf->error = SFE_BAD_SEEK ; break ; } ; if (psf->error) return PSF_SEEK_ERROR ; if (psf->file.mode == SFM_RDWR || psf->file.mode == SFM_WRITE) { if (seek_from_start < 0) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; } else if (seek_from_start < 0 || seek_from_start > psf->sf.frames) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; if (psf->seek) { int new_mode = (whence & SFM_MASK) ? (whence & SFM_MASK) : psf->file.mode ; retval = psf->seek (psf, new_mode, seek_from_start) ; switch (new_mode) { case SFM_READ : psf->read_current = retval ; break ; case SFM_WRITE : psf->write_current = retval ; break ; case SFM_RDWR : psf->read_current = retval ; psf->write_current = retval ; new_mode = SFM_READ ; break ; } ; psf->last_op = new_mode ; return retval ; } ; psf->error = SFE_AMBIGUOUS_SEEK ; return PSF_SEEK_ERROR ; } /* sf_seek */ /*------------------------------------------------------------------------------ */ const char* sf_get_string (SNDFILE *sndfile, int str_type) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return NULL ; if (psf->Magick != SNDFILE_MAGICK) return NULL ; return psf_get_string (psf, str_type) ; } /* sf_get_string */ int sf_set_string (SNDFILE *sndfile, int str_type, const char* str) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_set_string (psf, str_type, str) ; } /* sf_get_string */ /*------------------------------------------------------------------------------ */ int sf_current_byterate (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return -1 ; if (psf->Magick != SNDFILE_MAGICK) return -1 ; /* This should cover all PCM and floating point formats. */ if (psf->bytewidth) return psf->sf.samplerate * psf->sf.channels * psf->bytewidth ; if (psf->byterate) return psf->byterate (psf) ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_IMA_ADPCM : case SF_FORMAT_MS_ADPCM : case SF_FORMAT_VOX_ADPCM : return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_GSM610 : return (psf->sf.samplerate * psf->sf.channels * 13000) / 8000 ; case SF_FORMAT_G721_32 : /* 32kbs G721 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_G723_24 : /* 24kbs G723 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels * 3) / 8 ; case SF_FORMAT_G723_40 : /* 40kbs G723 ADPCM encoding. 
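** (40 kbit/s at the canonical 8 kHz rate is 5 bits per sample, hence the
** samplerate * channels * 5 / 8 bytes-per-second value below ; the 24 kbit/s
** case above is the same calculation with 3 bits per sample.)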
*/ return (psf->sf.samplerate * psf->sf.channels * 5) / 8 ; default : break ; } ; return -1 ; } /* sf_current_byterate */ /*============================================================================== */ sf_count_t sf_read_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) { SF_PRIVATE *psf ; sf_count_t count, extra ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (bytes < 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, bytes) ; return 0 ; } ; if (bytes % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf_fread (ptr, 1, bytes, psf) ; if (psf->read_current + count / blockwidth <= psf->sf.frames) psf->read_current += count / blockwidth ; else { count = (psf->sf.frames - psf->read_current) * blockwidth ; extra = bytes - count ; psf_memset (((char *) ptr) + count, 0, extra) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_short (SNDFILE *sndfile, short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (short)) ; return 0 ; /* End of file. */ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_short */ sf_count_t sf_readf_short (SNDFILE *sndfile, short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (short)) ; return 0 ; /* End of file. 
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_int */ sf_count_t sf_readf_int (SNDFILE *sndfile, int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, 
psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_float */ sf_count_t sf_readf_float (SNDFILE *sndfile, float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_double (SNDFILE *sndfile, double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_double */ sf_count_t sf_readf_double (SNDFILE *sndfile, double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; 
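/* Short read at end of file : zero the untouched tail of the caller's buffer and leave read_current at the frame count (end of data). */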
psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_double */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_raw (SNDFILE *sndfile, const void *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf_fwrite (ptr, 1, len, psf) ; psf->write_current += count / blockwidth ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_short (SNDFILE *sndfile, const short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_short */ sf_count_t sf_writef_short (SNDFILE *sndfile, const short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_int (SNDFILE *sndfile, const int *ptr, sf_count_t len) { SF_PRIVATE 
*psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_int */ sf_count_t sf_writef_int (SNDFILE *sndfile, const int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_float (SNDFILE *sndfile, const float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_float (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_float */ sf_count_t sf_writef_float (SNDFILE *sndfile, const float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; 
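/* The codec layer works in items rather than frames, so the frame count is multiplied out by the channel count here and divided back out for the return value. */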
count = psf->write_float (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_double (SNDFILE *sndfile, const double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_double */ sf_count_t sf_writef_double (SNDFILE *sndfile, const double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_double */ /*========================================================================= ** Private functions. */ static int try_resource_fork (SF_PRIVATE * psf) { int old_error = psf->error ; /* Set READ mode now, to see if resource fork exists. */ psf->rsrc.mode = SFM_READ ; if (psf_open_rsrc (psf) != 0) { psf->error = old_error ; return 0 ; } ; /* More checking here. */ psf_log_printf (psf, "Resource fork : %s\n", psf->rsrc.path.c) ; return SF_FORMAT_SD2 ; } /* try_resource_fork */ static int format_from_extension (SF_PRIVATE *psf) { char *cptr ; char buffer [16] ; int format = 0 ; if ((cptr = strrchr (psf->file.name.c, '.')) == NULL) return 0 ; cptr ++ ; if (strlen (cptr) > sizeof (buffer) - 1) return 0 ; psf_strlcpy (buffer, sizeof (buffer), cptr) ; buffer [sizeof (buffer) - 1] = 0 ; /* Convert everything in the buffer to lower case. 
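** (e.g. a file named "SPEECH.AU" yields the extension "au", which maps to the
** headerless u-law defaults below.)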
*/ cptr = buffer ; while (*cptr) { *cptr = tolower (*cptr) ; cptr ++ ; } ; cptr = buffer ; if (strcmp (cptr, "au") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "snd") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "vox") == 0 || strcmp (cptr, "vox8") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "vox6") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 6000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "gsm") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_GSM610 ; } /* For RAW files, make sure the dataoffset if set correctly. */ if ((SF_CONTAINER (format)) == SF_FORMAT_RAW) psf->dataoffset = 0 ; return format ; } /* format_from_extension */ static int guess_file_type (SF_PRIVATE *psf) { uint32_t buffer [3], format ; if (psf_binheader_readf (psf, "b", &buffer, SIGNED_SIZEOF (buffer)) != SIGNED_SIZEOF (buffer)) { psf->error = SFE_BAD_FILE_READ ; return 0 ; } ; if ((buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'F') || buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'X')) && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_WAV ; if (buffer [0] == MAKE_MARKER ('F', 'O', 'R', 'M')) { if (buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'F') || buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'C')) return SF_FORMAT_AIFF ; if (buffer [2] == MAKE_MARKER ('8', 'S', 'V', 'X') || buffer [2] == MAKE_MARKER ('1', '6', 'S', 'V')) return SF_FORMAT_SVX ; return 0 ; } ; if (buffer [0] == MAKE_MARKER ('.', 's', 'n', 'd') || buffer [0] == MAKE_MARKER ('d', 'n', 's', '.')) return SF_FORMAT_AU ; if ((buffer [0] == MAKE_MARKER ('f', 'a', 'p', ' ') || buffer [0] == MAKE_MARKER (' ', 'p', 'a', 'f'))) return SF_FORMAT_PAF ; if (buffer [0] == MAKE_MARKER ('N', 'I', 'S', 'T')) return SF_FORMAT_NIST ; if (buffer [0] == MAKE_MARKER ('C', 'r', 'e', 'a') && buffer [1] == MAKE_MARKER ('t', 'i', 'v', 'e')) return SF_FORMAT_VOC ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0xF8, 0xFF)) == MAKE_MARKER (0x64, 0xA3, 0x00, 0x00) || (buffer [0] & MAKE_MARKER (0xFF, 0xF8, 0xFF, 0xFF)) == MAKE_MARKER (0x00, 0x00, 0xA3, 0x64)) return SF_FORMAT_IRCAM ; if (buffer [0] == MAKE_MARKER ('r', 'i', 'f', 'f')) return SF_FORMAT_W64 ; if (buffer [0] == MAKE_MARKER (0, 0, 0x03, 0xE8) && buffer [1] == MAKE_MARKER (0, 0, 0, 1) && buffer [2] == MAKE_MARKER (0, 0, 0, 1)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER (0, 0, 0, 0) && buffer [1] == MAKE_MARKER (1, 0, 0, 0) && buffer [2] == MAKE_MARKER (1, 0, 0, 0)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER ('M', 'A', 'T', 'L') && buffer [1] == MAKE_MARKER ('A', 'B', ' ', '5')) return SF_FORMAT_MAT5 ; if (buffer [0] == MAKE_MARKER ('P', 'V', 'F', '1')) return SF_FORMAT_PVF ; if (buffer [0] == MAKE_MARKER ('E', 'x', 't', 'e') && buffer [1] == MAKE_MARKER ('n', 'd', 'e', 'd') && buffer [2] == MAKE_MARKER (' ', 'I', 'n', 's')) return SF_FORMAT_XI ; if (buffer [0] == MAKE_MARKER ('c', 'a', 'f', 'f') && buffer [2] == MAKE_MARKER ('d', 'e', 's', 'c')) return SF_FORMAT_CAF ; if (buffer [0] == MAKE_MARKER ('O', 'g', 'g', 'S')) return SF_FORMAT_OGG ; if (buffer [0] == MAKE_MARKER ('A', 'L', 'a', 'w') && buffer [1] == MAKE_MARKER ('S', 'o', 'u', 'n') && buffer [2] == MAKE_MARKER ('d', 'F', 'i', 'l')) return SF_FORMAT_WVE ; if (buffer [0] == MAKE_MARKER ('D', 'i', 'a', 'm') && buffer 
[1] == MAKE_MARKER ('o', 'n', 'd', 'W') && buffer [2] == MAKE_MARKER ('a', 'r', 'e', ' ')) return SF_FORMAT_DWD ; if (buffer [0] == MAKE_MARKER ('L', 'M', '8', '9') || buffer [0] == MAKE_MARKER ('5', '3', 0, 0)) return SF_FORMAT_TXW ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0x80, 0xFF)) == MAKE_MARKER (0xF0, 0x7E, 0, 0x01)) return SF_FORMAT_SDS ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0, 0)) == MAKE_MARKER (1, 4, 0, 0)) return SF_FORMAT_MPC2K ; if (buffer [0] == MAKE_MARKER ('C', 'A', 'T', ' ') && buffer [2] == MAKE_MARKER ('R', 'E', 'X', '2')) return SF_FORMAT_REX2 ; if (buffer [0] == MAKE_MARKER (0x30, 0x26, 0xB2, 0x75) && buffer [1] == MAKE_MARKER (0x8E, 0x66, 0xCF, 0x11)) return 0 /*-SF_FORMAT_WMA-*/ ; /* HMM (Hidden Markov Model) Tool Kit. */ if (buffer [2] == MAKE_MARKER (0, 2, 0, 0) && 2 * ((int64_t) BE2H_32 (buffer [0])) + 12 == psf->filelength) return SF_FORMAT_HTK ; if (buffer [0] == MAKE_MARKER ('f', 'L', 'a', 'C')) return SF_FORMAT_FLAC ; if (buffer [0] == MAKE_MARKER ('2', 'B', 'I', 'T')) return SF_FORMAT_AVR ; if (buffer [0] == MAKE_MARKER ('R', 'F', '6', '4') && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_RF64 ; if (buffer [0] == MAKE_MARKER ('I', 'D', '3', 3)) { psf_log_printf (psf, "Found 'ID3' marker.\n") ; if (id3_skip (psf)) return guess_file_type (psf) ; return 0 ; } ; /* Turtle Beach SMP 16-bit */ if (buffer [0] == MAKE_MARKER ('S', 'O', 'U', 'N') && buffer [1] == MAKE_MARKER ('D', ' ', 'S', 'A')) return 0 ; /* Yamaha sampler format. */ if (buffer [0] == MAKE_MARKER ('S', 'Y', '8', '0') || buffer [0] == MAKE_MARKER ('S', 'Y', '8', '5')) return 0 ; if (buffer [0] == MAKE_MARKER ('a', 'j', 'k', 'g')) return 0 /*-SF_FORMAT_SHN-*/ ; /* This must be the last one. */ if (psf->filelength > 0 && (format = try_resource_fork (psf)) != 0) return format ; return 0 ; } /* guess_file_type */ static int validate_sfinfo (SF_INFO *sfinfo) { if (sfinfo->samplerate < 1) return 0 ; if (sfinfo->frames < 0) return 0 ; if (sfinfo->channels < 1) return 0 ; if ((SF_CONTAINER (sfinfo->format)) == 0) return 0 ; if ((SF_CODEC (sfinfo->format)) == 0) return 0 ; if (sfinfo->sections < 1) return 0 ; return 1 ; } /* validate_sfinfo */ static int validate_psf (SF_PRIVATE *psf) { if (psf->datalength < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : datalength == %D.\n", psf->datalength) ; return 0 ; } ; if (psf->dataoffset < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : dataoffset == %D.\n", psf->dataoffset) ; return 0 ; } ; if (psf->blockwidth && psf->blockwidth != psf->sf.channels * psf->bytewidth) { psf_log_printf (psf, "Invalid SF_PRIVATE field : channels * bytewidth == %d.\n", psf->sf.channels * psf->bytewidth) ; return 0 ; } ; return 1 ; } /* validate_psf */ static void save_header_info (SF_PRIVATE *psf) { snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; } /* save_header_info */ static int copy_filename (SF_PRIVATE *psf, const char *path) { const char *ccptr ; char *cptr ; if (strlen (path) > 1 && strlen (path) - 1 >= sizeof (psf->file.path.c)) { psf->error = SFE_FILENAME_TOO_LONG ; return psf->error ; } ; snprintf (psf->file.path.c, sizeof (psf->file.path.c), "%s", path) ; if ((ccptr = strrchr (path, '/')) || (ccptr = strrchr (path, '\\'))) ccptr ++ ; else ccptr = path ; snprintf (psf->file.name.c, sizeof (psf->file.name.c), "%s", ccptr) ; /* Now grab the directory. 
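** Copy the whole path into file.dir.c first, then truncate it just after the
** last '/' or '\\' separator ; if the path contains no separator at all the
** directory component is left empty.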
*/ snprintf (psf->file.dir.c, sizeof (psf->file.dir.c), "%s", path) ; if ((cptr = strrchr (psf->file.dir.c, '/')) || (cptr = strrchr (psf->file.dir.c, '\\'))) cptr [1] = 0 ; else psf->file.dir.c [0] = 0 ; return 0 ; } /* copy_filename */ /*============================================================================== */ static int psf_close (SF_PRIVATE *psf) { uint32_t k ; int error = 0 ; if (psf->codec_close) { error = psf->codec_close (psf) ; /* To prevent it being called in psf->container_close(). */ psf->codec_close = NULL ; } ; if (psf->container_close) error = psf->container_close (psf) ; error = psf_fclose (psf) ; psf_close_rsrc (psf) ; /* For an ISO C compliant implementation it is ok to free a NULL pointer. */ free (psf->container_data) ; free (psf->codec_data) ; free (psf->interleave) ; free (psf->dither) ; free (psf->peak_info) ; free (psf->broadcast_16k) ; free (psf->loop_info) ; free (psf->instrument) ; free (psf->cues) ; free (psf->channel_map) ; free (psf->format_desc) ; free (psf->strings.storage) ; if (psf->wchunks.chunks) for (k = 0 ; k < psf->wchunks.used ; k++) free (psf->wchunks.chunks [k].data) ; free (psf->rchunks.chunks) ; free (psf->wchunks.chunks) ; free (psf->iterator) ; free (psf->cart_16k) ; memset (psf, 0, sizeof (SF_PRIVATE)) ; free (psf) ; return error ; } /* psf_close */ SNDFILE * psf_open_file (SF_PRIVATE *psf, SF_INFO *sfinfo) { int error, format ; sf_errno = error = 0 ; sf_parselog [0] = 0 ; if (psf->error) { error = psf->error ; goto error_exit ; } ; if (psf->file.mode != SFM_READ && psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) { error = SFE_BAD_OPEN_MODE ; goto error_exit ; } ; if (sfinfo == NULL) { error = SFE_BAD_SF_INFO_PTR ; goto error_exit ; } ; if (psf->file.mode == SFM_READ) { if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_RAW) { if (sf_format_check (sfinfo) == 0) { error = SFE_RAW_BAD_FORMAT ; goto error_exit ; } ; } else memset (sfinfo, 0, sizeof (SF_INFO)) ; } ; memcpy (&psf->sf, sfinfo, sizeof (SF_INFO)) ; psf->Magick = SNDFILE_MAGICK ; psf->norm_float = SF_TRUE ; psf->norm_double = SF_TRUE ; psf->dataoffset = -1 ; psf->datalength = -1 ; psf->read_current = -1 ; psf->write_current = -1 ; psf->auto_header = SF_FALSE ; psf->rwf_endian = SF_ENDIAN_LITTLE ; psf->seek = psf_default_seek ; psf->float_int_mult = 0 ; psf->float_max = -1.0 ; /* An attempt at a per SF_PRIVATE unique id. */ psf->unique_id = psf_rand_int32 () ; psf->sf.sections = 1 ; psf->is_pipe = psf_is_pipe (psf) ; if (psf->is_pipe) { psf->sf.seekable = SF_FALSE ; psf->filelength = SF_COUNT_MAX ; } else { psf->sf.seekable = SF_TRUE ; /* File is open, so get the length. 
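** (The pipe case above sets filelength to SF_COUNT_MAX instead, because the
** length of a pipe cannot be determined.)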
*/ psf->filelength = psf_get_filelen (psf) ; } ; if (psf->fileoffset > 0) { switch (psf->file.mode) { case SFM_READ : if (psf->filelength < 44) { psf_log_printf (psf, "Short filelength: %D (fileoffset: %D)\n", psf->filelength, psf->fileoffset) ; error = SFE_BAD_OFFSET ; goto error_exit ; } ; break ; case SFM_WRITE : psf->fileoffset = 0 ; psf_fseek (psf, 0, SEEK_END) ; psf->fileoffset = psf_ftell (psf) ; break ; case SFM_RDWR : error = SFE_NO_EMBEDDED_RDWR ; goto error_exit ; } ; psf_log_printf (psf, "Embedded file offset : %D\n", psf->fileoffset) ; } ; if (psf->filelength == SF_COUNT_MAX) psf_log_printf (psf, "Length : unknown\n") ; else psf_log_printf (psf, "Length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->filelength == 0)) { /* If the file is being opened for write or RDWR and the file is currently ** empty, then the SF_INFO struct must contain valid data. */ if ((SF_CONTAINER (psf->sf.format)) == 0) { error = SFE_ZERO_MAJOR_FORMAT ; goto error_exit ; } ; if ((SF_CODEC (psf->sf.format)) == 0) { error = SFE_ZERO_MINOR_FORMAT ; goto error_exit ; } ; if (sf_format_check (&psf->sf) == 0) { error = SFE_BAD_OPEN_FORMAT ; goto error_exit ; } ; } else if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) { /* If type RAW has not been specified then need to figure out file type. */ psf->sf.format = guess_file_type (psf) ; if (psf->sf.format == 0) psf->sf.format = format_from_extension (psf) ; } ; /* Prevent unnecessary seeks */ psf->last_op = psf->file.mode ; /* Set bytewidth if known. */ switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_S8 : case SF_FORMAT_PCM_U8 : case SF_FORMAT_ULAW : case SF_FORMAT_ALAW : case SF_FORMAT_DPCM_8 : psf->bytewidth = 1 ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_DPCM_16 : psf->bytewidth = 2 ; break ; case SF_FORMAT_PCM_24 : psf->bytewidth = 3 ; break ; case SF_FORMAT_PCM_32 : case SF_FORMAT_FLOAT : psf->bytewidth = 4 ; break ; case SF_FORMAT_DOUBLE : psf->bytewidth = 8 ; break ; } ; /* Call the initialisation function for the relevant file type. 
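** Each container specific *_open() function below parses (or, in write mode,
** generates) the format's header and installs the codec's read, write and
** seek callbacks on the SF_PRIVATE struct.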
*/ switch (SF_CONTAINER (psf->sf.format)) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : error = wav_open (psf) ; break ; case SF_FORMAT_AIFF : error = aiff_open (psf) ; break ; case SF_FORMAT_AU : error = au_open (psf) ; break ; case SF_FORMAT_RAW : error = raw_open (psf) ; break ; case SF_FORMAT_W64 : error = w64_open (psf) ; break ; case SF_FORMAT_RF64 : error = rf64_open (psf) ; break ; /* Lite remove start */ case SF_FORMAT_PAF : error = paf_open (psf) ; break ; case SF_FORMAT_SVX : error = svx_open (psf) ; break ; case SF_FORMAT_NIST : error = nist_open (psf) ; break ; case SF_FORMAT_IRCAM : error = ircam_open (psf) ; break ; case SF_FORMAT_VOC : error = voc_open (psf) ; break ; case SF_FORMAT_SDS : error = sds_open (psf) ; break ; case SF_FORMAT_OGG : error = ogg_open (psf) ; break ; case SF_FORMAT_TXW : error = txw_open (psf) ; break ; case SF_FORMAT_WVE : error = wve_open (psf) ; break ; case SF_FORMAT_DWD : error = dwd_open (psf) ; break ; case SF_FORMAT_MAT4 : error = mat4_open (psf) ; break ; case SF_FORMAT_MAT5 : error = mat5_open (psf) ; break ; case SF_FORMAT_PVF : error = pvf_open (psf) ; break ; case SF_FORMAT_XI : error = xi_open (psf) ; break ; case SF_FORMAT_HTK : error = htk_open (psf) ; break ; case SF_FORMAT_SD2 : error = sd2_open (psf) ; break ; case SF_FORMAT_REX2 : error = rx2_open (psf) ; break ; case SF_FORMAT_AVR : error = avr_open (psf) ; break ; case SF_FORMAT_FLAC : error = flac_open (psf) ; break ; case SF_FORMAT_CAF : error = caf_open (psf) ; break ; case SF_FORMAT_MPC2K : error = mpc2k_open (psf) ; break ; /* Lite remove end */ default : error = SFE_UNKNOWN_FORMAT ; } ; if (error) goto error_exit ; /* For now, check whether embedding is supported. */ format = SF_CONTAINER (psf->sf.format) ; if (psf->fileoffset > 0) { switch (format) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_AIFF : case SF_FORMAT_AU : /* Actual embedded files. */ break ; case SF_FORMAT_FLAC : /* Flac with an ID3v2 header? */ break ; default : error = SFE_NO_EMBED_SUPPORT ; goto error_exit ; } ; } ; if (psf->fileoffset > 0) psf_log_printf (psf, "Embedded file length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_RDWR && sf_format_check (&psf->sf) == 0) { error = SFE_BAD_MODE_RW ; goto error_exit ; } ; if (validate_sfinfo (&psf->sf) == 0) { psf_log_SF_INFO (psf) ; save_header_info (psf) ; error = SFE_BAD_SF_INFO ; goto error_exit ; } ; if (validate_psf (psf) == 0) { save_header_info (psf) ; error = SFE_INTERNAL ; goto error_exit ; } ; psf->read_current = 0 ; psf->write_current = 0 ; if (psf->file.mode == SFM_RDWR) { psf->write_current = psf->sf.frames ; psf->have_written = psf->sf.frames > 0 ? SF_TRUE : SF_FALSE ; } ; memcpy (sfinfo, &psf->sf, sizeof (SF_INFO)) ; if (psf->file.mode == SFM_WRITE) { /* Zero out these fields. */ sfinfo->frames = 0 ; sfinfo->sections = 0 ; sfinfo->seekable = 0 ; } ; return (SNDFILE *) psf ; error_exit : sf_errno = error ; if (error == SFE_SYSTEM) snprintf (sf_syserr, sizeof (sf_syserr), "%s", psf->syserr) ; snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; switch (error) { case SF_ERR_SYSTEM : case SF_ERR_UNSUPPORTED_ENCODING : case SFE_UNIMPLEMENTED : break ; case SFE_RAW_BAD_FORMAT : break ; default : if (psf->file.mode == SFM_READ) { psf_log_printf (psf, "Parse error : %s\n", sf_error_number (error)) ; error = SF_ERR_MALFORMED_FILE ; } ; } ; psf_close (psf) ; return NULL ; } /* psf_open_file */ /*============================================================================== ** Chunk getting and setting. 
** This works for AIFF, CAF, RF64 and WAV.
** It doesn't work for W64 because W64 uses weird GUID style chunk markers.
*/

int
sf_set_chunk (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info)
{	SF_PRIVATE	*psf ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (chunk_info == NULL || chunk_info->data == NULL)
		return SFE_BAD_CHUNK_PTR ;

	if (psf->set_chunk)
		return psf->set_chunk (psf, chunk_info) ;

	return SFE_BAD_CHUNK_FORMAT ;
} /* sf_set_chunk */

SF_CHUNK_ITERATOR *
sf_get_chunk_iterator (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info)
{	SF_PRIVATE	*psf ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (chunk_info)
		return psf_get_chunk_iterator (psf, chunk_info->id) ;

	return psf_get_chunk_iterator (psf, NULL) ;
} /* sf_get_chunk_iterator */

SF_CHUNK_ITERATOR *
sf_next_chunk_iterator (SF_CHUNK_ITERATOR * iterator)
{	SF_PRIVATE	*psf ;
	SNDFILE	*sndfile = iterator ? iterator->sndfile : NULL ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->next_chunk_iterator)
		return psf->next_chunk_iterator (psf, iterator) ;

	return NULL ;
} /* sf_next_chunk_iterator */

int
sf_get_chunk_size (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info)
{	SF_PRIVATE	*psf ;
	SNDFILE	*sndfile = iterator ? iterator->sndfile : NULL ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (chunk_info == NULL)
		return SFE_BAD_CHUNK_PTR ;

	if (psf->get_chunk_size)
		return psf->get_chunk_size (psf, iterator, chunk_info) ;

	return SFE_BAD_CHUNK_FORMAT ;
} /* sf_get_chunk_size */

int
sf_get_chunk_data (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info)
{	SF_PRIVATE	*psf ;
	SNDFILE	*sndfile = iterator ? iterator->sndfile : NULL ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (chunk_info == NULL || chunk_info->data == NULL)
		return SFE_BAD_CHUNK_PTR ;

	if (psf->get_chunk_data)
		return psf->get_chunk_data (psf, iterator, chunk_info) ;

	return SFE_BAD_CHUNK_FORMAT ;
} /* sf_get_chunk_data */
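/*------------------------------------------------------------------------------
** Editor's illustrative sketch (not part of the library source) : one way a
** client of the public API could walk every chunk of an open file using the
** iterator functions above. Assumes <sndfile.h>, <stdio.h>, <stdlib.h> and
** <string.h> ; error handling is kept minimal.
*/

static void
dump_chunks (SNDFILE * sndfile)
{	SF_CHUNK_INFO chunk_info ;
	SF_CHUNK_ITERATOR * iterator = sf_get_chunk_iterator (sndfile, NULL) ;

	while (iterator != NULL)
	{	memset (&chunk_info, 0, sizeof (chunk_info)) ;

		/* Query the chunk id and size first, then allocate a buffer and fetch the data. */
		if (sf_get_chunk_size (iterator, &chunk_info) == SF_ERR_NO_ERROR && chunk_info.datalen > 0)
		{	chunk_info.data = malloc (chunk_info.datalen) ;
			if (chunk_info.data != NULL && sf_get_chunk_data (iterator, &chunk_info) == SF_ERR_NO_ERROR)
				printf ("%.*s : %u bytes\n", (int) chunk_info.id_size, chunk_info.id, chunk_info.datalen) ;
			free (chunk_info.data) ;
			} ;

		iterator = sf_next_chunk_iterator (iterator) ;
		} ;
} /* dump_chunks */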
/* ** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #define SNDFILE_MAGICK 0x1234C0DE #ifdef __APPLE__ /* ** Detect if a compile for a universal binary is being attempted and barf if it is. ** See the URL below for the rationale. */ #ifdef __BIG_ENDIAN__ #if (CPU_IS_LITTLE_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #ifdef __LITTLE_ENDIAN__ #if (CPU_IS_BIG_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #endif typedef struct { int error ; const char *str ; } ErrorStruct ; static ErrorStruct SndfileErrors [] = { /* Public error values and their associated strings. */ { SF_ERR_NO_ERROR , "No Error." }, { SF_ERR_UNRECOGNISED_FORMAT , "Format not recognised." }, { SF_ERR_SYSTEM , "System error." /* Often replaced. */ }, { SF_ERR_MALFORMED_FILE , "Supported file format but file is malformed." }, { SF_ERR_UNSUPPORTED_ENCODING , "Supported file format but unsupported encoding." }, /* Private error values and their associated strings. */ { SFE_ZERO_MAJOR_FORMAT , "Error : major format is 0." }, { SFE_ZERO_MINOR_FORMAT , "Error : minor format is 0." }, { SFE_BAD_FILE , "File does not exist or is not a regular file (possibly a pipe?)." }, { SFE_BAD_FILE_READ , "File exists but no data could be read." }, { SFE_OPEN_FAILED , "Could not open file." }, { SFE_BAD_SNDFILE_PTR , "Not a valid SNDFILE* pointer." }, { SFE_BAD_SF_INFO_PTR , "NULL SF_INFO pointer passed to libsndfile." }, { SFE_BAD_SF_INCOMPLETE , "SF_PRIVATE struct incomplete and end of header parsing." }, { SFE_BAD_FILE_PTR , "Bad FILE pointer." }, { SFE_BAD_INT_PTR , "Internal error, Bad pointer." }, { SFE_BAD_STAT_SIZE , "Error : software was misconfigured at compile time (sizeof statbuf.st_size)." }, { SFE_NO_TEMP_DIR , "Error : Could not file temp dir." }, { SFE_MALLOC_FAILED , "Internal malloc () failed." }, { SFE_UNIMPLEMENTED , "File contains data in an unimplemented format." }, { SFE_BAD_READ_ALIGN , "Attempt to read a non-integer number of channels." }, { SFE_BAD_WRITE_ALIGN , "Attempt to write a non-integer number of channels." }, { SFE_UNKNOWN_FORMAT , "File contains data in an unknown format." }, { SFE_NOT_READMODE , "Read attempted on file currently open for write." }, { SFE_NOT_WRITEMODE , "Write attempted on file currently open for read." }, { SFE_BAD_MODE_RW , "Error : This file format does not support read/write mode." }, { SFE_BAD_SF_INFO , "Internal error : SF_INFO struct incomplete." }, { SFE_BAD_OFFSET , "Error : supplied offset beyond end of file." 
}, { SFE_NO_EMBED_SUPPORT , "Error : embedding not supported for this file format." }, { SFE_NO_EMBEDDED_RDWR , "Error : cannot open embedded file read/write." }, { SFE_NO_PIPE_WRITE , "Error : this file format does not support pipe write." }, { SFE_BAD_VIRTUAL_IO , "Error : bad pointer on SF_VIRTUAL_IO struct." }, { SFE_BAD_BROADCAST_INFO_SIZE , "Error : bad coding_history_size in SF_BROADCAST_INFO struct." }, { SFE_BAD_BROADCAST_INFO_TOO_BIG , "Error : SF_BROADCAST_INFO struct too large." }, { SFE_BAD_CART_INFO_SIZE , "Error: SF_CART_INFO struct too large." }, { SFE_BAD_CART_INFO_TOO_BIG , "Error: bag tag_text_size in SF_CART_INFO struct." }, { SFE_INTERLEAVE_MODE , "Attempt to write to file with non-interleaved data." }, { SFE_INTERLEAVE_SEEK , "Bad karma in seek during interleave read operation." }, { SFE_INTERLEAVE_READ , "Bad karma in read during interleave read operation." }, { SFE_INTERNAL , "Unspecified internal error." }, { SFE_BAD_COMMAND_PARAM , "Bad parameter passed to function sf_command." }, { SFE_BAD_ENDIAN , "Bad endian-ness. Try default endian-ness" }, { SFE_CHANNEL_COUNT_ZERO , "Channel count is zero." }, { SFE_CHANNEL_COUNT , "Too many channels specified." }, { SFE_CHANNEL_COUNT_BAD , "Bad channel count." }, { SFE_BAD_SEEK , "Internal psf_fseek() failed." }, { SFE_NOT_SEEKABLE , "Seek attempted on unseekable file type." }, { SFE_AMBIGUOUS_SEEK , "Error : combination of file open mode and seek command is ambiguous." }, { SFE_WRONG_SEEK , "Error : invalid seek parameters." }, { SFE_SEEK_FAILED , "Error : parameters OK, but psf_seek() failed." }, { SFE_BAD_OPEN_MODE , "Error : bad mode parameter for file open." }, { SFE_OPEN_PIPE_RDWR , "Error : attempt to open a pipe in read/write mode." }, { SFE_RDWR_POSITION , "Error on RDWR position (cryptic)." }, { SFE_RDWR_BAD_HEADER , "Error : Cannot open file in read/write mode due to string data in header." }, { SFE_CMD_HAS_DATA , "Error : Command fails because file already has audio data." }, { SFE_STR_NO_SUPPORT , "Error : File type does not support string data." }, { SFE_STR_NOT_WRITE , "Error : Trying to set a string when file is not in write mode." }, { SFE_STR_MAX_DATA , "Error : Maximum string data storage reached." }, { SFE_STR_MAX_COUNT , "Error : Maximum string data count reached." }, { SFE_STR_BAD_TYPE , "Error : Bad string data type." }, { SFE_STR_NO_ADD_END , "Error : file type does not support strings added at end of file." }, { SFE_STR_BAD_STRING , "Error : bad string." }, { SFE_STR_WEIRD , "Error : Weird string error." }, { SFE_WAV_NO_RIFF , "Error in WAV file. No 'RIFF' chunk marker." }, { SFE_WAV_NO_WAVE , "Error in WAV file. No 'WAVE' chunk marker." }, { SFE_WAV_NO_FMT , "Error in WAV/W64/RF64 file. No 'fmt ' chunk marker." }, { SFE_WAV_BAD_FMT , "Error in WAV/W64/RF64 file. Malformed 'fmt ' chunk." }, { SFE_WAV_FMT_SHORT , "Error in WAV/W64/RF64 file. Short 'fmt ' chunk." }, { SFE_WAV_BAD_FACT , "Error in WAV file. 'fact' chunk out of place." }, { SFE_WAV_BAD_PEAK , "Error in WAV file. Bad 'PEAK' chunk." }, { SFE_WAV_PEAK_B4_FMT , "Error in WAV file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_WAV_BAD_FORMAT , "Error in WAV file. Errors in 'fmt ' chunk." }, { SFE_WAV_BAD_BLOCKALIGN , "Error in WAV file. Block alignment in 'fmt ' chunk is incorrect." }, { SFE_WAV_NO_DATA , "Error in WAV file. No 'data' chunk marker." }, { SFE_WAV_BAD_LIST , "Error in WAV file. Malformed LIST chunk." }, { SFE_WAV_UNKNOWN_CHUNK , "Error in WAV file. File contains an unknown chunk marker." 
}, { SFE_WAV_WVPK_DATA , "Error in WAV file. Data is in WAVPACK format." }, { SFE_WAV_ADPCM_NOT4BIT , "Error in ADPCM WAV file. Invalid bit width." }, { SFE_WAV_ADPCM_CHANNELS , "Error in ADPCM WAV file. Invalid number of channels." }, { SFE_WAV_ADPCM_SAMPLES , "Error in ADPCM WAV file. Invalid number of samples per block." }, { SFE_WAV_GSM610_FORMAT , "Error in GSM610 WAV file. Invalid format chunk." }, { SFE_AIFF_NO_FORM , "Error in AIFF file, bad 'FORM' marker." }, { SFE_AIFF_AIFF_NO_FORM , "Error in AIFF file, 'AIFF' marker without 'FORM'." }, { SFE_AIFF_COMM_NO_FORM , "Error in AIFF file, 'COMM' marker without 'FORM'." }, { SFE_AIFF_SSND_NO_COMM , "Error in AIFF file, 'SSND' marker without 'COMM'." }, { SFE_AIFF_UNKNOWN_CHUNK , "Error in AIFF file, unknown chunk." }, { SFE_AIFF_COMM_CHUNK_SIZE, "Error in AIFF file, bad 'COMM' chunk size." }, { SFE_AIFF_BAD_COMM_CHUNK , "Error in AIFF file, bad 'COMM' chunk." }, { SFE_AIFF_PEAK_B4_COMM , "Error in AIFF file. 'PEAK' chunk found before 'COMM' chunk." }, { SFE_AIFF_BAD_PEAK , "Error in AIFF file. Bad 'PEAK' chunk." }, { SFE_AIFF_NO_SSND , "Error in AIFF file, bad 'SSND' chunk." }, { SFE_AIFF_NO_DATA , "Error in AIFF file, no sound data." }, { SFE_AIFF_RW_SSND_NOT_LAST, "Error in AIFF file, RDWR only possible if SSND chunk at end of file." }, { SFE_AU_UNKNOWN_FORMAT , "Error in AU file, unknown format." }, { SFE_AU_NO_DOTSND , "Error in AU file, missing '.snd' or 'dns.' marker." }, { SFE_AU_EMBED_BAD_LEN , "Embedded AU file with unknown length." }, { SFE_RAW_READ_BAD_SPEC , "Error while opening RAW file for read. Must specify format and channels.\n" "Possibly trying to open unsupported format." }, { SFE_RAW_BAD_BITWIDTH , "Error. RAW file bitwidth must be a multiple of 8." }, { SFE_RAW_BAD_FORMAT , "Error. Bad format field in SF_INFO struct when opening a RAW file for read." }, { SFE_PAF_NO_MARKER , "Error in PAF file, no marker." }, { SFE_PAF_VERSION , "Error in PAF file, bad version." }, { SFE_PAF_UNKNOWN_FORMAT , "Error in PAF file, unknown format." }, { SFE_PAF_SHORT_HEADER , "Error in PAF file. File shorter than minimal header." }, { SFE_PAF_BAD_CHANNELS , "Error in PAF file. Bad channel count." }, { SFE_SVX_NO_FORM , "Error in 8SVX / 16SV file, no 'FORM' marker." }, { SFE_SVX_NO_BODY , "Error in 8SVX / 16SV file, no 'BODY' marker." }, { SFE_SVX_NO_DATA , "Error in 8SVX / 16SV file, no sound data." }, { SFE_SVX_BAD_COMP , "Error in 8SVX / 16SV file, unsupported compression format." }, { SFE_SVX_BAD_NAME_LENGTH , "Error in 8SVX / 16SV file, NAME chunk too long." }, { SFE_NIST_BAD_HEADER , "Error in NIST file, bad header." }, { SFE_NIST_CRLF_CONVERISON, "Error : NIST file damaged by Windows CR -> CRLF conversion process." }, { SFE_NIST_BAD_ENCODING , "Error in NIST file, unsupported compression format." }, { SFE_VOC_NO_CREATIVE , "Error in VOC file, no 'Creative Voice File' marker." }, { SFE_VOC_BAD_FORMAT , "Error in VOC file, bad format." }, { SFE_VOC_BAD_VERSION , "Error in VOC file, bad version number." }, { SFE_VOC_BAD_MARKER , "Error in VOC file, bad marker in file." }, { SFE_VOC_BAD_SECTIONS , "Error in VOC file, incompatible VOC sections." }, { SFE_VOC_MULTI_SAMPLERATE, "Error in VOC file, more than one sample rate defined." }, { SFE_VOC_MULTI_SECTION , "Unimplemented VOC file feature, file contains multiple sound sections." }, { SFE_VOC_MULTI_PARAM , "Error in VOC file, file contains multiple bit or channel widths." }, { SFE_VOC_SECTION_COUNT , "Error in VOC file, too many sections." 
}, { SFE_VOC_NO_PIPE , "Error : not able to operate on VOC files over a pipe." }, { SFE_IRCAM_NO_MARKER , "Error in IRCAM file, bad IRCAM marker." }, { SFE_IRCAM_BAD_CHANNELS , "Error in IRCAM file, bad channel count." }, { SFE_IRCAM_UNKNOWN_FORMAT, "Error in IRCAM file, unknown encoding format." }, { SFE_W64_64_BIT , "Error in W64 file, file contains 64 bit offset." }, { SFE_W64_NO_RIFF , "Error in W64 file. No 'riff' chunk marker." }, { SFE_W64_NO_WAVE , "Error in W64 file. No 'wave' chunk marker." }, { SFE_W64_NO_DATA , "Error in W64 file. No 'data' chunk marker." }, { SFE_W64_ADPCM_NOT4BIT , "Error in ADPCM W64 file. Invalid bit width." }, { SFE_W64_ADPCM_CHANNELS , "Error in ADPCM W64 file. Invalid number of channels." }, { SFE_W64_GSM610_FORMAT , "Error in GSM610 W64 file. Invalid format chunk." }, { SFE_MAT4_BAD_NAME , "Error in MAT4 file. No variable name." }, { SFE_MAT4_NO_SAMPLERATE , "Error in MAT4 file. No sample rate." }, { SFE_MAT5_BAD_ENDIAN , "Error in MAT5 file. Not able to determine endian-ness." }, { SFE_MAT5_NO_BLOCK , "Error in MAT5 file. Bad block structure." }, { SFE_MAT5_SAMPLE_RATE , "Error in MAT5 file. Not able to determine sample rate." }, { SFE_PVF_NO_PVF1 , "Error in PVF file. No PVF1 marker." }, { SFE_PVF_BAD_HEADER , "Error in PVF file. Bad header." }, { SFE_PVF_BAD_BITWIDTH , "Error in PVF file. Bad bit width." }, { SFE_XI_BAD_HEADER , "Error in XI file. Bad header." }, { SFE_XI_EXCESS_SAMPLES , "Error in XI file. Excess samples in file." }, { SFE_XI_NO_PIPE , "Error : not able to operate on XI files over a pipe." }, { SFE_HTK_NO_PIPE , "Error : not able to operate on HTK files over a pipe." }, { SFE_SDS_NOT_SDS , "Error : not an SDS file." }, { SFE_SDS_BAD_BIT_WIDTH , "Error : bad bit width for SDS file." }, { SFE_SD2_FD_DISALLOWED , "Error : cannot open SD2 file without a file name." }, { SFE_SD2_BAD_DATA_OFFSET , "Error : bad data offset." }, { SFE_SD2_BAD_MAP_OFFSET , "Error : bad map offset." }, { SFE_SD2_BAD_DATA_LENGTH , "Error : bad data length." }, { SFE_SD2_BAD_MAP_LENGTH , "Error : bad map length." }, { SFE_SD2_BAD_RSRC , "Error : bad resource fork." }, { SFE_SD2_BAD_SAMPLE_SIZE , "Error : bad sample size." }, { SFE_FLAC_BAD_HEADER , "Error : bad flac header." }, { SFE_FLAC_NEW_DECODER , "Error : problem while creating flac decoder." }, { SFE_FLAC_INIT_DECODER , "Error : problem while initialization of the flac decoder." }, { SFE_FLAC_LOST_SYNC , "Error : flac decoder lost sync." }, { SFE_FLAC_BAD_SAMPLE_RATE, "Error : flac does not support this sample rate." }, { SFE_FLAC_UNKOWN_ERROR , "Error : unknown error in flac decoder." }, { SFE_WVE_NOT_WVE , "Error : not a WVE file." }, { SFE_WVE_NO_PIPE , "Error : not able to operate on WVE files over a pipe." }, { SFE_DWVW_BAD_BITWIDTH , "Error : Bad bit width for DWVW encoding. Must be 12, 16 or 24." }, { SFE_G72X_NOT_MONO , "Error : G72x encoding does not support more than 1 channel." }, { SFE_VORBIS_ENCODER_BUG , "Error : Sample rate chosen is known to trigger a Vorbis encoder bug on this CPU." }, { SFE_RF64_NOT_RF64 , "Error : Not an RF64 file." }, { SFE_RF64_PEAK_B4_FMT , "Error in RF64 file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_RF64_NO_DATA , "Error in RF64 file. No 'data' chunk marker." }, { SFE_ALAC_FAIL_TMPFILE , "Error : Failed to open tmp file for ALAC encoding." }, { SFE_BAD_CHUNK_PTR , "Error : Bad SF_CHUNK_INFO pointer." }, { SFE_UNKNOWN_CHUNK , "Error : Unknown chunk marker." }, { SFE_BAD_CHUNK_FORMAT , "Error : Reading/writing chunks from this file format is not supported." 
}, { SFE_BAD_CHUNK_MARKER , "Error : Bad chunk marker." }, { SFE_BAD_CHUNK_DATA_PTR , "Error : Bad data pointer in SF_CHUNK_INFO struct." }, { SFE_FILENAME_TOO_LONG , "Error : Supplied filename too long." }, { SFE_BAD_HEADER_ALLOC , "Error : Required header allocation is too large." }, { SFE_MAX_ERROR , "Maximum error number." }, { SFE_MAX_ERROR + 1 , NULL } } ; /*------------------------------------------------------------------------------ */ static int format_from_extension (SF_PRIVATE *psf) ; static int guess_file_type (SF_PRIVATE *psf) ; static int validate_sfinfo (SF_INFO *sfinfo) ; static int validate_psf (SF_PRIVATE *psf) ; static void save_header_info (SF_PRIVATE *psf) ; static int copy_filename (SF_PRIVATE *psf, const char *path) ; static int psf_close (SF_PRIVATE *psf) ; static int try_resource_fork (SF_PRIVATE * psf) ; /*------------------------------------------------------------------------------ ** Private (static) variables. */ int sf_errno = 0 ; static char sf_parselog [SF_BUFFER_LEN] = { 0 } ; static char sf_syserr [SF_SYSERR_LEN] = { 0 } ; /*------------------------------------------------------------------------------ */ #define VALIDATE_SNDFILE_AND_ASSIGN_PSF(a, b, c) \ { if ((a) == NULL) \ { sf_errno = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ (b) = (SF_PRIVATE*) (a) ; \ if ((b)->virtual_io == SF_FALSE && \ psf_file_valid (b) == 0) \ { (b)->error = SFE_BAD_FILE_PTR ; \ return 0 ; \ } ; \ if ((b)->Magick != SNDFILE_MAGICK) \ { (b)->error = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ if (c) (b)->error = 0 ; \ } /*------------------------------------------------------------------------------ ** Public functions. */ SNDFILE* sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */ SNDFILE* sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */ SNDFILE* sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. 
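** get_filelen, seek and tell are required for every mode ; read is only
** required when opening for SFM_READ / SFM_RDWR and write only for
** SFM_WRITE / SFM_RDWR, as checked below.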
*/ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */ int sf_close (SNDFILE *sndfile) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_close (psf) ; } /* sf_close */ void sf_write_sync (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE *) sndfile) == NULL) return ; psf_fsync (psf) ; return ; } /* sf_write_sync */ /*============================================================================== */ const char* sf_error_number (int errnum) { static const char *bad_errnum = "No error defined for this error number. This is a bug in libsndfile." ; int k ; if (errnum == SFE_MAX_ERROR) return SndfileErrors [0].str ; if (errnum < 0 || errnum > SFE_MAX_ERROR) { /* This really shouldn't happen in release versions. */ printf ("Not a valid error number (%d).\n", errnum) ; return bad_errnum ; } ; for (k = 0 ; SndfileErrors [k].str ; k++) if (errnum == SndfileErrors [k].error) return SndfileErrors [k].str ; return bad_errnum ; } /* sf_error_number */ const char* sf_strerror (SNDFILE *sndfile) { SF_PRIVATE *psf = NULL ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; if (errnum == SFE_SYSTEM && sf_syserr [0]) return sf_syserr ; } else { psf = (SF_PRIVATE *) sndfile ; if (psf->Magick != SNDFILE_MAGICK) return "sf_strerror : Bad magic number." 
; errnum = psf->error ; if (errnum == SFE_SYSTEM && psf->syserr [0]) return psf->syserr ; } ; return sf_error_number (errnum) ; } /* sf_strerror */ /*------------------------------------------------------------------------------ */ int sf_error (SNDFILE *sndfile) { SF_PRIVATE *psf ; if (sndfile == NULL) return sf_errno ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; if (psf->error) return psf->error ; return 0 ; } /* sf_error */ /*------------------------------------------------------------------------------ */ int sf_perror (SNDFILE *sndfile) { SF_PRIVATE *psf ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; } else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; fprintf (stderr, "%s\n", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_perror */ /*------------------------------------------------------------------------------ */ int sf_error_str (SNDFILE *sndfile, char *str, size_t maxlen) { SF_PRIVATE *psf ; int errnum ; if (str == NULL) return SFE_INTERNAL ; if (sndfile == NULL) errnum = sf_errno ; else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; snprintf (str, maxlen, "%s", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_error_str */ /*============================================================================== */ int sf_format_check (const SF_INFO *info) { int subformat, endian ; subformat = SF_CODEC (info->format) ; endian = SF_ENDIAN (info->format) ; /* This is the place where each file format can check if the suppiled ** SF_INFO struct is valid. ** Return 0 on failure, 1 ons success. */ if (info->channels < 1 || info->channels > SF_MAX_CHANNELS) return 0 ; if (info->samplerate < 0) return 0 ; switch (SF_CONTAINER (info->format)) { case SF_FORMAT_WAV : /* WAV now allows both endian, RIFF or RIFX (little or big respectively) */ if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_WAVEX : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_AIFF : /* AIFF does allow both endian-nesses for PCM data.*/ if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; /* For other encodings reject any endian-ness setting. 
*/ if (endian != 0) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_IMA_ADPCM && (info->channels == 1 || info->channels == 2)) return 1 ; break ; case SF_FORMAT_AU : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_24 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_40 && info->channels == 1) return 1 ; break ; case SF_FORMAT_CAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_ALAC_16 || subformat == SF_FORMAT_ALAC_20) return 1 ; if (subformat == SF_FORMAT_ALAC_24 || subformat == SF_FORMAT_ALAC_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_RAW : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_ULAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_VOX_ADPCM && info->channels == 1) return 1 ; break ; case SF_FORMAT_PAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SVX : /* SVX only supports writing mono SVX files. */ if (info->channels > 1) return 0 ; /* Always big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_NIST : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_IRCAM : if (info->channels > 256) return 0 ; if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_FLOAT) return 1 ; break ; case SF_FORMAT_VOC : if (info->channels > 2) return 0 ; /* VOC is strictly little endian. 
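** A request for big or CPU endian-ness is therefore rejected below.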
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_W64 : /* W64 is strictly little endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT4 : if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT5 : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_PVF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_XI : if (info->channels != 1) return 0 ; if (subformat == SF_FORMAT_DPCM_8 || subformat == SF_FORMAT_DPCM_16) return 1 ; break ; case SF_FORMAT_HTK : if (info->channels != 1) return 0 ; /* HTK is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_SDS : if (info->channels != 1) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_AVR : if (info->channels > 2) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_FLAC : /* FLAC can't do more than 8 channels. */ if (info->channels > 8) return 0 ; if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SD2 : /* SD2 is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_WVE : if (info->channels > 1) return 0 ; /* WVE is strictly big endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_OGG : if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_VORBIS) return 1 ; break ; case SF_FORMAT_MPC2K : if (info->channels > 2) return 0 ; /* MPC2000 is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_RF64 : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; default : break ; } ; return 0 ; } /* sf_format_check */ /*------------------------------------------------------------------------------ */ const char * sf_version_string (void) { #if ENABLE_EXPERIMENTAL_CODE return PACKAGE_NAME "-" PACKAGE_VERSION "-exp" ; #else return PACKAGE_NAME "-" PACKAGE_VERSION ; #endif } /*------------------------------------------------------------------------------ */ int sf_command (SNDFILE *sndfile, int command, void *data, int datasize) { SF_PRIVATE *psf = (SF_PRIVATE *) sndfile ; double quality ; int old_value ; /* This set of commands do not need the sndfile parameter. */ switch (command) { case SFC_GET_LIB_VERSION : if (data == NULL) { if (psf) psf->error = SFE_BAD_COMMAND_PARAM ; return SFE_BAD_COMMAND_PARAM ; } ; snprintf (data, datasize, "%s", sf_version_string ()) ; return strlen (data) ; case SFC_GET_SIMPLE_FORMAT_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_simple_count () ; return 0 ; case SFC_GET_SIMPLE_FORMAT : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_simple (data) ; case SFC_GET_FORMAT_MAJOR_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_major_count () ; return 0 ; case SFC_GET_FORMAT_MAJOR : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_major (data) ; case SFC_GET_FORMAT_SUBTYPE_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_subtype_count () ; return 0 ; case SFC_GET_FORMAT_SUBTYPE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_subtype (data) ; case SFC_GET_FORMAT_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_info (data) ; } ; if (sndfile == NULL && command == SFC_GET_LOG_INFO) { if (data == NULL) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; snprintf (data, datasize, "%s", sf_parselog) ; return strlen (data) ; } ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; switch (command) { case SFC_SET_NORM_FLOAT : old_value = psf->norm_float ; psf->norm_float = (datasize) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_CURRENT_SF_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; memcpy (data, &psf->sf, sizeof (SF_INFO)) ; break ; case SFC_SET_NORM_DOUBLE : old_value = psf->norm_double ; psf->norm_double = (datasize) ? 
SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_NORM_FLOAT : return psf->norm_float ; case SFC_GET_NORM_DOUBLE : return psf->norm_double ; case SFC_SET_SCALE_FLOAT_INT_READ : old_value = psf->float_int_mult ; psf->float_int_mult = (datasize != 0) ? SF_TRUE : SF_FALSE ; if (psf->float_int_mult && psf->float_max < 0.0) /* Scale to prevent wrap-around distortion. */ psf->float_max = (32768.0 / 32767.0) * psf_calc_signal_max (psf, SF_FALSE) ; return old_value ; case SFC_SET_SCALE_INT_FLOAT_WRITE : old_value = psf->scale_int_float ; psf->scale_int_float = (datasize != 0) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_SET_ADD_PEAK_CHUNK : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and AIFF support the PEAK chunk. */ switch (format) { case SF_FORMAT_AIFF : case SF_FORMAT_CAF : case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_RF64 : break ; default : return SF_FALSE ; } ; format = SF_CODEC (psf->sf.format) ; /* Only files containg the following data types support the PEAK chunk. */ if (format != SF_FORMAT_FLOAT && format != SF_FORMAT_DOUBLE) return SF_FALSE ; } ; /* Can only do this is in SFM_WRITE mode. */ if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; /* Everything seems OK, so set psf->has_peak and re-write header. */ if (datasize == SF_FALSE && psf->peak_info != NULL) { free (psf->peak_info) ; psf->peak_info = NULL ; } else if (psf->peak_info == NULL) { psf->peak_info = peak_info_calloc (psf->sf.channels) ; if (psf->peak_info != NULL) psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return datasize ; case SFC_SET_ADD_HEADER_PAD_CHUNK : return SF_FALSE ; case SFC_GET_LOG_INFO : if (data == NULL) return SFE_BAD_COMMAND_PARAM ; snprintf (data, datasize, "%s", psf->parselog.buf) ; break ; case SFC_CALC_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_FALSE) ; break ; case SFC_CALC_NORM_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_TRUE) ; break ; case SFC_CALC_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_FALSE) ; case SFC_CALC_NORM_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_TRUE) ; case SFC_GET_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_signal_max (psf, (double *) data) ; case SFC_GET_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_max_all_channels (psf, (double*) data) ; case SFC_UPDATE_HEADER_NOW : if (psf->write_header) psf->write_header (psf, SF_TRUE) ; break ; case SFC_SET_UPDATE_HEADER_AUTO : psf->auto_header = datasize ? SF_TRUE : SF_FALSE ; return psf->auto_header ; break ; case SFC_SET_ADD_DITHER_ON_WRITE : case SFC_SET_ADD_DITHER_ON_READ : /* ** FIXME ! ** These are obsolete. Just return. 
** Remove some time after version 1.0.8. */ break ; case SFC_SET_DITHER_ON_WRITE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->write_dither, data, sizeof (psf->write_dither)) ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_WRITE) ; break ; case SFC_SET_DITHER_ON_READ : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->read_dither, data, sizeof (psf->read_dither)) ; if (psf->file.mode == SFM_READ || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_READ) ; break ; case SFC_FILE_TRUNCATE : if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_TRUE ; if (datasize != sizeof (sf_count_t)) return SF_TRUE ; if (data == NULL || datasize != sizeof (sf_count_t)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } else { sf_count_t position ; position = *((sf_count_t*) data) ; if (sf_seek (sndfile, position, SEEK_SET) != position) return SF_TRUE ; psf->sf.frames = position ; position = psf_fseek (psf, 0, SEEK_CUR) ; return psf_ftruncate (psf, position) ; } ; break ; case SFC_SET_RAW_START_OFFSET : if (data == NULL || datasize != sizeof (sf_count_t)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) return (psf->error = SFE_BAD_COMMAND_PARAM) ; psf->dataoffset = *((sf_count_t*) data) ; sf_seek (sndfile, 0, SEEK_CUR) ; break ; case SFC_GET_EMBED_FILE_INFO : if (data == NULL || datasize != sizeof (SF_EMBED_FILE_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; ((SF_EMBED_FILE_INFO*) data)->offset = psf->fileoffset ; ((SF_EMBED_FILE_INFO*) data)->length = psf->filelength ; break ; /* Lite remove start */ case SFC_TEST_IEEE_FLOAT_REPLACE : psf->ieee_replace = (datasize) ? SF_TRUE : SF_FALSE ; if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_FLOAT) float32_init (psf) ; else if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_DOUBLE) double64_init (psf) ; else return (psf->error = SFE_BAD_COMMAND_PARAM) ; break ; /* Lite remove end */ case SFC_SET_CLIPPING : psf->add_clipping = (datasize) ? SF_TRUE : SF_FALSE ; return psf->add_clipping ; case SFC_GET_CLIPPING : return psf->add_clipping ; case SFC_GET_LOOP_INFO : if (datasize != sizeof (SF_LOOP_INFO) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->loop_info == NULL) return SF_FALSE ; memcpy (data, psf->loop_info, sizeof (SF_LOOP_INFO)) ; return SF_TRUE ; case SFC_SET_BROADCAST_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 supports the BEXT (Broadcast) chunk. */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_WAVEX && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode. */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. 
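** The exception is when a broadcast chunk already exists (for instance one
** parsed from the file header), in which case it may still be updated.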
*/ if (psf->broadcast_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (broadcast_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_BROADCAST_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return broadcast_var_get (psf, data, datasize) ; case SFC_SET_CART_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 support cart chunk format */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->cart_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (cart_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_CART_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return cart_var_get (psf, data, datasize) ; case SFC_GET_CUE_COUNT : if (datasize != sizeof (uint32_t) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues != NULL) { *((uint32_t *) data) = psf->cues->cue_count ; return SF_TRUE ; } ; return SF_FALSE ; case SFC_GET_CUE : if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL) return SF_FALSE ; psf_get_cues (psf, data, datasize) ; return SF_TRUE ; case SFC_SET_CUE : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL && (psf->cues = psf_cues_dup (data)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; return SF_TRUE ; case SFC_GET_INSTRUMENT : if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL) return SF_FALSE ; memcpy (data, psf->instrument, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_SET_INSTRUMENT : /* If data has already been written this must fail. 
*/ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->instrument, data, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_RAW_DATA_NEEDS_ENDSWAP : return psf->data_endswap ; case SFC_GET_CHANNEL_MAP_INFO : if (psf->channel_map == NULL) return SF_FALSE ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; memcpy (data, psf->channel_map, datasize) ; return SF_TRUE ; case SFC_SET_CHANNEL_MAP_INFO : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; { int *iptr ; for (iptr = data ; iptr < (int*) data + psf->sf.channels ; iptr++) { if (*iptr <= SF_CHANNEL_MAP_INVALID || *iptr >= SF_CHANNEL_MAP_MAX) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; } ; } ; free (psf->channel_map) ; if ((psf->channel_map = malloc (datasize)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->channel_map, data, datasize) ; /* ** Pass the command down to the container's command handler. ** Don't pass user data, use validated psf->channel_map data instead. */ if (psf->command) return psf->command (psf, command, NULL, 0) ; return SF_FALSE ; case SFC_SET_VBR_ENCODING_QUALITY : if (data == NULL || datasize != sizeof (double)) return SF_FALSE ; quality = *((double *) data) ; quality = 1.0 - SF_MAX (0.0, SF_MIN (1.0, quality)) ; return sf_command (sndfile, SFC_SET_COMPRESSION_LEVEL, &quality, sizeof (quality)) ; default : /* Must be a file specific command. Pass it on. */ if (psf->command) return psf->command (psf, command, data, datasize) ; psf_log_printf (psf, "*** sf_command : cmd = 0x%X\n", command) ; return (psf->error = SFE_BAD_COMMAND_PARAM) ; } ; return 0 ; } /* sf_command */ /*------------------------------------------------------------------------------ */ sf_count_t sf_seek (SNDFILE *sndfile, sf_count_t offset, int whence) { SF_PRIVATE *psf ; sf_count_t seek_from_start = 0, retval ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (! psf->sf.seekable) { psf->error = SFE_NOT_SEEKABLE ; return PSF_SEEK_ERROR ; } ; /* If the whence parameter has a mode ORed in, check to see that ** it makes sense. */ if (((whence & SFM_MASK) == SFM_WRITE && psf->file.mode == SFM_READ) || ((whence & SFM_MASK) == SFM_READ && psf->file.mode == SFM_WRITE)) { psf->error = SFE_WRONG_SEEK ; return PSF_SEEK_ERROR ; } ; /* Convert all SEEK_CUR and SEEK_END into seek_from_start to be ** used with SEEK_SET. */ switch (whence) { /* The SEEK_SET behaviour is independant of mode. */ case SEEK_SET : case SEEK_SET | SFM_READ : case SEEK_SET | SFM_WRITE : case SEEK_SET | SFM_RDWR : seek_from_start = offset ; break ; /* The SEEK_CUR is a little more tricky. 
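** The starting point is the current read or write position, selected by the
** open mode or by an SFM_READ / SFM_WRITE qualifier ORed into whence.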
*/ case SEEK_CUR : if (offset == 0) { if (psf->file.mode == SFM_READ) return psf->read_current ; if (psf->file.mode == SFM_WRITE) return psf->write_current ; } ; if (psf->file.mode == SFM_READ) seek_from_start = psf->read_current + offset ; else if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) seek_from_start = psf->write_current + offset ; else psf->error = SFE_AMBIGUOUS_SEEK ; break ; case SEEK_CUR | SFM_READ : if (offset == 0) return psf->read_current ; seek_from_start = psf->read_current + offset ; break ; case SEEK_CUR | SFM_WRITE : if (offset == 0) return psf->write_current ; seek_from_start = psf->write_current + offset ; break ; /* The SEEK_END */ case SEEK_END : case SEEK_END | SFM_READ : case SEEK_END | SFM_WRITE : seek_from_start = psf->sf.frames + offset ; break ; default : psf->error = SFE_BAD_SEEK ; break ; } ; if (psf->error) return PSF_SEEK_ERROR ; if (psf->file.mode == SFM_RDWR || psf->file.mode == SFM_WRITE) { if (seek_from_start < 0) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; } else if (seek_from_start < 0 || seek_from_start > psf->sf.frames) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; if (psf->seek) { int new_mode = (whence & SFM_MASK) ? (whence & SFM_MASK) : psf->file.mode ; retval = psf->seek (psf, new_mode, seek_from_start) ; switch (new_mode) { case SFM_READ : psf->read_current = retval ; break ; case SFM_WRITE : psf->write_current = retval ; break ; case SFM_RDWR : psf->read_current = retval ; psf->write_current = retval ; new_mode = SFM_READ ; break ; } ; psf->last_op = new_mode ; return retval ; } ; psf->error = SFE_AMBIGUOUS_SEEK ; return PSF_SEEK_ERROR ; } /* sf_seek */ /*------------------------------------------------------------------------------ */ const char* sf_get_string (SNDFILE *sndfile, int str_type) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return NULL ; if (psf->Magick != SNDFILE_MAGICK) return NULL ; return psf_get_string (psf, str_type) ; } /* sf_get_string */ int sf_set_string (SNDFILE *sndfile, int str_type, const char* str) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_set_string (psf, str_type, str) ; } /* sf_get_string */ /*------------------------------------------------------------------------------ */ int sf_current_byterate (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return -1 ; if (psf->Magick != SNDFILE_MAGICK) return -1 ; /* This should cover all PCM and floating point formats. */ if (psf->bytewidth) return psf->sf.samplerate * psf->sf.channels * psf->bytewidth ; if (psf->byterate) return psf->byterate (psf) ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_IMA_ADPCM : case SF_FORMAT_MS_ADPCM : case SF_FORMAT_VOX_ADPCM : return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_GSM610 : return (psf->sf.samplerate * psf->sf.channels * 13000) / 8000 ; case SF_FORMAT_G721_32 : /* 32kbs G721 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_G723_24 : /* 24kbs G723 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels * 3) / 8 ; case SF_FORMAT_G723_40 : /* 40kbs G723 ADPCM encoding. 
*/ return (psf->sf.samplerate * psf->sf.channels * 5) / 8 ; default : break ; } ; return -1 ; } /* sf_current_byterate */ /*============================================================================== */ sf_count_t sf_read_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) { SF_PRIVATE *psf ; sf_count_t count, extra ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (bytes < 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, bytes) ; return 0 ; } ; if (bytes % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf_fread (ptr, 1, bytes, psf) ; if (psf->read_current + count / blockwidth <= psf->sf.frames) psf->read_current += count / blockwidth ; else { count = (psf->sf.frames - psf->read_current) * blockwidth ; extra = bytes - count ; psf_memset (((char *) ptr) + count, 0, extra) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_short (SNDFILE *sndfile, short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (short)) ; return 0 ; /* End of file. */ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_short */ sf_count_t sf_readf_short (SNDFILE *sndfile, short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (short)) ; return 0 ; /* End of file. 
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_int */ sf_count_t sf_readf_int (SNDFILE *sndfile, int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, 
psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_float */ sf_count_t sf_readf_float (SNDFILE *sndfile, float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_double (SNDFILE *sndfile, double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_double */ sf_count_t sf_readf_double (SNDFILE *sndfile, double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; 
psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_double */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_raw (SNDFILE *sndfile, const void *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf_fwrite (ptr, 1, len, psf) ; psf->write_current += count / blockwidth ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_short (SNDFILE *sndfile, const short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_short */ sf_count_t sf_writef_short (SNDFILE *sndfile, const short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_int (SNDFILE *sndfile, const int *ptr, sf_count_t len) { SF_PRIVATE 
*psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_int */ sf_count_t sf_writef_int (SNDFILE *sndfile, const int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_float (SNDFILE *sndfile, const float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_float (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_float */ sf_count_t sf_writef_float (SNDFILE *sndfile, const float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; 
count = psf->write_float (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_double (SNDFILE *sndfile, const double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_double */ sf_count_t sf_writef_double (SNDFILE *sndfile, const double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_double */ /*========================================================================= ** Private functions. */ static int try_resource_fork (SF_PRIVATE * psf) { int old_error = psf->error ; /* Set READ mode now, to see if resource fork exists. */ psf->rsrc.mode = SFM_READ ; if (psf_open_rsrc (psf) != 0) { psf->error = old_error ; return 0 ; } ; /* More checking here. */ psf_log_printf (psf, "Resource fork : %s\n", psf->rsrc.path.c) ; return SF_FORMAT_SD2 ; } /* try_resource_fork */ static int format_from_extension (SF_PRIVATE *psf) { char *cptr ; char buffer [16] ; int format = 0 ; if ((cptr = strrchr (psf->file.name.c, '.')) == NULL) return 0 ; cptr ++ ; if (strlen (cptr) > sizeof (buffer) - 1) return 0 ; psf_strlcpy (buffer, sizeof (buffer), cptr) ; buffer [sizeof (buffer) - 1] = 0 ; /* Convert everything in the buffer to lower case. 
*/ cptr = buffer ; while (*cptr) { *cptr = tolower (*cptr) ; cptr ++ ; } ; cptr = buffer ; if (strcmp (cptr, "au") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "snd") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "vox") == 0 || strcmp (cptr, "vox8") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "vox6") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 6000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "gsm") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_GSM610 ; } /* For RAW files, make sure the dataoffset if set correctly. */ if ((SF_CONTAINER (format)) == SF_FORMAT_RAW) psf->dataoffset = 0 ; return format ; } /* format_from_extension */ static int guess_file_type (SF_PRIVATE *psf) { uint32_t buffer [3], format ; if (psf_binheader_readf (psf, "b", &buffer, SIGNED_SIZEOF (buffer)) != SIGNED_SIZEOF (buffer)) { psf->error = SFE_BAD_FILE_READ ; return 0 ; } ; if ((buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'F') || buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'X')) && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_WAV ; if (buffer [0] == MAKE_MARKER ('F', 'O', 'R', 'M')) { if (buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'F') || buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'C')) return SF_FORMAT_AIFF ; if (buffer [2] == MAKE_MARKER ('8', 'S', 'V', 'X') || buffer [2] == MAKE_MARKER ('1', '6', 'S', 'V')) return SF_FORMAT_SVX ; return 0 ; } ; if (buffer [0] == MAKE_MARKER ('.', 's', 'n', 'd') || buffer [0] == MAKE_MARKER ('d', 'n', 's', '.')) return SF_FORMAT_AU ; if ((buffer [0] == MAKE_MARKER ('f', 'a', 'p', ' ') || buffer [0] == MAKE_MARKER (' ', 'p', 'a', 'f'))) return SF_FORMAT_PAF ; if (buffer [0] == MAKE_MARKER ('N', 'I', 'S', 'T')) return SF_FORMAT_NIST ; if (buffer [0] == MAKE_MARKER ('C', 'r', 'e', 'a') && buffer [1] == MAKE_MARKER ('t', 'i', 'v', 'e')) return SF_FORMAT_VOC ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0xF8, 0xFF)) == MAKE_MARKER (0x64, 0xA3, 0x00, 0x00) || (buffer [0] & MAKE_MARKER (0xFF, 0xF8, 0xFF, 0xFF)) == MAKE_MARKER (0x00, 0x00, 0xA3, 0x64)) return SF_FORMAT_IRCAM ; if (buffer [0] == MAKE_MARKER ('r', 'i', 'f', 'f')) return SF_FORMAT_W64 ; if (buffer [0] == MAKE_MARKER (0, 0, 0x03, 0xE8) && buffer [1] == MAKE_MARKER (0, 0, 0, 1) && buffer [2] == MAKE_MARKER (0, 0, 0, 1)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER (0, 0, 0, 0) && buffer [1] == MAKE_MARKER (1, 0, 0, 0) && buffer [2] == MAKE_MARKER (1, 0, 0, 0)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER ('M', 'A', 'T', 'L') && buffer [1] == MAKE_MARKER ('A', 'B', ' ', '5')) return SF_FORMAT_MAT5 ; if (buffer [0] == MAKE_MARKER ('P', 'V', 'F', '1')) return SF_FORMAT_PVF ; if (buffer [0] == MAKE_MARKER ('E', 'x', 't', 'e') && buffer [1] == MAKE_MARKER ('n', 'd', 'e', 'd') && buffer [2] == MAKE_MARKER (' ', 'I', 'n', 's')) return SF_FORMAT_XI ; if (buffer [0] == MAKE_MARKER ('c', 'a', 'f', 'f') && buffer [2] == MAKE_MARKER ('d', 'e', 's', 'c')) return SF_FORMAT_CAF ; if (buffer [0] == MAKE_MARKER ('O', 'g', 'g', 'S')) return SF_FORMAT_OGG ; if (buffer [0] == MAKE_MARKER ('A', 'L', 'a', 'w') && buffer [1] == MAKE_MARKER ('S', 'o', 'u', 'n') && buffer [2] == MAKE_MARKER ('d', 'F', 'i', 'l')) return SF_FORMAT_WVE ; if (buffer [0] == MAKE_MARKER ('D', 'i', 'a', 'm') && buffer 
[1] == MAKE_MARKER ('o', 'n', 'd', 'W') && buffer [2] == MAKE_MARKER ('a', 'r', 'e', ' ')) return SF_FORMAT_DWD ; if (buffer [0] == MAKE_MARKER ('L', 'M', '8', '9') || buffer [0] == MAKE_MARKER ('5', '3', 0, 0)) return SF_FORMAT_TXW ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0x80, 0xFF)) == MAKE_MARKER (0xF0, 0x7E, 0, 0x01)) return SF_FORMAT_SDS ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0, 0)) == MAKE_MARKER (1, 4, 0, 0)) return SF_FORMAT_MPC2K ; if (buffer [0] == MAKE_MARKER ('C', 'A', 'T', ' ') && buffer [2] == MAKE_MARKER ('R', 'E', 'X', '2')) return SF_FORMAT_REX2 ; if (buffer [0] == MAKE_MARKER (0x30, 0x26, 0xB2, 0x75) && buffer [1] == MAKE_MARKER (0x8E, 0x66, 0xCF, 0x11)) return 0 /*-SF_FORMAT_WMA-*/ ; /* HMM (Hidden Markov Model) Tool Kit. */ if (buffer [2] == MAKE_MARKER (0, 2, 0, 0) && 2 * ((int64_t) BE2H_32 (buffer [0])) + 12 == psf->filelength) return SF_FORMAT_HTK ; if (buffer [0] == MAKE_MARKER ('f', 'L', 'a', 'C')) return SF_FORMAT_FLAC ; if (buffer [0] == MAKE_MARKER ('2', 'B', 'I', 'T')) return SF_FORMAT_AVR ; if (buffer [0] == MAKE_MARKER ('R', 'F', '6', '4') && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_RF64 ; if (buffer [0] == MAKE_MARKER ('I', 'D', '3', 3)) { psf_log_printf (psf, "Found 'ID3' marker.\n") ; if (id3_skip (psf)) return guess_file_type (psf) ; return 0 ; } ; /* Turtle Beach SMP 16-bit */ if (buffer [0] == MAKE_MARKER ('S', 'O', 'U', 'N') && buffer [1] == MAKE_MARKER ('D', ' ', 'S', 'A')) return 0 ; /* Yamaha sampler format. */ if (buffer [0] == MAKE_MARKER ('S', 'Y', '8', '0') || buffer [0] == MAKE_MARKER ('S', 'Y', '8', '5')) return 0 ; if (buffer [0] == MAKE_MARKER ('a', 'j', 'k', 'g')) return 0 /*-SF_FORMAT_SHN-*/ ; /* This must be the last one. */ if (psf->filelength > 0 && (format = try_resource_fork (psf)) != 0) return format ; return 0 ; } /* guess_file_type */ static int validate_sfinfo (SF_INFO *sfinfo) { if (sfinfo->samplerate < 1) return 0 ; if (sfinfo->frames < 0) return 0 ; if (sfinfo->channels < 1) return 0 ; if ((SF_CONTAINER (sfinfo->format)) == 0) return 0 ; if ((SF_CODEC (sfinfo->format)) == 0) return 0 ; if (sfinfo->sections < 1) return 0 ; return 1 ; } /* validate_sfinfo */ static int validate_psf (SF_PRIVATE *psf) { if (psf->datalength < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : datalength == %D.\n", psf->datalength) ; return 0 ; } ; if (psf->dataoffset < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : dataoffset == %D.\n", psf->dataoffset) ; return 0 ; } ; if (psf->blockwidth && psf->blockwidth != psf->sf.channels * psf->bytewidth) { psf_log_printf (psf, "Invalid SF_PRIVATE field : channels * bytewidth == %d.\n", psf->sf.channels * psf->bytewidth) ; return 0 ; } ; return 1 ; } /* validate_psf */ static void save_header_info (SF_PRIVATE *psf) { snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; } /* save_header_info */ static int copy_filename (SF_PRIVATE *psf, const char *path) { const char *ccptr ; char *cptr ; if (strlen (path) > 1 && strlen (path) - 1 >= sizeof (psf->file.path.c)) { psf->error = SFE_FILENAME_TOO_LONG ; return psf->error ; } ; snprintf (psf->file.path.c, sizeof (psf->file.path.c), "%s", path) ; if ((ccptr = strrchr (path, '/')) || (ccptr = strrchr (path, '\\'))) ccptr ++ ; else ccptr = path ; snprintf (psf->file.name.c, sizeof (psf->file.name.c), "%s", ccptr) ; /* Now grab the directory. 
*/ snprintf (psf->file.dir.c, sizeof (psf->file.dir.c), "%s", path) ; if ((cptr = strrchr (psf->file.dir.c, '/')) || (cptr = strrchr (psf->file.dir.c, '\\'))) cptr [1] = 0 ; else psf->file.dir.c [0] = 0 ; return 0 ; } /* copy_filename */ /*============================================================================== */ static int psf_close (SF_PRIVATE *psf) { uint32_t k ; int error = 0 ; if (psf->codec_close) { error = psf->codec_close (psf) ; /* To prevent it being called in psf->container_close(). */ psf->codec_close = NULL ; } ; if (psf->container_close) error = psf->container_close (psf) ; error = psf_fclose (psf) ; psf_close_rsrc (psf) ; /* For an ISO C compliant implementation it is ok to free a NULL pointer. */ free (psf->header.ptr) ; free (psf->container_data) ; free (psf->codec_data) ; free (psf->interleave) ; free (psf->dither) ; free (psf->peak_info) ; free (psf->broadcast_16k) ; free (psf->loop_info) ; free (psf->instrument) ; free (psf->cues) ; free (psf->channel_map) ; free (psf->format_desc) ; free (psf->strings.storage) ; if (psf->wchunks.chunks) for (k = 0 ; k < psf->wchunks.used ; k++) free (psf->wchunks.chunks [k].data) ; free (psf->rchunks.chunks) ; free (psf->wchunks.chunks) ; free (psf->iterator) ; free (psf->cart_16k) ; memset (psf, 0, sizeof (SF_PRIVATE)) ; free (psf) ; return error ; } /* psf_close */ SNDFILE * psf_open_file (SF_PRIVATE *psf, SF_INFO *sfinfo) { int error, format ; sf_errno = error = 0 ; sf_parselog [0] = 0 ; if (psf->error) { error = psf->error ; goto error_exit ; } ; if (psf->file.mode != SFM_READ && psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) { error = SFE_BAD_OPEN_MODE ; goto error_exit ; } ; if (sfinfo == NULL) { error = SFE_BAD_SF_INFO_PTR ; goto error_exit ; } ; if (psf->file.mode == SFM_READ) { if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_RAW) { if (sf_format_check (sfinfo) == 0) { error = SFE_RAW_BAD_FORMAT ; goto error_exit ; } ; } else memset (sfinfo, 0, sizeof (SF_INFO)) ; } ; memcpy (&psf->sf, sfinfo, sizeof (SF_INFO)) ; psf->Magick = SNDFILE_MAGICK ; psf->norm_float = SF_TRUE ; psf->norm_double = SF_TRUE ; psf->dataoffset = -1 ; psf->datalength = -1 ; psf->read_current = -1 ; psf->write_current = -1 ; psf->auto_header = SF_FALSE ; psf->rwf_endian = SF_ENDIAN_LITTLE ; psf->seek = psf_default_seek ; psf->float_int_mult = 0 ; psf->float_max = -1.0 ; /* An attempt at a per SF_PRIVATE unique id. */ psf->unique_id = psf_rand_int32 () ; psf->sf.sections = 1 ; psf->is_pipe = psf_is_pipe (psf) ; if (psf->is_pipe) { psf->sf.seekable = SF_FALSE ; psf->filelength = SF_COUNT_MAX ; } else { psf->sf.seekable = SF_TRUE ; /* File is open, so get the length. 
*/ psf->filelength = psf_get_filelen (psf) ; } ; if (psf->fileoffset > 0) { switch (psf->file.mode) { case SFM_READ : if (psf->filelength < 44) { psf_log_printf (psf, "Short filelength: %D (fileoffset: %D)\n", psf->filelength, psf->fileoffset) ; error = SFE_BAD_OFFSET ; goto error_exit ; } ; break ; case SFM_WRITE : psf->fileoffset = 0 ; psf_fseek (psf, 0, SEEK_END) ; psf->fileoffset = psf_ftell (psf) ; break ; case SFM_RDWR : error = SFE_NO_EMBEDDED_RDWR ; goto error_exit ; } ; psf_log_printf (psf, "Embedded file offset : %D\n", psf->fileoffset) ; } ; if (psf->filelength == SF_COUNT_MAX) psf_log_printf (psf, "Length : unknown\n") ; else psf_log_printf (psf, "Length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->filelength == 0)) { /* If the file is being opened for write or RDWR and the file is currently ** empty, then the SF_INFO struct must contain valid data. */ if ((SF_CONTAINER (psf->sf.format)) == 0) { error = SFE_ZERO_MAJOR_FORMAT ; goto error_exit ; } ; if ((SF_CODEC (psf->sf.format)) == 0) { error = SFE_ZERO_MINOR_FORMAT ; goto error_exit ; } ; if (sf_format_check (&psf->sf) == 0) { error = SFE_BAD_OPEN_FORMAT ; goto error_exit ; } ; } else if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) { /* If type RAW has not been specified then need to figure out file type. */ psf->sf.format = guess_file_type (psf) ; if (psf->sf.format == 0) psf->sf.format = format_from_extension (psf) ; } ; /* Prevent unnecessary seeks */ psf->last_op = psf->file.mode ; /* Set bytewidth if known. */ switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_S8 : case SF_FORMAT_PCM_U8 : case SF_FORMAT_ULAW : case SF_FORMAT_ALAW : case SF_FORMAT_DPCM_8 : psf->bytewidth = 1 ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_DPCM_16 : psf->bytewidth = 2 ; break ; case SF_FORMAT_PCM_24 : psf->bytewidth = 3 ; break ; case SF_FORMAT_PCM_32 : case SF_FORMAT_FLOAT : psf->bytewidth = 4 ; break ; case SF_FORMAT_DOUBLE : psf->bytewidth = 8 ; break ; } ; /* Call the initialisation function for the relevant file type. 
*/ switch (SF_CONTAINER (psf->sf.format)) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : error = wav_open (psf) ; break ; case SF_FORMAT_AIFF : error = aiff_open (psf) ; break ; case SF_FORMAT_AU : error = au_open (psf) ; break ; case SF_FORMAT_RAW : error = raw_open (psf) ; break ; case SF_FORMAT_W64 : error = w64_open (psf) ; break ; case SF_FORMAT_RF64 : error = rf64_open (psf) ; break ; /* Lite remove start */ case SF_FORMAT_PAF : error = paf_open (psf) ; break ; case SF_FORMAT_SVX : error = svx_open (psf) ; break ; case SF_FORMAT_NIST : error = nist_open (psf) ; break ; case SF_FORMAT_IRCAM : error = ircam_open (psf) ; break ; case SF_FORMAT_VOC : error = voc_open (psf) ; break ; case SF_FORMAT_SDS : error = sds_open (psf) ; break ; case SF_FORMAT_OGG : error = ogg_open (psf) ; break ; case SF_FORMAT_TXW : error = txw_open (psf) ; break ; case SF_FORMAT_WVE : error = wve_open (psf) ; break ; case SF_FORMAT_DWD : error = dwd_open (psf) ; break ; case SF_FORMAT_MAT4 : error = mat4_open (psf) ; break ; case SF_FORMAT_MAT5 : error = mat5_open (psf) ; break ; case SF_FORMAT_PVF : error = pvf_open (psf) ; break ; case SF_FORMAT_XI : error = xi_open (psf) ; break ; case SF_FORMAT_HTK : error = htk_open (psf) ; break ; case SF_FORMAT_SD2 : error = sd2_open (psf) ; break ; case SF_FORMAT_REX2 : error = rx2_open (psf) ; break ; case SF_FORMAT_AVR : error = avr_open (psf) ; break ; case SF_FORMAT_FLAC : error = flac_open (psf) ; break ; case SF_FORMAT_CAF : error = caf_open (psf) ; break ; case SF_FORMAT_MPC2K : error = mpc2k_open (psf) ; break ; /* Lite remove end */ default : error = SFE_UNKNOWN_FORMAT ; } ; if (error) goto error_exit ; /* For now, check whether embedding is supported. */ format = SF_CONTAINER (psf->sf.format) ; if (psf->fileoffset > 0) { switch (format) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_AIFF : case SF_FORMAT_AU : /* Actual embedded files. */ break ; case SF_FORMAT_FLAC : /* Flac with an ID3v2 header? */ break ; default : error = SFE_NO_EMBED_SUPPORT ; goto error_exit ; } ; } ; if (psf->fileoffset > 0) psf_log_printf (psf, "Embedded file length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_RDWR && sf_format_check (&psf->sf) == 0) { error = SFE_BAD_MODE_RW ; goto error_exit ; } ; if (validate_sfinfo (&psf->sf) == 0) { psf_log_SF_INFO (psf) ; save_header_info (psf) ; error = SFE_BAD_SF_INFO ; goto error_exit ; } ; if (validate_psf (psf) == 0) { save_header_info (psf) ; error = SFE_INTERNAL ; goto error_exit ; } ; psf->read_current = 0 ; psf->write_current = 0 ; if (psf->file.mode == SFM_RDWR) { psf->write_current = psf->sf.frames ; psf->have_written = psf->sf.frames > 0 ? SF_TRUE : SF_FALSE ; } ; memcpy (sfinfo, &psf->sf, sizeof (SF_INFO)) ; if (psf->file.mode == SFM_WRITE) { /* Zero out these fields. */ sfinfo->frames = 0 ; sfinfo->sections = 0 ; sfinfo->seekable = 0 ; } ; return (SNDFILE *) psf ; error_exit : sf_errno = error ; if (error == SFE_SYSTEM) snprintf (sf_syserr, sizeof (sf_syserr), "%s", psf->syserr) ; snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; switch (error) { case SF_ERR_SYSTEM : case SF_ERR_UNSUPPORTED_ENCODING : case SFE_UNIMPLEMENTED : break ; case SFE_RAW_BAD_FORMAT : break ; default : if (psf->file.mode == SFM_READ) { psf_log_printf (psf, "Parse error : %s\n", sf_error_number (error)) ; error = SF_ERR_MALFORMED_FILE ; } ; } ; psf_close (psf) ; return NULL ; } /* psf_open_file */ /*============================================================================== ** Chunk getting and setting. 
** This works for AIFF, CAF, RF64 and WAV. ** It doesn't work for W64 because W64 uses weird GUID style chunk markers. */ int sf_set_chunk (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->set_chunk) return psf->set_chunk (psf, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_set_chunk */ SF_CHUNK_ITERATOR * sf_get_chunk_iterator (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info) return psf_get_chunk_iterator (psf, chunk_info->id) ; return psf_get_chunk_iterator (psf, NULL) ; } /* sf_get_chunk_iterator */ SF_CHUNK_ITERATOR * sf_next_chunk_iterator (SF_CHUNK_ITERATOR * iterator) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->next_chunk_iterator) return psf->next_chunk_iterator (psf, iterator) ; return NULL ; } /* sf_get_chunk_iterator_next */ int sf_get_chunk_size (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_size) return psf->get_chunk_size (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; return 0 ; } /* sf_get_chunk_size */ int sf_get_chunk_data (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_data) return psf->get_chunk_data (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_get_chunk_data */
sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */
sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */
{'added': [(270, '\t{\tSFE_BAD_HEADER_ALLOC \t, "Error : Required header allocation is too large." },'), (329, '\tif ((psf = psf_allocate ()) == NULL)'), (361, '\tif ((psf = psf_allocate ()) == NULL)'), (403, '\tif ((psf = psf_allocate ()) == NULL)'), (2691, '\tfree (psf->header.ptr) ;')], 'deleted': [(270, ''), (329, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)'), (361, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)'), (403, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)')]}
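The diff above swaps the bare calloc of SF_PRIVATE for a call to psf_allocate () and adds a matching free (psf->header.ptr) in psf_close (), which points at the substance of the CVE-2017-7586 fix: the header scratch space becomes a separately allocated buffer rather than a fixed array inside the struct. The following is only a sketch of what such a helper could look like; the sf_private_sketch and header_storage_sketch types, the INITIAL_HEADER_SIZE_SKETCH constant and the _sketch name suffixes are illustrative assumptions, not libsndfile's actual definitions (the real psf_allocate () lives elsewhere in the library and SF_PRIVATE carries many more fields).

#include <stdlib.h>

/* Illustrative stand-ins only; the real SF_PRIVATE and psf_allocate ()
** differ in detail. */
typedef struct
{	unsigned char	*ptr ;	/* dynamically allocated header buffer */
	size_t			len ;	/* current allocation size */
} header_storage_sketch ;

typedef struct
{	header_storage_sketch	header ;
	/* ... the many other SF_PRIVATE fields ... */
} sf_private_sketch ;

#define INITIAL_HEADER_SIZE_SKETCH	(1 << 12)	/* assumed starting size */

/* Allocate the private struct plus an initial header buffer in one step,
** so later header writes target a resizable heap buffer instead of a
** fixed on-struct array (the CWE-119 overflow target). */
static sf_private_sketch *
psf_allocate_sketch (void)
{	sf_private_sketch *psf ;

	if ((psf = calloc (1, sizeof (*psf))) == NULL)
		return NULL ;

	if ((psf->header.ptr = calloc (1, INITIAL_HEADER_SIZE_SKETCH)) == NULL)
	{	free (psf) ;
		return NULL ;
		} ;
	psf->header.len = INITIAL_HEADER_SIZE_SKETCH ;

	return psf ;
} /* psf_allocate_sketch */

In the patched sf_open_fd shown in func_after, a NULL return from psf_allocate () is still reported as SFE_MALLOC_FAILED; the SFE_BAD_HEADER_ALLOC string added by the diff presumably covers the separate case where a later header-buffer growth request exceeds what the library is willing to allocate.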
5
4
2307
15935
https://github.com/erikd/libsndfile
CVE-2017-7586
['CWE-119']
sndfile.c
sf_open_virtual
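This record concerns sf_open_virtual in the same file, and its code_before below opens by validating the SF_VIRTUAL_IO callback set before anything is allocated. As a reading aid, here is a minimal caller-side sketch, assuming an in-memory read-only source: the vio_mem_sketch struct and the vm_* helpers are invented for illustration, while the SF_VIRTUAL_IO members (get_filelen, seek, read, write, tell), the SFM_READ mode and the sf_open_virtual () signature are the ones the validation code checks.

#include <sndfile.h>
#include <stdio.h>
#include <string.h>

/* Illustrative in-memory "file" handed to the callbacks via user_data. */
typedef struct
{	const unsigned char	*data ;
	sf_count_t			length ;
	sf_count_t			offset ;
} vio_mem_sketch ;

static sf_count_t vm_get_filelen (void *user_data)
{	return ((vio_mem_sketch *) user_data)->length ;
}

static sf_count_t vm_seek (sf_count_t offset, int whence, void *user_data)
{	vio_mem_sketch *vm = user_data ;
	switch (whence)
	{	case SEEK_SET :	vm->offset = offset ; break ;
		case SEEK_CUR :	vm->offset += offset ; break ;
		case SEEK_END :	vm->offset = vm->length + offset ; break ;
		} ;
	return vm->offset ;
}

static sf_count_t vm_read (void *ptr, sf_count_t count, void *user_data)
{	vio_mem_sketch *vm = user_data ;
	if (vm->offset >= vm->length)
		return 0 ;
	if (count > vm->length - vm->offset)
		count = vm->length - vm->offset ;
	memcpy (ptr, vm->data + vm->offset, count) ;
	vm->offset += count ;
	return count ;
}

static sf_count_t vm_tell (void *user_data)
{	return ((vio_mem_sketch *) user_data)->offset ;
}

/* Open a read-only stream over a caller-supplied buffer. */
static SNDFILE *
open_from_memory_sketch (const unsigned char *buf, sf_count_t len, SF_INFO *info, vio_mem_sketch *vm)
{	SF_VIRTUAL_IO vio =
	{	.get_filelen	= vm_get_filelen,
		.seek			= vm_seek,
		.read			= vm_read,
		.write			= NULL,		/* not needed for SFM_READ */
		.tell			= vm_tell
	} ;

	vm->data = buf ;
	vm->length = len ;
	vm->offset = 0 ;
	memset (info, 0, sizeof (*info)) ;
	return sf_open_virtual (&vio, SFM_READ, info, vm) ;
} /* open_from_memory_sketch */

Per the validation in the function body that follows, a NULL write callback is acceptable for SFM_READ, but leaving get_filelen, seek, tell or read unset makes the open fail with SFE_BAD_VIRTUAL_IO before any SF_PRIVATE is allocated.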
/* ** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #define SNDFILE_MAGICK 0x1234C0DE #ifdef __APPLE__ /* ** Detect if a compile for a universal binary is being attempted and barf if it is. ** See the URL below for the rationale. */ #ifdef __BIG_ENDIAN__ #if (CPU_IS_LITTLE_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #ifdef __LITTLE_ENDIAN__ #if (CPU_IS_BIG_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #endif typedef struct { int error ; const char *str ; } ErrorStruct ; static ErrorStruct SndfileErrors [] = { /* Public error values and their associated strings. */ { SF_ERR_NO_ERROR , "No Error." }, { SF_ERR_UNRECOGNISED_FORMAT , "Format not recognised." }, { SF_ERR_SYSTEM , "System error." /* Often replaced. */ }, { SF_ERR_MALFORMED_FILE , "Supported file format but file is malformed." }, { SF_ERR_UNSUPPORTED_ENCODING , "Supported file format but unsupported encoding." }, /* Private error values and their associated strings. */ { SFE_ZERO_MAJOR_FORMAT , "Error : major format is 0." }, { SFE_ZERO_MINOR_FORMAT , "Error : minor format is 0." }, { SFE_BAD_FILE , "File does not exist or is not a regular file (possibly a pipe?)." }, { SFE_BAD_FILE_READ , "File exists but no data could be read." }, { SFE_OPEN_FAILED , "Could not open file." }, { SFE_BAD_SNDFILE_PTR , "Not a valid SNDFILE* pointer." }, { SFE_BAD_SF_INFO_PTR , "NULL SF_INFO pointer passed to libsndfile." }, { SFE_BAD_SF_INCOMPLETE , "SF_PRIVATE struct incomplete and end of header parsing." }, { SFE_BAD_FILE_PTR , "Bad FILE pointer." }, { SFE_BAD_INT_PTR , "Internal error, Bad pointer." }, { SFE_BAD_STAT_SIZE , "Error : software was misconfigured at compile time (sizeof statbuf.st_size)." }, { SFE_NO_TEMP_DIR , "Error : Could not file temp dir." }, { SFE_MALLOC_FAILED , "Internal malloc () failed." }, { SFE_UNIMPLEMENTED , "File contains data in an unimplemented format." }, { SFE_BAD_READ_ALIGN , "Attempt to read a non-integer number of channels." }, { SFE_BAD_WRITE_ALIGN , "Attempt to write a non-integer number of channels." }, { SFE_UNKNOWN_FORMAT , "File contains data in an unknown format." }, { SFE_NOT_READMODE , "Read attempted on file currently open for write." }, { SFE_NOT_WRITEMODE , "Write attempted on file currently open for read." }, { SFE_BAD_MODE_RW , "Error : This file format does not support read/write mode." }, { SFE_BAD_SF_INFO , "Internal error : SF_INFO struct incomplete." }, { SFE_BAD_OFFSET , "Error : supplied offset beyond end of file." 
}, { SFE_NO_EMBED_SUPPORT , "Error : embedding not supported for this file format." }, { SFE_NO_EMBEDDED_RDWR , "Error : cannot open embedded file read/write." }, { SFE_NO_PIPE_WRITE , "Error : this file format does not support pipe write." }, { SFE_BAD_VIRTUAL_IO , "Error : bad pointer on SF_VIRTUAL_IO struct." }, { SFE_BAD_BROADCAST_INFO_SIZE , "Error : bad coding_history_size in SF_BROADCAST_INFO struct." }, { SFE_BAD_BROADCAST_INFO_TOO_BIG , "Error : SF_BROADCAST_INFO struct too large." }, { SFE_BAD_CART_INFO_SIZE , "Error: SF_CART_INFO struct too large." }, { SFE_BAD_CART_INFO_TOO_BIG , "Error: bag tag_text_size in SF_CART_INFO struct." }, { SFE_INTERLEAVE_MODE , "Attempt to write to file with non-interleaved data." }, { SFE_INTERLEAVE_SEEK , "Bad karma in seek during interleave read operation." }, { SFE_INTERLEAVE_READ , "Bad karma in read during interleave read operation." }, { SFE_INTERNAL , "Unspecified internal error." }, { SFE_BAD_COMMAND_PARAM , "Bad parameter passed to function sf_command." }, { SFE_BAD_ENDIAN , "Bad endian-ness. Try default endian-ness" }, { SFE_CHANNEL_COUNT_ZERO , "Channel count is zero." }, { SFE_CHANNEL_COUNT , "Too many channels specified." }, { SFE_CHANNEL_COUNT_BAD , "Bad channel count." }, { SFE_BAD_SEEK , "Internal psf_fseek() failed." }, { SFE_NOT_SEEKABLE , "Seek attempted on unseekable file type." }, { SFE_AMBIGUOUS_SEEK , "Error : combination of file open mode and seek command is ambiguous." }, { SFE_WRONG_SEEK , "Error : invalid seek parameters." }, { SFE_SEEK_FAILED , "Error : parameters OK, but psf_seek() failed." }, { SFE_BAD_OPEN_MODE , "Error : bad mode parameter for file open." }, { SFE_OPEN_PIPE_RDWR , "Error : attempt to open a pipe in read/write mode." }, { SFE_RDWR_POSITION , "Error on RDWR position (cryptic)." }, { SFE_RDWR_BAD_HEADER , "Error : Cannot open file in read/write mode due to string data in header." }, { SFE_CMD_HAS_DATA , "Error : Command fails because file already has audio data." }, { SFE_STR_NO_SUPPORT , "Error : File type does not support string data." }, { SFE_STR_NOT_WRITE , "Error : Trying to set a string when file is not in write mode." }, { SFE_STR_MAX_DATA , "Error : Maximum string data storage reached." }, { SFE_STR_MAX_COUNT , "Error : Maximum string data count reached." }, { SFE_STR_BAD_TYPE , "Error : Bad string data type." }, { SFE_STR_NO_ADD_END , "Error : file type does not support strings added at end of file." }, { SFE_STR_BAD_STRING , "Error : bad string." }, { SFE_STR_WEIRD , "Error : Weird string error." }, { SFE_WAV_NO_RIFF , "Error in WAV file. No 'RIFF' chunk marker." }, { SFE_WAV_NO_WAVE , "Error in WAV file. No 'WAVE' chunk marker." }, { SFE_WAV_NO_FMT , "Error in WAV/W64/RF64 file. No 'fmt ' chunk marker." }, { SFE_WAV_BAD_FMT , "Error in WAV/W64/RF64 file. Malformed 'fmt ' chunk." }, { SFE_WAV_FMT_SHORT , "Error in WAV/W64/RF64 file. Short 'fmt ' chunk." }, { SFE_WAV_BAD_FACT , "Error in WAV file. 'fact' chunk out of place." }, { SFE_WAV_BAD_PEAK , "Error in WAV file. Bad 'PEAK' chunk." }, { SFE_WAV_PEAK_B4_FMT , "Error in WAV file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_WAV_BAD_FORMAT , "Error in WAV file. Errors in 'fmt ' chunk." }, { SFE_WAV_BAD_BLOCKALIGN , "Error in WAV file. Block alignment in 'fmt ' chunk is incorrect." }, { SFE_WAV_NO_DATA , "Error in WAV file. No 'data' chunk marker." }, { SFE_WAV_BAD_LIST , "Error in WAV file. Malformed LIST chunk." }, { SFE_WAV_UNKNOWN_CHUNK , "Error in WAV file. File contains an unknown chunk marker." 
}, { SFE_WAV_WVPK_DATA , "Error in WAV file. Data is in WAVPACK format." }, { SFE_WAV_ADPCM_NOT4BIT , "Error in ADPCM WAV file. Invalid bit width." }, { SFE_WAV_ADPCM_CHANNELS , "Error in ADPCM WAV file. Invalid number of channels." }, { SFE_WAV_ADPCM_SAMPLES , "Error in ADPCM WAV file. Invalid number of samples per block." }, { SFE_WAV_GSM610_FORMAT , "Error in GSM610 WAV file. Invalid format chunk." }, { SFE_AIFF_NO_FORM , "Error in AIFF file, bad 'FORM' marker." }, { SFE_AIFF_AIFF_NO_FORM , "Error in AIFF file, 'AIFF' marker without 'FORM'." }, { SFE_AIFF_COMM_NO_FORM , "Error in AIFF file, 'COMM' marker without 'FORM'." }, { SFE_AIFF_SSND_NO_COMM , "Error in AIFF file, 'SSND' marker without 'COMM'." }, { SFE_AIFF_UNKNOWN_CHUNK , "Error in AIFF file, unknown chunk." }, { SFE_AIFF_COMM_CHUNK_SIZE, "Error in AIFF file, bad 'COMM' chunk size." }, { SFE_AIFF_BAD_COMM_CHUNK , "Error in AIFF file, bad 'COMM' chunk." }, { SFE_AIFF_PEAK_B4_COMM , "Error in AIFF file. 'PEAK' chunk found before 'COMM' chunk." }, { SFE_AIFF_BAD_PEAK , "Error in AIFF file. Bad 'PEAK' chunk." }, { SFE_AIFF_NO_SSND , "Error in AIFF file, bad 'SSND' chunk." }, { SFE_AIFF_NO_DATA , "Error in AIFF file, no sound data." }, { SFE_AIFF_RW_SSND_NOT_LAST, "Error in AIFF file, RDWR only possible if SSND chunk at end of file." }, { SFE_AU_UNKNOWN_FORMAT , "Error in AU file, unknown format." }, { SFE_AU_NO_DOTSND , "Error in AU file, missing '.snd' or 'dns.' marker." }, { SFE_AU_EMBED_BAD_LEN , "Embedded AU file with unknown length." }, { SFE_RAW_READ_BAD_SPEC , "Error while opening RAW file for read. Must specify format and channels.\n" "Possibly trying to open unsupported format." }, { SFE_RAW_BAD_BITWIDTH , "Error. RAW file bitwidth must be a multiple of 8." }, { SFE_RAW_BAD_FORMAT , "Error. Bad format field in SF_INFO struct when opening a RAW file for read." }, { SFE_PAF_NO_MARKER , "Error in PAF file, no marker." }, { SFE_PAF_VERSION , "Error in PAF file, bad version." }, { SFE_PAF_UNKNOWN_FORMAT , "Error in PAF file, unknown format." }, { SFE_PAF_SHORT_HEADER , "Error in PAF file. File shorter than minimal header." }, { SFE_PAF_BAD_CHANNELS , "Error in PAF file. Bad channel count." }, { SFE_SVX_NO_FORM , "Error in 8SVX / 16SV file, no 'FORM' marker." }, { SFE_SVX_NO_BODY , "Error in 8SVX / 16SV file, no 'BODY' marker." }, { SFE_SVX_NO_DATA , "Error in 8SVX / 16SV file, no sound data." }, { SFE_SVX_BAD_COMP , "Error in 8SVX / 16SV file, unsupported compression format." }, { SFE_SVX_BAD_NAME_LENGTH , "Error in 8SVX / 16SV file, NAME chunk too long." }, { SFE_NIST_BAD_HEADER , "Error in NIST file, bad header." }, { SFE_NIST_CRLF_CONVERISON, "Error : NIST file damaged by Windows CR -> CRLF conversion process." }, { SFE_NIST_BAD_ENCODING , "Error in NIST file, unsupported compression format." }, { SFE_VOC_NO_CREATIVE , "Error in VOC file, no 'Creative Voice File' marker." }, { SFE_VOC_BAD_FORMAT , "Error in VOC file, bad format." }, { SFE_VOC_BAD_VERSION , "Error in VOC file, bad version number." }, { SFE_VOC_BAD_MARKER , "Error in VOC file, bad marker in file." }, { SFE_VOC_BAD_SECTIONS , "Error in VOC file, incompatible VOC sections." }, { SFE_VOC_MULTI_SAMPLERATE, "Error in VOC file, more than one sample rate defined." }, { SFE_VOC_MULTI_SECTION , "Unimplemented VOC file feature, file contains multiple sound sections." }, { SFE_VOC_MULTI_PARAM , "Error in VOC file, file contains multiple bit or channel widths." }, { SFE_VOC_SECTION_COUNT , "Error in VOC file, too many sections." 
}, { SFE_VOC_NO_PIPE , "Error : not able to operate on VOC files over a pipe." }, { SFE_IRCAM_NO_MARKER , "Error in IRCAM file, bad IRCAM marker." }, { SFE_IRCAM_BAD_CHANNELS , "Error in IRCAM file, bad channel count." }, { SFE_IRCAM_UNKNOWN_FORMAT, "Error in IRCAM file, unknown encoding format." }, { SFE_W64_64_BIT , "Error in W64 file, file contains 64 bit offset." }, { SFE_W64_NO_RIFF , "Error in W64 file. No 'riff' chunk marker." }, { SFE_W64_NO_WAVE , "Error in W64 file. No 'wave' chunk marker." }, { SFE_W64_NO_DATA , "Error in W64 file. No 'data' chunk marker." }, { SFE_W64_ADPCM_NOT4BIT , "Error in ADPCM W64 file. Invalid bit width." }, { SFE_W64_ADPCM_CHANNELS , "Error in ADPCM W64 file. Invalid number of channels." }, { SFE_W64_GSM610_FORMAT , "Error in GSM610 W64 file. Invalid format chunk." }, { SFE_MAT4_BAD_NAME , "Error in MAT4 file. No variable name." }, { SFE_MAT4_NO_SAMPLERATE , "Error in MAT4 file. No sample rate." }, { SFE_MAT5_BAD_ENDIAN , "Error in MAT5 file. Not able to determine endian-ness." }, { SFE_MAT5_NO_BLOCK , "Error in MAT5 file. Bad block structure." }, { SFE_MAT5_SAMPLE_RATE , "Error in MAT5 file. Not able to determine sample rate." }, { SFE_PVF_NO_PVF1 , "Error in PVF file. No PVF1 marker." }, { SFE_PVF_BAD_HEADER , "Error in PVF file. Bad header." }, { SFE_PVF_BAD_BITWIDTH , "Error in PVF file. Bad bit width." }, { SFE_XI_BAD_HEADER , "Error in XI file. Bad header." }, { SFE_XI_EXCESS_SAMPLES , "Error in XI file. Excess samples in file." }, { SFE_XI_NO_PIPE , "Error : not able to operate on XI files over a pipe." }, { SFE_HTK_NO_PIPE , "Error : not able to operate on HTK files over a pipe." }, { SFE_SDS_NOT_SDS , "Error : not an SDS file." }, { SFE_SDS_BAD_BIT_WIDTH , "Error : bad bit width for SDS file." }, { SFE_SD2_FD_DISALLOWED , "Error : cannot open SD2 file without a file name." }, { SFE_SD2_BAD_DATA_OFFSET , "Error : bad data offset." }, { SFE_SD2_BAD_MAP_OFFSET , "Error : bad map offset." }, { SFE_SD2_BAD_DATA_LENGTH , "Error : bad data length." }, { SFE_SD2_BAD_MAP_LENGTH , "Error : bad map length." }, { SFE_SD2_BAD_RSRC , "Error : bad resource fork." }, { SFE_SD2_BAD_SAMPLE_SIZE , "Error : bad sample size." }, { SFE_FLAC_BAD_HEADER , "Error : bad flac header." }, { SFE_FLAC_NEW_DECODER , "Error : problem while creating flac decoder." }, { SFE_FLAC_INIT_DECODER , "Error : problem while initialization of the flac decoder." }, { SFE_FLAC_LOST_SYNC , "Error : flac decoder lost sync." }, { SFE_FLAC_BAD_SAMPLE_RATE, "Error : flac does not support this sample rate." }, { SFE_FLAC_UNKOWN_ERROR , "Error : unknown error in flac decoder." }, { SFE_WVE_NOT_WVE , "Error : not a WVE file." }, { SFE_WVE_NO_PIPE , "Error : not able to operate on WVE files over a pipe." }, { SFE_DWVW_BAD_BITWIDTH , "Error : Bad bit width for DWVW encoding. Must be 12, 16 or 24." }, { SFE_G72X_NOT_MONO , "Error : G72x encoding does not support more than 1 channel." }, { SFE_VORBIS_ENCODER_BUG , "Error : Sample rate chosen is known to trigger a Vorbis encoder bug on this CPU." }, { SFE_RF64_NOT_RF64 , "Error : Not an RF64 file." }, { SFE_RF64_PEAK_B4_FMT , "Error in RF64 file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_RF64_NO_DATA , "Error in RF64 file. No 'data' chunk marker." }, { SFE_ALAC_FAIL_TMPFILE , "Error : Failed to open tmp file for ALAC encoding." }, { SFE_BAD_CHUNK_PTR , "Error : Bad SF_CHUNK_INFO pointer." }, { SFE_UNKNOWN_CHUNK , "Error : Unknown chunk marker." }, { SFE_BAD_CHUNK_FORMAT , "Error : Reading/writing chunks from this file format is not supported." 
}, { SFE_BAD_CHUNK_MARKER , "Error : Bad chunk marker." }, { SFE_BAD_CHUNK_DATA_PTR , "Error : Bad data pointer in SF_CHUNK_INFO struct." }, { SFE_FILENAME_TOO_LONG , "Error : Supplied filename too long." }, { SFE_MAX_ERROR , "Maximum error number." }, { SFE_MAX_ERROR + 1 , NULL } } ; /*------------------------------------------------------------------------------ */ static int format_from_extension (SF_PRIVATE *psf) ; static int guess_file_type (SF_PRIVATE *psf) ; static int validate_sfinfo (SF_INFO *sfinfo) ; static int validate_psf (SF_PRIVATE *psf) ; static void save_header_info (SF_PRIVATE *psf) ; static int copy_filename (SF_PRIVATE *psf, const char *path) ; static int psf_close (SF_PRIVATE *psf) ; static int try_resource_fork (SF_PRIVATE * psf) ; /*------------------------------------------------------------------------------ ** Private (static) variables. */ int sf_errno = 0 ; static char sf_parselog [SF_BUFFER_LEN] = { 0 } ; static char sf_syserr [SF_SYSERR_LEN] = { 0 } ; /*------------------------------------------------------------------------------ */ #define VALIDATE_SNDFILE_AND_ASSIGN_PSF(a, b, c) \ { if ((a) == NULL) \ { sf_errno = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ (b) = (SF_PRIVATE*) (a) ; \ if ((b)->virtual_io == SF_FALSE && \ psf_file_valid (b) == 0) \ { (b)->error = SFE_BAD_FILE_PTR ; \ return 0 ; \ } ; \ if ((b)->Magick != SNDFILE_MAGICK) \ { (b)->error = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ if (c) (b)->error = 0 ; \ } /*------------------------------------------------------------------------------ ** Public functions. */ SNDFILE* sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */ SNDFILE* sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */ SNDFILE* sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. 
*/ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */ int sf_close (SNDFILE *sndfile) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_close (psf) ; } /* sf_close */ void sf_write_sync (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE *) sndfile) == NULL) return ; psf_fsync (psf) ; return ; } /* sf_write_sync */ /*============================================================================== */ const char* sf_error_number (int errnum) { static const char *bad_errnum = "No error defined for this error number. This is a bug in libsndfile." ; int k ; if (errnum == SFE_MAX_ERROR) return SndfileErrors [0].str ; if (errnum < 0 || errnum > SFE_MAX_ERROR) { /* This really shouldn't happen in release versions. */ printf ("Not a valid error number (%d).\n", errnum) ; return bad_errnum ; } ; for (k = 0 ; SndfileErrors [k].str ; k++) if (errnum == SndfileErrors [k].error) return SndfileErrors [k].str ; return bad_errnum ; } /* sf_error_number */ const char* sf_strerror (SNDFILE *sndfile) { SF_PRIVATE *psf = NULL ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; if (errnum == SFE_SYSTEM && sf_syserr [0]) return sf_syserr ; } else { psf = (SF_PRIVATE *) sndfile ; if (psf->Magick != SNDFILE_MAGICK) return "sf_strerror : Bad magic number." 
; errnum = psf->error ; if (errnum == SFE_SYSTEM && psf->syserr [0]) return psf->syserr ; } ; return sf_error_number (errnum) ; } /* sf_strerror */ /*------------------------------------------------------------------------------ */ int sf_error (SNDFILE *sndfile) { SF_PRIVATE *psf ; if (sndfile == NULL) return sf_errno ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; if (psf->error) return psf->error ; return 0 ; } /* sf_error */ /*------------------------------------------------------------------------------ */ int sf_perror (SNDFILE *sndfile) { SF_PRIVATE *psf ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; } else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; fprintf (stderr, "%s\n", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_perror */ /*------------------------------------------------------------------------------ */ int sf_error_str (SNDFILE *sndfile, char *str, size_t maxlen) { SF_PRIVATE *psf ; int errnum ; if (str == NULL) return SFE_INTERNAL ; if (sndfile == NULL) errnum = sf_errno ; else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; snprintf (str, maxlen, "%s", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_error_str */ /*============================================================================== */ int sf_format_check (const SF_INFO *info) { int subformat, endian ; subformat = SF_CODEC (info->format) ; endian = SF_ENDIAN (info->format) ; /* This is the place where each file format can check if the suppiled ** SF_INFO struct is valid. ** Return 0 on failure, 1 ons success. */ if (info->channels < 1 || info->channels > SF_MAX_CHANNELS) return 0 ; if (info->samplerate < 0) return 0 ; switch (SF_CONTAINER (info->format)) { case SF_FORMAT_WAV : /* WAV now allows both endian, RIFF or RIFX (little or big respectively) */ if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_WAVEX : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_AIFF : /* AIFF does allow both endian-nesses for PCM data.*/ if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; /* For other encodings reject any endian-ness setting. 
*/ if (endian != 0) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_IMA_ADPCM && (info->channels == 1 || info->channels == 2)) return 1 ; break ; case SF_FORMAT_AU : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_24 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_40 && info->channels == 1) return 1 ; break ; case SF_FORMAT_CAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_ALAC_16 || subformat == SF_FORMAT_ALAC_20) return 1 ; if (subformat == SF_FORMAT_ALAC_24 || subformat == SF_FORMAT_ALAC_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_RAW : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_ULAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_VOX_ADPCM && info->channels == 1) return 1 ; break ; case SF_FORMAT_PAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SVX : /* SVX only supports writing mono SVX files. */ if (info->channels > 1) return 0 ; /* Always big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_NIST : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_IRCAM : if (info->channels > 256) return 0 ; if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_FLOAT) return 1 ; break ; case SF_FORMAT_VOC : if (info->channels > 2) return 0 ; /* VOC is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_W64 : /* W64 is strictly little endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT4 : if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT5 : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_PVF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_XI : if (info->channels != 1) return 0 ; if (subformat == SF_FORMAT_DPCM_8 || subformat == SF_FORMAT_DPCM_16) return 1 ; break ; case SF_FORMAT_HTK : if (info->channels != 1) return 0 ; /* HTK is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_SDS : if (info->channels != 1) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_AVR : if (info->channels > 2) return 0 ; /* AVR is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_FLAC : /* FLAC can't do more than 8 channels. */ if (info->channels > 8) return 0 ; if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SD2 : /* SD2 is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_WVE : if (info->channels > 1) return 0 ; /* WVE is strictly big endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_OGG : if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_VORBIS) return 1 ; break ; case SF_FORMAT_MPC2K : if (info->channels > 2) return 0 ; /* MPC2000 is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_RF64 : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; default : break ; } ; return 0 ; } /* sf_format_check */ /*------------------------------------------------------------------------------ */ const char * sf_version_string (void) { #if ENABLE_EXPERIMENTAL_CODE return PACKAGE_NAME "-" PACKAGE_VERSION "-exp" ; #else return PACKAGE_NAME "-" PACKAGE_VERSION ; #endif } /*------------------------------------------------------------------------------ */ int sf_command (SNDFILE *sndfile, int command, void *data, int datasize) { SF_PRIVATE *psf = (SF_PRIVATE *) sndfile ; double quality ; int old_value ; /* This set of commands do not need the sndfile parameter. */ switch (command) { case SFC_GET_LIB_VERSION : if (data == NULL) { if (psf) psf->error = SFE_BAD_COMMAND_PARAM ; return SFE_BAD_COMMAND_PARAM ; } ; snprintf (data, datasize, "%s", sf_version_string ()) ; return strlen (data) ; case SFC_GET_SIMPLE_FORMAT_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_simple_count () ; return 0 ; case SFC_GET_SIMPLE_FORMAT : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_simple (data) ; case SFC_GET_FORMAT_MAJOR_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_major_count () ; return 0 ; case SFC_GET_FORMAT_MAJOR : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_major (data) ; case SFC_GET_FORMAT_SUBTYPE_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_subtype_count () ; return 0 ; case SFC_GET_FORMAT_SUBTYPE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_subtype (data) ; case SFC_GET_FORMAT_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_info (data) ; } ; if (sndfile == NULL && command == SFC_GET_LOG_INFO) { if (data == NULL) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; snprintf (data, datasize, "%s", sf_parselog) ; return strlen (data) ; } ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; switch (command) { case SFC_SET_NORM_FLOAT : old_value = psf->norm_float ; psf->norm_float = (datasize) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_CURRENT_SF_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; memcpy (data, &psf->sf, sizeof (SF_INFO)) ; break ; case SFC_SET_NORM_DOUBLE : old_value = psf->norm_double ; psf->norm_double = (datasize) ? 
SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_NORM_FLOAT : return psf->norm_float ; case SFC_GET_NORM_DOUBLE : return psf->norm_double ; case SFC_SET_SCALE_FLOAT_INT_READ : old_value = psf->float_int_mult ; psf->float_int_mult = (datasize != 0) ? SF_TRUE : SF_FALSE ; if (psf->float_int_mult && psf->float_max < 0.0) /* Scale to prevent wrap-around distortion. */ psf->float_max = (32768.0 / 32767.0) * psf_calc_signal_max (psf, SF_FALSE) ; return old_value ; case SFC_SET_SCALE_INT_FLOAT_WRITE : old_value = psf->scale_int_float ; psf->scale_int_float = (datasize != 0) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_SET_ADD_PEAK_CHUNK : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and AIFF support the PEAK chunk. */ switch (format) { case SF_FORMAT_AIFF : case SF_FORMAT_CAF : case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_RF64 : break ; default : return SF_FALSE ; } ; format = SF_CODEC (psf->sf.format) ; /* Only files containg the following data types support the PEAK chunk. */ if (format != SF_FORMAT_FLOAT && format != SF_FORMAT_DOUBLE) return SF_FALSE ; } ; /* Can only do this is in SFM_WRITE mode. */ if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; /* Everything seems OK, so set psf->has_peak and re-write header. */ if (datasize == SF_FALSE && psf->peak_info != NULL) { free (psf->peak_info) ; psf->peak_info = NULL ; } else if (psf->peak_info == NULL) { psf->peak_info = peak_info_calloc (psf->sf.channels) ; if (psf->peak_info != NULL) psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return datasize ; case SFC_SET_ADD_HEADER_PAD_CHUNK : return SF_FALSE ; case SFC_GET_LOG_INFO : if (data == NULL) return SFE_BAD_COMMAND_PARAM ; snprintf (data, datasize, "%s", psf->parselog.buf) ; break ; case SFC_CALC_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_FALSE) ; break ; case SFC_CALC_NORM_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_TRUE) ; break ; case SFC_CALC_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_FALSE) ; case SFC_CALC_NORM_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_TRUE) ; case SFC_GET_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_signal_max (psf, (double *) data) ; case SFC_GET_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_max_all_channels (psf, (double*) data) ; case SFC_UPDATE_HEADER_NOW : if (psf->write_header) psf->write_header (psf, SF_TRUE) ; break ; case SFC_SET_UPDATE_HEADER_AUTO : psf->auto_header = datasize ? SF_TRUE : SF_FALSE ; return psf->auto_header ; break ; case SFC_SET_ADD_DITHER_ON_WRITE : case SFC_SET_ADD_DITHER_ON_READ : /* ** FIXME ! ** These are obsolete. Just return. 
** Remove some time after version 1.0.8. */ break ; case SFC_SET_DITHER_ON_WRITE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->write_dither, data, sizeof (psf->write_dither)) ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_WRITE) ; break ; case SFC_SET_DITHER_ON_READ : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->read_dither, data, sizeof (psf->read_dither)) ; if (psf->file.mode == SFM_READ || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_READ) ; break ; case SFC_FILE_TRUNCATE : if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_TRUE ; if (datasize != sizeof (sf_count_t)) return SF_TRUE ; if (data == NULL || datasize != sizeof (sf_count_t)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } else { sf_count_t position ; position = *((sf_count_t*) data) ; if (sf_seek (sndfile, position, SEEK_SET) != position) return SF_TRUE ; psf->sf.frames = position ; position = psf_fseek (psf, 0, SEEK_CUR) ; return psf_ftruncate (psf, position) ; } ; break ; case SFC_SET_RAW_START_OFFSET : if (data == NULL || datasize != sizeof (sf_count_t)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) return (psf->error = SFE_BAD_COMMAND_PARAM) ; psf->dataoffset = *((sf_count_t*) data) ; sf_seek (sndfile, 0, SEEK_CUR) ; break ; case SFC_GET_EMBED_FILE_INFO : if (data == NULL || datasize != sizeof (SF_EMBED_FILE_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; ((SF_EMBED_FILE_INFO*) data)->offset = psf->fileoffset ; ((SF_EMBED_FILE_INFO*) data)->length = psf->filelength ; break ; /* Lite remove start */ case SFC_TEST_IEEE_FLOAT_REPLACE : psf->ieee_replace = (datasize) ? SF_TRUE : SF_FALSE ; if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_FLOAT) float32_init (psf) ; else if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_DOUBLE) double64_init (psf) ; else return (psf->error = SFE_BAD_COMMAND_PARAM) ; break ; /* Lite remove end */ case SFC_SET_CLIPPING : psf->add_clipping = (datasize) ? SF_TRUE : SF_FALSE ; return psf->add_clipping ; case SFC_GET_CLIPPING : return psf->add_clipping ; case SFC_GET_LOOP_INFO : if (datasize != sizeof (SF_LOOP_INFO) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->loop_info == NULL) return SF_FALSE ; memcpy (data, psf->loop_info, sizeof (SF_LOOP_INFO)) ; return SF_TRUE ; case SFC_SET_BROADCAST_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 supports the BEXT (Broadcast) chunk. */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_WAVEX && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode. */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. 
*/ if (psf->broadcast_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (broadcast_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_BROADCAST_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return broadcast_var_get (psf, data, datasize) ; case SFC_SET_CART_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 support cart chunk format */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->cart_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (cart_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_CART_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return cart_var_get (psf, data, datasize) ; case SFC_GET_CUE_COUNT : if (datasize != sizeof (uint32_t) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues != NULL) { *((uint32_t *) data) = psf->cues->cue_count ; return SF_TRUE ; } ; return SF_FALSE ; case SFC_GET_CUE : if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL) return SF_FALSE ; psf_get_cues (psf, data, datasize) ; return SF_TRUE ; case SFC_SET_CUE : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL && (psf->cues = psf_cues_dup (data)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; return SF_TRUE ; case SFC_GET_INSTRUMENT : if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL) return SF_FALSE ; memcpy (data, psf->instrument, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_SET_INSTRUMENT : /* If data has already been written this must fail. 
*/ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->instrument, data, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_RAW_DATA_NEEDS_ENDSWAP : return psf->data_endswap ; case SFC_GET_CHANNEL_MAP_INFO : if (psf->channel_map == NULL) return SF_FALSE ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; memcpy (data, psf->channel_map, datasize) ; return SF_TRUE ; case SFC_SET_CHANNEL_MAP_INFO : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; { int *iptr ; for (iptr = data ; iptr < (int*) data + psf->sf.channels ; iptr++) { if (*iptr <= SF_CHANNEL_MAP_INVALID || *iptr >= SF_CHANNEL_MAP_MAX) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; } ; } ; free (psf->channel_map) ; if ((psf->channel_map = malloc (datasize)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->channel_map, data, datasize) ; /* ** Pass the command down to the container's command handler. ** Don't pass user data, use validated psf->channel_map data instead. */ if (psf->command) return psf->command (psf, command, NULL, 0) ; return SF_FALSE ; case SFC_SET_VBR_ENCODING_QUALITY : if (data == NULL || datasize != sizeof (double)) return SF_FALSE ; quality = *((double *) data) ; quality = 1.0 - SF_MAX (0.0, SF_MIN (1.0, quality)) ; return sf_command (sndfile, SFC_SET_COMPRESSION_LEVEL, &quality, sizeof (quality)) ; default : /* Must be a file specific command. Pass it on. */ if (psf->command) return psf->command (psf, command, data, datasize) ; psf_log_printf (psf, "*** sf_command : cmd = 0x%X\n", command) ; return (psf->error = SFE_BAD_COMMAND_PARAM) ; } ; return 0 ; } /* sf_command */ /*------------------------------------------------------------------------------ */ sf_count_t sf_seek (SNDFILE *sndfile, sf_count_t offset, int whence) { SF_PRIVATE *psf ; sf_count_t seek_from_start = 0, retval ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (! psf->sf.seekable) { psf->error = SFE_NOT_SEEKABLE ; return PSF_SEEK_ERROR ; } ; /* If the whence parameter has a mode ORed in, check to see that ** it makes sense. */ if (((whence & SFM_MASK) == SFM_WRITE && psf->file.mode == SFM_READ) || ((whence & SFM_MASK) == SFM_READ && psf->file.mode == SFM_WRITE)) { psf->error = SFE_WRONG_SEEK ; return PSF_SEEK_ERROR ; } ; /* Convert all SEEK_CUR and SEEK_END into seek_from_start to be ** used with SEEK_SET. */ switch (whence) { /* The SEEK_SET behaviour is independant of mode. */ case SEEK_SET : case SEEK_SET | SFM_READ : case SEEK_SET | SFM_WRITE : case SEEK_SET | SFM_RDWR : seek_from_start = offset ; break ; /* The SEEK_CUR is a little more tricky. 
*/ case SEEK_CUR : if (offset == 0) { if (psf->file.mode == SFM_READ) return psf->read_current ; if (psf->file.mode == SFM_WRITE) return psf->write_current ; } ; if (psf->file.mode == SFM_READ) seek_from_start = psf->read_current + offset ; else if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) seek_from_start = psf->write_current + offset ; else psf->error = SFE_AMBIGUOUS_SEEK ; break ; case SEEK_CUR | SFM_READ : if (offset == 0) return psf->read_current ; seek_from_start = psf->read_current + offset ; break ; case SEEK_CUR | SFM_WRITE : if (offset == 0) return psf->write_current ; seek_from_start = psf->write_current + offset ; break ; /* The SEEK_END */ case SEEK_END : case SEEK_END | SFM_READ : case SEEK_END | SFM_WRITE : seek_from_start = psf->sf.frames + offset ; break ; default : psf->error = SFE_BAD_SEEK ; break ; } ; if (psf->error) return PSF_SEEK_ERROR ; if (psf->file.mode == SFM_RDWR || psf->file.mode == SFM_WRITE) { if (seek_from_start < 0) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; } else if (seek_from_start < 0 || seek_from_start > psf->sf.frames) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; if (psf->seek) { int new_mode = (whence & SFM_MASK) ? (whence & SFM_MASK) : psf->file.mode ; retval = psf->seek (psf, new_mode, seek_from_start) ; switch (new_mode) { case SFM_READ : psf->read_current = retval ; break ; case SFM_WRITE : psf->write_current = retval ; break ; case SFM_RDWR : psf->read_current = retval ; psf->write_current = retval ; new_mode = SFM_READ ; break ; } ; psf->last_op = new_mode ; return retval ; } ; psf->error = SFE_AMBIGUOUS_SEEK ; return PSF_SEEK_ERROR ; } /* sf_seek */ /*------------------------------------------------------------------------------ */ const char* sf_get_string (SNDFILE *sndfile, int str_type) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return NULL ; if (psf->Magick != SNDFILE_MAGICK) return NULL ; return psf_get_string (psf, str_type) ; } /* sf_get_string */ int sf_set_string (SNDFILE *sndfile, int str_type, const char* str) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_set_string (psf, str_type, str) ; } /* sf_get_string */ /*------------------------------------------------------------------------------ */ int sf_current_byterate (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return -1 ; if (psf->Magick != SNDFILE_MAGICK) return -1 ; /* This should cover all PCM and floating point formats. */ if (psf->bytewidth) return psf->sf.samplerate * psf->sf.channels * psf->bytewidth ; if (psf->byterate) return psf->byterate (psf) ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_IMA_ADPCM : case SF_FORMAT_MS_ADPCM : case SF_FORMAT_VOX_ADPCM : return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_GSM610 : return (psf->sf.samplerate * psf->sf.channels * 13000) / 8000 ; case SF_FORMAT_G721_32 : /* 32kbs G721 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_G723_24 : /* 24kbs G723 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels * 3) / 8 ; case SF_FORMAT_G723_40 : /* 40kbs G723 ADPCM encoding. 
*/ return (psf->sf.samplerate * psf->sf.channels * 5) / 8 ; default : break ; } ; return -1 ; } /* sf_current_byterate */ /*============================================================================== */ sf_count_t sf_read_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) { SF_PRIVATE *psf ; sf_count_t count, extra ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (bytes < 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, bytes) ; return 0 ; } ; if (bytes % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf_fread (ptr, 1, bytes, psf) ; if (psf->read_current + count / blockwidth <= psf->sf.frames) psf->read_current += count / blockwidth ; else { count = (psf->sf.frames - psf->read_current) * blockwidth ; extra = bytes - count ; psf_memset (((char *) ptr) + count, 0, extra) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_short (SNDFILE *sndfile, short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (short)) ; return 0 ; /* End of file. */ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_short */ sf_count_t sf_readf_short (SNDFILE *sndfile, short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (short)) ; return 0 ; /* End of file. 
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_int */ sf_count_t sf_readf_int (SNDFILE *sndfile, int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, 
psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_float */ sf_count_t sf_readf_float (SNDFILE *sndfile, float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_double (SNDFILE *sndfile, double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_double */ sf_count_t sf_readf_double (SNDFILE *sndfile, double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; 
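/*
** The requested read ran past the last frame of the file : count has been
** clamped to the samples that actually remain, so the untouched tail of the
** caller's buffer is zeroed below and read_current is pinned to sf.frames
** before the shorter frame count is returned.
*/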
psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_double */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_raw (SNDFILE *sndfile, const void *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf_fwrite (ptr, 1, len, psf) ; psf->write_current += count / blockwidth ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_short (SNDFILE *sndfile, const short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_short */ sf_count_t sf_writef_short (SNDFILE *sndfile, const short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_int (SNDFILE *sndfile, const int *ptr, sf_count_t len) { SF_PRIVATE 
*psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_int */ sf_count_t sf_writef_int (SNDFILE *sndfile, const int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_float (SNDFILE *sndfile, const float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_float (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_float */ sf_count_t sf_writef_float (SNDFILE *sndfile, const float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; 
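/*
** Hand the interleaved samples (frames * channels items) to the codec's
** write_float callback, advance write_current by the frames actually written,
** grow sf.frames if the write went past the previous end, and re-emit the
** header when auto_header is enabled. Illustrative caller-side sketch only
** (file name, buffer and frames are placeholders supplied by the caller) :
**
**     SF_INFO info = { 0 } ;
**     info.samplerate = 44100 ; info.channels = 2 ;
**     info.format = SF_FORMAT_WAV | SF_FORMAT_FLOAT ;
**     SNDFILE *out = sf_open ("out.wav", SFM_WRITE, &info) ;
**     sf_writef_float (out, buffer, frames) ;
**     sf_close (out) ;
*/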
count = psf->write_float (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_double (SNDFILE *sndfile, const double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_double */ sf_count_t sf_writef_double (SNDFILE *sndfile, const double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_double */ /*========================================================================= ** Private functions. */ static int try_resource_fork (SF_PRIVATE * psf) { int old_error = psf->error ; /* Set READ mode now, to see if resource fork exists. */ psf->rsrc.mode = SFM_READ ; if (psf_open_rsrc (psf) != 0) { psf->error = old_error ; return 0 ; } ; /* More checking here. */ psf_log_printf (psf, "Resource fork : %s\n", psf->rsrc.path.c) ; return SF_FORMAT_SD2 ; } /* try_resource_fork */ static int format_from_extension (SF_PRIVATE *psf) { char *cptr ; char buffer [16] ; int format = 0 ; if ((cptr = strrchr (psf->file.name.c, '.')) == NULL) return 0 ; cptr ++ ; if (strlen (cptr) > sizeof (buffer) - 1) return 0 ; psf_strlcpy (buffer, sizeof (buffer), cptr) ; buffer [sizeof (buffer) - 1] = 0 ; /* Convert everything in the buffer to lower case. 
*/ cptr = buffer ; while (*cptr) { *cptr = tolower (*cptr) ; cptr ++ ; } ; cptr = buffer ; if (strcmp (cptr, "au") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "snd") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "vox") == 0 || strcmp (cptr, "vox8") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "vox6") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 6000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "gsm") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_GSM610 ; } /* For RAW files, make sure the dataoffset if set correctly. */ if ((SF_CONTAINER (format)) == SF_FORMAT_RAW) psf->dataoffset = 0 ; return format ; } /* format_from_extension */ static int guess_file_type (SF_PRIVATE *psf) { uint32_t buffer [3], format ; if (psf_binheader_readf (psf, "b", &buffer, SIGNED_SIZEOF (buffer)) != SIGNED_SIZEOF (buffer)) { psf->error = SFE_BAD_FILE_READ ; return 0 ; } ; if ((buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'F') || buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'X')) && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_WAV ; if (buffer [0] == MAKE_MARKER ('F', 'O', 'R', 'M')) { if (buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'F') || buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'C')) return SF_FORMAT_AIFF ; if (buffer [2] == MAKE_MARKER ('8', 'S', 'V', 'X') || buffer [2] == MAKE_MARKER ('1', '6', 'S', 'V')) return SF_FORMAT_SVX ; return 0 ; } ; if (buffer [0] == MAKE_MARKER ('.', 's', 'n', 'd') || buffer [0] == MAKE_MARKER ('d', 'n', 's', '.')) return SF_FORMAT_AU ; if ((buffer [0] == MAKE_MARKER ('f', 'a', 'p', ' ') || buffer [0] == MAKE_MARKER (' ', 'p', 'a', 'f'))) return SF_FORMAT_PAF ; if (buffer [0] == MAKE_MARKER ('N', 'I', 'S', 'T')) return SF_FORMAT_NIST ; if (buffer [0] == MAKE_MARKER ('C', 'r', 'e', 'a') && buffer [1] == MAKE_MARKER ('t', 'i', 'v', 'e')) return SF_FORMAT_VOC ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0xF8, 0xFF)) == MAKE_MARKER (0x64, 0xA3, 0x00, 0x00) || (buffer [0] & MAKE_MARKER (0xFF, 0xF8, 0xFF, 0xFF)) == MAKE_MARKER (0x00, 0x00, 0xA3, 0x64)) return SF_FORMAT_IRCAM ; if (buffer [0] == MAKE_MARKER ('r', 'i', 'f', 'f')) return SF_FORMAT_W64 ; if (buffer [0] == MAKE_MARKER (0, 0, 0x03, 0xE8) && buffer [1] == MAKE_MARKER (0, 0, 0, 1) && buffer [2] == MAKE_MARKER (0, 0, 0, 1)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER (0, 0, 0, 0) && buffer [1] == MAKE_MARKER (1, 0, 0, 0) && buffer [2] == MAKE_MARKER (1, 0, 0, 0)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER ('M', 'A', 'T', 'L') && buffer [1] == MAKE_MARKER ('A', 'B', ' ', '5')) return SF_FORMAT_MAT5 ; if (buffer [0] == MAKE_MARKER ('P', 'V', 'F', '1')) return SF_FORMAT_PVF ; if (buffer [0] == MAKE_MARKER ('E', 'x', 't', 'e') && buffer [1] == MAKE_MARKER ('n', 'd', 'e', 'd') && buffer [2] == MAKE_MARKER (' ', 'I', 'n', 's')) return SF_FORMAT_XI ; if (buffer [0] == MAKE_MARKER ('c', 'a', 'f', 'f') && buffer [2] == MAKE_MARKER ('d', 'e', 's', 'c')) return SF_FORMAT_CAF ; if (buffer [0] == MAKE_MARKER ('O', 'g', 'g', 'S')) return SF_FORMAT_OGG ; if (buffer [0] == MAKE_MARKER ('A', 'L', 'a', 'w') && buffer [1] == MAKE_MARKER ('S', 'o', 'u', 'n') && buffer [2] == MAKE_MARKER ('d', 'F', 'i', 'l')) return SF_FORMAT_WVE ; if (buffer [0] == MAKE_MARKER ('D', 'i', 'a', 'm') && buffer 
[1] == MAKE_MARKER ('o', 'n', 'd', 'W') && buffer [2] == MAKE_MARKER ('a', 'r', 'e', ' ')) return SF_FORMAT_DWD ; if (buffer [0] == MAKE_MARKER ('L', 'M', '8', '9') || buffer [0] == MAKE_MARKER ('5', '3', 0, 0)) return SF_FORMAT_TXW ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0x80, 0xFF)) == MAKE_MARKER (0xF0, 0x7E, 0, 0x01)) return SF_FORMAT_SDS ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0, 0)) == MAKE_MARKER (1, 4, 0, 0)) return SF_FORMAT_MPC2K ; if (buffer [0] == MAKE_MARKER ('C', 'A', 'T', ' ') && buffer [2] == MAKE_MARKER ('R', 'E', 'X', '2')) return SF_FORMAT_REX2 ; if (buffer [0] == MAKE_MARKER (0x30, 0x26, 0xB2, 0x75) && buffer [1] == MAKE_MARKER (0x8E, 0x66, 0xCF, 0x11)) return 0 /*-SF_FORMAT_WMA-*/ ; /* HMM (Hidden Markov Model) Tool Kit. */ if (buffer [2] == MAKE_MARKER (0, 2, 0, 0) && 2 * ((int64_t) BE2H_32 (buffer [0])) + 12 == psf->filelength) return SF_FORMAT_HTK ; if (buffer [0] == MAKE_MARKER ('f', 'L', 'a', 'C')) return SF_FORMAT_FLAC ; if (buffer [0] == MAKE_MARKER ('2', 'B', 'I', 'T')) return SF_FORMAT_AVR ; if (buffer [0] == MAKE_MARKER ('R', 'F', '6', '4') && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_RF64 ; if (buffer [0] == MAKE_MARKER ('I', 'D', '3', 3)) { psf_log_printf (psf, "Found 'ID3' marker.\n") ; if (id3_skip (psf)) return guess_file_type (psf) ; return 0 ; } ; /* Turtle Beach SMP 16-bit */ if (buffer [0] == MAKE_MARKER ('S', 'O', 'U', 'N') && buffer [1] == MAKE_MARKER ('D', ' ', 'S', 'A')) return 0 ; /* Yamaha sampler format. */ if (buffer [0] == MAKE_MARKER ('S', 'Y', '8', '0') || buffer [0] == MAKE_MARKER ('S', 'Y', '8', '5')) return 0 ; if (buffer [0] == MAKE_MARKER ('a', 'j', 'k', 'g')) return 0 /*-SF_FORMAT_SHN-*/ ; /* This must be the last one. */ if (psf->filelength > 0 && (format = try_resource_fork (psf)) != 0) return format ; return 0 ; } /* guess_file_type */ static int validate_sfinfo (SF_INFO *sfinfo) { if (sfinfo->samplerate < 1) return 0 ; if (sfinfo->frames < 0) return 0 ; if (sfinfo->channels < 1) return 0 ; if ((SF_CONTAINER (sfinfo->format)) == 0) return 0 ; if ((SF_CODEC (sfinfo->format)) == 0) return 0 ; if (sfinfo->sections < 1) return 0 ; return 1 ; } /* validate_sfinfo */ static int validate_psf (SF_PRIVATE *psf) { if (psf->datalength < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : datalength == %D.\n", psf->datalength) ; return 0 ; } ; if (psf->dataoffset < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : dataoffset == %D.\n", psf->dataoffset) ; return 0 ; } ; if (psf->blockwidth && psf->blockwidth != psf->sf.channels * psf->bytewidth) { psf_log_printf (psf, "Invalid SF_PRIVATE field : channels * bytewidth == %d.\n", psf->sf.channels * psf->bytewidth) ; return 0 ; } ; return 1 ; } /* validate_psf */ static void save_header_info (SF_PRIVATE *psf) { snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; } /* save_header_info */ static int copy_filename (SF_PRIVATE *psf, const char *path) { const char *ccptr ; char *cptr ; if (strlen (path) > 1 && strlen (path) - 1 >= sizeof (psf->file.path.c)) { psf->error = SFE_FILENAME_TOO_LONG ; return psf->error ; } ; snprintf (psf->file.path.c, sizeof (psf->file.path.c), "%s", path) ; if ((ccptr = strrchr (path, '/')) || (ccptr = strrchr (path, '\\'))) ccptr ++ ; else ccptr = path ; snprintf (psf->file.name.c, sizeof (psf->file.name.c), "%s", ccptr) ; /* Now grab the directory. 
*/ snprintf (psf->file.dir.c, sizeof (psf->file.dir.c), "%s", path) ; if ((cptr = strrchr (psf->file.dir.c, '/')) || (cptr = strrchr (psf->file.dir.c, '\\'))) cptr [1] = 0 ; else psf->file.dir.c [0] = 0 ; return 0 ; } /* copy_filename */ /*============================================================================== */ static int psf_close (SF_PRIVATE *psf) { uint32_t k ; int error = 0 ; if (psf->codec_close) { error = psf->codec_close (psf) ; /* To prevent it being called in psf->container_close(). */ psf->codec_close = NULL ; } ; if (psf->container_close) error = psf->container_close (psf) ; error = psf_fclose (psf) ; psf_close_rsrc (psf) ; /* For an ISO C compliant implementation it is ok to free a NULL pointer. */ free (psf->container_data) ; free (psf->codec_data) ; free (psf->interleave) ; free (psf->dither) ; free (psf->peak_info) ; free (psf->broadcast_16k) ; free (psf->loop_info) ; free (psf->instrument) ; free (psf->cues) ; free (psf->channel_map) ; free (psf->format_desc) ; free (psf->strings.storage) ; if (psf->wchunks.chunks) for (k = 0 ; k < psf->wchunks.used ; k++) free (psf->wchunks.chunks [k].data) ; free (psf->rchunks.chunks) ; free (psf->wchunks.chunks) ; free (psf->iterator) ; free (psf->cart_16k) ; memset (psf, 0, sizeof (SF_PRIVATE)) ; free (psf) ; return error ; } /* psf_close */ SNDFILE * psf_open_file (SF_PRIVATE *psf, SF_INFO *sfinfo) { int error, format ; sf_errno = error = 0 ; sf_parselog [0] = 0 ; if (psf->error) { error = psf->error ; goto error_exit ; } ; if (psf->file.mode != SFM_READ && psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) { error = SFE_BAD_OPEN_MODE ; goto error_exit ; } ; if (sfinfo == NULL) { error = SFE_BAD_SF_INFO_PTR ; goto error_exit ; } ; if (psf->file.mode == SFM_READ) { if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_RAW) { if (sf_format_check (sfinfo) == 0) { error = SFE_RAW_BAD_FORMAT ; goto error_exit ; } ; } else memset (sfinfo, 0, sizeof (SF_INFO)) ; } ; memcpy (&psf->sf, sfinfo, sizeof (SF_INFO)) ; psf->Magick = SNDFILE_MAGICK ; psf->norm_float = SF_TRUE ; psf->norm_double = SF_TRUE ; psf->dataoffset = -1 ; psf->datalength = -1 ; psf->read_current = -1 ; psf->write_current = -1 ; psf->auto_header = SF_FALSE ; psf->rwf_endian = SF_ENDIAN_LITTLE ; psf->seek = psf_default_seek ; psf->float_int_mult = 0 ; psf->float_max = -1.0 ; /* An attempt at a per SF_PRIVATE unique id. */ psf->unique_id = psf_rand_int32 () ; psf->sf.sections = 1 ; psf->is_pipe = psf_is_pipe (psf) ; if (psf->is_pipe) { psf->sf.seekable = SF_FALSE ; psf->filelength = SF_COUNT_MAX ; } else { psf->sf.seekable = SF_TRUE ; /* File is open, so get the length. 
*/ psf->filelength = psf_get_filelen (psf) ; } ; if (psf->fileoffset > 0) { switch (psf->file.mode) { case SFM_READ : if (psf->filelength < 44) { psf_log_printf (psf, "Short filelength: %D (fileoffset: %D)\n", psf->filelength, psf->fileoffset) ; error = SFE_BAD_OFFSET ; goto error_exit ; } ; break ; case SFM_WRITE : psf->fileoffset = 0 ; psf_fseek (psf, 0, SEEK_END) ; psf->fileoffset = psf_ftell (psf) ; break ; case SFM_RDWR : error = SFE_NO_EMBEDDED_RDWR ; goto error_exit ; } ; psf_log_printf (psf, "Embedded file offset : %D\n", psf->fileoffset) ; } ; if (psf->filelength == SF_COUNT_MAX) psf_log_printf (psf, "Length : unknown\n") ; else psf_log_printf (psf, "Length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->filelength == 0)) { /* If the file is being opened for write or RDWR and the file is currently ** empty, then the SF_INFO struct must contain valid data. */ if ((SF_CONTAINER (psf->sf.format)) == 0) { error = SFE_ZERO_MAJOR_FORMAT ; goto error_exit ; } ; if ((SF_CODEC (psf->sf.format)) == 0) { error = SFE_ZERO_MINOR_FORMAT ; goto error_exit ; } ; if (sf_format_check (&psf->sf) == 0) { error = SFE_BAD_OPEN_FORMAT ; goto error_exit ; } ; } else if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) { /* If type RAW has not been specified then need to figure out file type. */ psf->sf.format = guess_file_type (psf) ; if (psf->sf.format == 0) psf->sf.format = format_from_extension (psf) ; } ; /* Prevent unnecessary seeks */ psf->last_op = psf->file.mode ; /* Set bytewidth if known. */ switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_S8 : case SF_FORMAT_PCM_U8 : case SF_FORMAT_ULAW : case SF_FORMAT_ALAW : case SF_FORMAT_DPCM_8 : psf->bytewidth = 1 ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_DPCM_16 : psf->bytewidth = 2 ; break ; case SF_FORMAT_PCM_24 : psf->bytewidth = 3 ; break ; case SF_FORMAT_PCM_32 : case SF_FORMAT_FLOAT : psf->bytewidth = 4 ; break ; case SF_FORMAT_DOUBLE : psf->bytewidth = 8 ; break ; } ; /* Call the initialisation function for the relevant file type. 
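** For example (an illustrative note, with the format value chosen purely for the example) :
** a format of (SF_FORMAT_WAV | SF_FORMAT_PCM_16) has SF_CONTAINER () equal to SF_FORMAT_WAV,
** so the switch below dispatches to wav_open (), and the codec part (SF_FORMAT_PCM_16) is
** then handled inside that container open function.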
*/ switch (SF_CONTAINER (psf->sf.format)) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : error = wav_open (psf) ; break ; case SF_FORMAT_AIFF : error = aiff_open (psf) ; break ; case SF_FORMAT_AU : error = au_open (psf) ; break ; case SF_FORMAT_RAW : error = raw_open (psf) ; break ; case SF_FORMAT_W64 : error = w64_open (psf) ; break ; case SF_FORMAT_RF64 : error = rf64_open (psf) ; break ; /* Lite remove start */ case SF_FORMAT_PAF : error = paf_open (psf) ; break ; case SF_FORMAT_SVX : error = svx_open (psf) ; break ; case SF_FORMAT_NIST : error = nist_open (psf) ; break ; case SF_FORMAT_IRCAM : error = ircam_open (psf) ; break ; case SF_FORMAT_VOC : error = voc_open (psf) ; break ; case SF_FORMAT_SDS : error = sds_open (psf) ; break ; case SF_FORMAT_OGG : error = ogg_open (psf) ; break ; case SF_FORMAT_TXW : error = txw_open (psf) ; break ; case SF_FORMAT_WVE : error = wve_open (psf) ; break ; case SF_FORMAT_DWD : error = dwd_open (psf) ; break ; case SF_FORMAT_MAT4 : error = mat4_open (psf) ; break ; case SF_FORMAT_MAT5 : error = mat5_open (psf) ; break ; case SF_FORMAT_PVF : error = pvf_open (psf) ; break ; case SF_FORMAT_XI : error = xi_open (psf) ; break ; case SF_FORMAT_HTK : error = htk_open (psf) ; break ; case SF_FORMAT_SD2 : error = sd2_open (psf) ; break ; case SF_FORMAT_REX2 : error = rx2_open (psf) ; break ; case SF_FORMAT_AVR : error = avr_open (psf) ; break ; case SF_FORMAT_FLAC : error = flac_open (psf) ; break ; case SF_FORMAT_CAF : error = caf_open (psf) ; break ; case SF_FORMAT_MPC2K : error = mpc2k_open (psf) ; break ; /* Lite remove end */ default : error = SFE_UNKNOWN_FORMAT ; } ; if (error) goto error_exit ; /* For now, check whether embedding is supported. */ format = SF_CONTAINER (psf->sf.format) ; if (psf->fileoffset > 0) { switch (format) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_AIFF : case SF_FORMAT_AU : /* Actual embedded files. */ break ; case SF_FORMAT_FLAC : /* Flac with an ID3v2 header? */ break ; default : error = SFE_NO_EMBED_SUPPORT ; goto error_exit ; } ; } ; if (psf->fileoffset > 0) psf_log_printf (psf, "Embedded file length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_RDWR && sf_format_check (&psf->sf) == 0) { error = SFE_BAD_MODE_RW ; goto error_exit ; } ; if (validate_sfinfo (&psf->sf) == 0) { psf_log_SF_INFO (psf) ; save_header_info (psf) ; error = SFE_BAD_SF_INFO ; goto error_exit ; } ; if (validate_psf (psf) == 0) { save_header_info (psf) ; error = SFE_INTERNAL ; goto error_exit ; } ; psf->read_current = 0 ; psf->write_current = 0 ; if (psf->file.mode == SFM_RDWR) { psf->write_current = psf->sf.frames ; psf->have_written = psf->sf.frames > 0 ? SF_TRUE : SF_FALSE ; } ; memcpy (sfinfo, &psf->sf, sizeof (SF_INFO)) ; if (psf->file.mode == SFM_WRITE) { /* Zero out these fields. */ sfinfo->frames = 0 ; sfinfo->sections = 0 ; sfinfo->seekable = 0 ; } ; return (SNDFILE *) psf ; error_exit : sf_errno = error ; if (error == SFE_SYSTEM) snprintf (sf_syserr, sizeof (sf_syserr), "%s", psf->syserr) ; snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; switch (error) { case SF_ERR_SYSTEM : case SF_ERR_UNSUPPORTED_ENCODING : case SFE_UNIMPLEMENTED : break ; case SFE_RAW_BAD_FORMAT : break ; default : if (psf->file.mode == SFM_READ) { psf_log_printf (psf, "Parse error : %s\n", sf_error_number (error)) ; error = SF_ERR_MALFORMED_FILE ; } ; } ; psf_close (psf) ; return NULL ; } /* psf_open_file */ /*============================================================================== ** Chunk getting and setting. 
** This works for AIFF, CAF, RF64 and WAV. ** It doesn't work for W64 because W64 uses weird GUID style chunk markers. */ int sf_set_chunk (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->set_chunk) return psf->set_chunk (psf, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_set_chunk */ SF_CHUNK_ITERATOR * sf_get_chunk_iterator (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info) return psf_get_chunk_iterator (psf, chunk_info->id) ; return psf_get_chunk_iterator (psf, NULL) ; } /* sf_get_chunk_iterator */ SF_CHUNK_ITERATOR * sf_next_chunk_iterator (SF_CHUNK_ITERATOR * iterator) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->next_chunk_iterator) return psf->next_chunk_iterator (psf, iterator) ; return NULL ; } /* sf_get_chunk_iterator_next */ int sf_get_chunk_size (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_size) return psf->get_chunk_size (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; return 0 ; } /* sf_get_chunk_size */ int sf_get_chunk_data (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_data) return psf->get_chunk_data (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_get_chunk_data */
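/*
** Usage sketch (illustrative only, not part of the original source) : iterating over the
** raw chunks of a WAV/AIFF/CAF/RF64 file with the chunk API above. The function name
** example_dump_chunks is hypothetical, and the sketch assumes <stdio.h>, <stdlib.h> and
** <string.h> are available for printf / malloc / free / memset.
*/

static void
example_dump_chunks (SNDFILE *sndfile)
{	SF_CHUNK_ITERATOR *iterator ;
	SF_CHUNK_INFO info ;

	/* Passing NULL instead of an SF_CHUNK_INFO requests an iterator over every chunk. */
	iterator = sf_get_chunk_iterator (sndfile, NULL) ;

	while (iterator != NULL)
	{	memset (&info, 0, sizeof (info)) ;

		/* Query the chunk size first, then allocate a buffer and fetch the data. */
		if (sf_get_chunk_size (iterator, &info) == SF_ERR_NO_ERROR && info.datalen > 0)
		{	info.data = malloc (info.datalen) ;
			if (info.data != NULL && sf_get_chunk_data (iterator, &info) == SF_ERR_NO_ERROR)
				printf ("chunk '%.*s' : %u bytes\n", (int) info.id_size, info.id, info.datalen) ;
			free (info.data) ;
			} ;

		iterator = sf_next_chunk_iterator (iterator) ;
		} ;
} /* example_dump_chunks */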
/* ** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #define SNDFILE_MAGICK 0x1234C0DE #ifdef __APPLE__ /* ** Detect if a compile for a universal binary is being attempted and barf if it is. ** See the URL below for the rationale. */ #ifdef __BIG_ENDIAN__ #if (CPU_IS_LITTLE_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #ifdef __LITTLE_ENDIAN__ #if (CPU_IS_BIG_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #endif typedef struct { int error ; const char *str ; } ErrorStruct ; static ErrorStruct SndfileErrors [] = { /* Public error values and their associated strings. */ { SF_ERR_NO_ERROR , "No Error." }, { SF_ERR_UNRECOGNISED_FORMAT , "Format not recognised." }, { SF_ERR_SYSTEM , "System error." /* Often replaced. */ }, { SF_ERR_MALFORMED_FILE , "Supported file format but file is malformed." }, { SF_ERR_UNSUPPORTED_ENCODING , "Supported file format but unsupported encoding." }, /* Private error values and their associated strings. */ { SFE_ZERO_MAJOR_FORMAT , "Error : major format is 0." }, { SFE_ZERO_MINOR_FORMAT , "Error : minor format is 0." }, { SFE_BAD_FILE , "File does not exist or is not a regular file (possibly a pipe?)." }, { SFE_BAD_FILE_READ , "File exists but no data could be read." }, { SFE_OPEN_FAILED , "Could not open file." }, { SFE_BAD_SNDFILE_PTR , "Not a valid SNDFILE* pointer." }, { SFE_BAD_SF_INFO_PTR , "NULL SF_INFO pointer passed to libsndfile." }, { SFE_BAD_SF_INCOMPLETE , "SF_PRIVATE struct incomplete and end of header parsing." }, { SFE_BAD_FILE_PTR , "Bad FILE pointer." }, { SFE_BAD_INT_PTR , "Internal error, Bad pointer." }, { SFE_BAD_STAT_SIZE , "Error : software was misconfigured at compile time (sizeof statbuf.st_size)." }, { SFE_NO_TEMP_DIR , "Error : Could not file temp dir." }, { SFE_MALLOC_FAILED , "Internal malloc () failed." }, { SFE_UNIMPLEMENTED , "File contains data in an unimplemented format." }, { SFE_BAD_READ_ALIGN , "Attempt to read a non-integer number of channels." }, { SFE_BAD_WRITE_ALIGN , "Attempt to write a non-integer number of channels." }, { SFE_UNKNOWN_FORMAT , "File contains data in an unknown format." }, { SFE_NOT_READMODE , "Read attempted on file currently open for write." }, { SFE_NOT_WRITEMODE , "Write attempted on file currently open for read." }, { SFE_BAD_MODE_RW , "Error : This file format does not support read/write mode." }, { SFE_BAD_SF_INFO , "Internal error : SF_INFO struct incomplete." }, { SFE_BAD_OFFSET , "Error : supplied offset beyond end of file." 
}, { SFE_NO_EMBED_SUPPORT , "Error : embedding not supported for this file format." }, { SFE_NO_EMBEDDED_RDWR , "Error : cannot open embedded file read/write." }, { SFE_NO_PIPE_WRITE , "Error : this file format does not support pipe write." }, { SFE_BAD_VIRTUAL_IO , "Error : bad pointer on SF_VIRTUAL_IO struct." }, { SFE_BAD_BROADCAST_INFO_SIZE , "Error : bad coding_history_size in SF_BROADCAST_INFO struct." }, { SFE_BAD_BROADCAST_INFO_TOO_BIG , "Error : SF_BROADCAST_INFO struct too large." }, { SFE_BAD_CART_INFO_SIZE , "Error: SF_CART_INFO struct too large." }, { SFE_BAD_CART_INFO_TOO_BIG , "Error: bag tag_text_size in SF_CART_INFO struct." }, { SFE_INTERLEAVE_MODE , "Attempt to write to file with non-interleaved data." }, { SFE_INTERLEAVE_SEEK , "Bad karma in seek during interleave read operation." }, { SFE_INTERLEAVE_READ , "Bad karma in read during interleave read operation." }, { SFE_INTERNAL , "Unspecified internal error." }, { SFE_BAD_COMMAND_PARAM , "Bad parameter passed to function sf_command." }, { SFE_BAD_ENDIAN , "Bad endian-ness. Try default endian-ness" }, { SFE_CHANNEL_COUNT_ZERO , "Channel count is zero." }, { SFE_CHANNEL_COUNT , "Too many channels specified." }, { SFE_CHANNEL_COUNT_BAD , "Bad channel count." }, { SFE_BAD_SEEK , "Internal psf_fseek() failed." }, { SFE_NOT_SEEKABLE , "Seek attempted on unseekable file type." }, { SFE_AMBIGUOUS_SEEK , "Error : combination of file open mode and seek command is ambiguous." }, { SFE_WRONG_SEEK , "Error : invalid seek parameters." }, { SFE_SEEK_FAILED , "Error : parameters OK, but psf_seek() failed." }, { SFE_BAD_OPEN_MODE , "Error : bad mode parameter for file open." }, { SFE_OPEN_PIPE_RDWR , "Error : attempt to open a pipe in read/write mode." }, { SFE_RDWR_POSITION , "Error on RDWR position (cryptic)." }, { SFE_RDWR_BAD_HEADER , "Error : Cannot open file in read/write mode due to string data in header." }, { SFE_CMD_HAS_DATA , "Error : Command fails because file already has audio data." }, { SFE_STR_NO_SUPPORT , "Error : File type does not support string data." }, { SFE_STR_NOT_WRITE , "Error : Trying to set a string when file is not in write mode." }, { SFE_STR_MAX_DATA , "Error : Maximum string data storage reached." }, { SFE_STR_MAX_COUNT , "Error : Maximum string data count reached." }, { SFE_STR_BAD_TYPE , "Error : Bad string data type." }, { SFE_STR_NO_ADD_END , "Error : file type does not support strings added at end of file." }, { SFE_STR_BAD_STRING , "Error : bad string." }, { SFE_STR_WEIRD , "Error : Weird string error." }, { SFE_WAV_NO_RIFF , "Error in WAV file. No 'RIFF' chunk marker." }, { SFE_WAV_NO_WAVE , "Error in WAV file. No 'WAVE' chunk marker." }, { SFE_WAV_NO_FMT , "Error in WAV/W64/RF64 file. No 'fmt ' chunk marker." }, { SFE_WAV_BAD_FMT , "Error in WAV/W64/RF64 file. Malformed 'fmt ' chunk." }, { SFE_WAV_FMT_SHORT , "Error in WAV/W64/RF64 file. Short 'fmt ' chunk." }, { SFE_WAV_BAD_FACT , "Error in WAV file. 'fact' chunk out of place." }, { SFE_WAV_BAD_PEAK , "Error in WAV file. Bad 'PEAK' chunk." }, { SFE_WAV_PEAK_B4_FMT , "Error in WAV file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_WAV_BAD_FORMAT , "Error in WAV file. Errors in 'fmt ' chunk." }, { SFE_WAV_BAD_BLOCKALIGN , "Error in WAV file. Block alignment in 'fmt ' chunk is incorrect." }, { SFE_WAV_NO_DATA , "Error in WAV file. No 'data' chunk marker." }, { SFE_WAV_BAD_LIST , "Error in WAV file. Malformed LIST chunk." }, { SFE_WAV_UNKNOWN_CHUNK , "Error in WAV file. File contains an unknown chunk marker." 
}, { SFE_WAV_WVPK_DATA , "Error in WAV file. Data is in WAVPACK format." }, { SFE_WAV_ADPCM_NOT4BIT , "Error in ADPCM WAV file. Invalid bit width." }, { SFE_WAV_ADPCM_CHANNELS , "Error in ADPCM WAV file. Invalid number of channels." }, { SFE_WAV_ADPCM_SAMPLES , "Error in ADPCM WAV file. Invalid number of samples per block." }, { SFE_WAV_GSM610_FORMAT , "Error in GSM610 WAV file. Invalid format chunk." }, { SFE_AIFF_NO_FORM , "Error in AIFF file, bad 'FORM' marker." }, { SFE_AIFF_AIFF_NO_FORM , "Error in AIFF file, 'AIFF' marker without 'FORM'." }, { SFE_AIFF_COMM_NO_FORM , "Error in AIFF file, 'COMM' marker without 'FORM'." }, { SFE_AIFF_SSND_NO_COMM , "Error in AIFF file, 'SSND' marker without 'COMM'." }, { SFE_AIFF_UNKNOWN_CHUNK , "Error in AIFF file, unknown chunk." }, { SFE_AIFF_COMM_CHUNK_SIZE, "Error in AIFF file, bad 'COMM' chunk size." }, { SFE_AIFF_BAD_COMM_CHUNK , "Error in AIFF file, bad 'COMM' chunk." }, { SFE_AIFF_PEAK_B4_COMM , "Error in AIFF file. 'PEAK' chunk found before 'COMM' chunk." }, { SFE_AIFF_BAD_PEAK , "Error in AIFF file. Bad 'PEAK' chunk." }, { SFE_AIFF_NO_SSND , "Error in AIFF file, bad 'SSND' chunk." }, { SFE_AIFF_NO_DATA , "Error in AIFF file, no sound data." }, { SFE_AIFF_RW_SSND_NOT_LAST, "Error in AIFF file, RDWR only possible if SSND chunk at end of file." }, { SFE_AU_UNKNOWN_FORMAT , "Error in AU file, unknown format." }, { SFE_AU_NO_DOTSND , "Error in AU file, missing '.snd' or 'dns.' marker." }, { SFE_AU_EMBED_BAD_LEN , "Embedded AU file with unknown length." }, { SFE_RAW_READ_BAD_SPEC , "Error while opening RAW file for read. Must specify format and channels.\n" "Possibly trying to open unsupported format." }, { SFE_RAW_BAD_BITWIDTH , "Error. RAW file bitwidth must be a multiple of 8." }, { SFE_RAW_BAD_FORMAT , "Error. Bad format field in SF_INFO struct when opening a RAW file for read." }, { SFE_PAF_NO_MARKER , "Error in PAF file, no marker." }, { SFE_PAF_VERSION , "Error in PAF file, bad version." }, { SFE_PAF_UNKNOWN_FORMAT , "Error in PAF file, unknown format." }, { SFE_PAF_SHORT_HEADER , "Error in PAF file. File shorter than minimal header." }, { SFE_PAF_BAD_CHANNELS , "Error in PAF file. Bad channel count." }, { SFE_SVX_NO_FORM , "Error in 8SVX / 16SV file, no 'FORM' marker." }, { SFE_SVX_NO_BODY , "Error in 8SVX / 16SV file, no 'BODY' marker." }, { SFE_SVX_NO_DATA , "Error in 8SVX / 16SV file, no sound data." }, { SFE_SVX_BAD_COMP , "Error in 8SVX / 16SV file, unsupported compression format." }, { SFE_SVX_BAD_NAME_LENGTH , "Error in 8SVX / 16SV file, NAME chunk too long." }, { SFE_NIST_BAD_HEADER , "Error in NIST file, bad header." }, { SFE_NIST_CRLF_CONVERISON, "Error : NIST file damaged by Windows CR -> CRLF conversion process." }, { SFE_NIST_BAD_ENCODING , "Error in NIST file, unsupported compression format." }, { SFE_VOC_NO_CREATIVE , "Error in VOC file, no 'Creative Voice File' marker." }, { SFE_VOC_BAD_FORMAT , "Error in VOC file, bad format." }, { SFE_VOC_BAD_VERSION , "Error in VOC file, bad version number." }, { SFE_VOC_BAD_MARKER , "Error in VOC file, bad marker in file." }, { SFE_VOC_BAD_SECTIONS , "Error in VOC file, incompatible VOC sections." }, { SFE_VOC_MULTI_SAMPLERATE, "Error in VOC file, more than one sample rate defined." }, { SFE_VOC_MULTI_SECTION , "Unimplemented VOC file feature, file contains multiple sound sections." }, { SFE_VOC_MULTI_PARAM , "Error in VOC file, file contains multiple bit or channel widths." }, { SFE_VOC_SECTION_COUNT , "Error in VOC file, too many sections." 
}, { SFE_VOC_NO_PIPE , "Error : not able to operate on VOC files over a pipe." }, { SFE_IRCAM_NO_MARKER , "Error in IRCAM file, bad IRCAM marker." }, { SFE_IRCAM_BAD_CHANNELS , "Error in IRCAM file, bad channel count." }, { SFE_IRCAM_UNKNOWN_FORMAT, "Error in IRCAM file, unknown encoding format." }, { SFE_W64_64_BIT , "Error in W64 file, file contains 64 bit offset." }, { SFE_W64_NO_RIFF , "Error in W64 file. No 'riff' chunk marker." }, { SFE_W64_NO_WAVE , "Error in W64 file. No 'wave' chunk marker." }, { SFE_W64_NO_DATA , "Error in W64 file. No 'data' chunk marker." }, { SFE_W64_ADPCM_NOT4BIT , "Error in ADPCM W64 file. Invalid bit width." }, { SFE_W64_ADPCM_CHANNELS , "Error in ADPCM W64 file. Invalid number of channels." }, { SFE_W64_GSM610_FORMAT , "Error in GSM610 W64 file. Invalid format chunk." }, { SFE_MAT4_BAD_NAME , "Error in MAT4 file. No variable name." }, { SFE_MAT4_NO_SAMPLERATE , "Error in MAT4 file. No sample rate." }, { SFE_MAT5_BAD_ENDIAN , "Error in MAT5 file. Not able to determine endian-ness." }, { SFE_MAT5_NO_BLOCK , "Error in MAT5 file. Bad block structure." }, { SFE_MAT5_SAMPLE_RATE , "Error in MAT5 file. Not able to determine sample rate." }, { SFE_PVF_NO_PVF1 , "Error in PVF file. No PVF1 marker." }, { SFE_PVF_BAD_HEADER , "Error in PVF file. Bad header." }, { SFE_PVF_BAD_BITWIDTH , "Error in PVF file. Bad bit width." }, { SFE_XI_BAD_HEADER , "Error in XI file. Bad header." }, { SFE_XI_EXCESS_SAMPLES , "Error in XI file. Excess samples in file." }, { SFE_XI_NO_PIPE , "Error : not able to operate on XI files over a pipe." }, { SFE_HTK_NO_PIPE , "Error : not able to operate on HTK files over a pipe." }, { SFE_SDS_NOT_SDS , "Error : not an SDS file." }, { SFE_SDS_BAD_BIT_WIDTH , "Error : bad bit width for SDS file." }, { SFE_SD2_FD_DISALLOWED , "Error : cannot open SD2 file without a file name." }, { SFE_SD2_BAD_DATA_OFFSET , "Error : bad data offset." }, { SFE_SD2_BAD_MAP_OFFSET , "Error : bad map offset." }, { SFE_SD2_BAD_DATA_LENGTH , "Error : bad data length." }, { SFE_SD2_BAD_MAP_LENGTH , "Error : bad map length." }, { SFE_SD2_BAD_RSRC , "Error : bad resource fork." }, { SFE_SD2_BAD_SAMPLE_SIZE , "Error : bad sample size." }, { SFE_FLAC_BAD_HEADER , "Error : bad flac header." }, { SFE_FLAC_NEW_DECODER , "Error : problem while creating flac decoder." }, { SFE_FLAC_INIT_DECODER , "Error : problem while initialization of the flac decoder." }, { SFE_FLAC_LOST_SYNC , "Error : flac decoder lost sync." }, { SFE_FLAC_BAD_SAMPLE_RATE, "Error : flac does not support this sample rate." }, { SFE_FLAC_UNKOWN_ERROR , "Error : unknown error in flac decoder." }, { SFE_WVE_NOT_WVE , "Error : not a WVE file." }, { SFE_WVE_NO_PIPE , "Error : not able to operate on WVE files over a pipe." }, { SFE_DWVW_BAD_BITWIDTH , "Error : Bad bit width for DWVW encoding. Must be 12, 16 or 24." }, { SFE_G72X_NOT_MONO , "Error : G72x encoding does not support more than 1 channel." }, { SFE_VORBIS_ENCODER_BUG , "Error : Sample rate chosen is known to trigger a Vorbis encoder bug on this CPU." }, { SFE_RF64_NOT_RF64 , "Error : Not an RF64 file." }, { SFE_RF64_PEAK_B4_FMT , "Error in RF64 file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_RF64_NO_DATA , "Error in RF64 file. No 'data' chunk marker." }, { SFE_ALAC_FAIL_TMPFILE , "Error : Failed to open tmp file for ALAC encoding." }, { SFE_BAD_CHUNK_PTR , "Error : Bad SF_CHUNK_INFO pointer." }, { SFE_UNKNOWN_CHUNK , "Error : Unknown chunk marker." }, { SFE_BAD_CHUNK_FORMAT , "Error : Reading/writing chunks from this file format is not supported." 
}, { SFE_BAD_CHUNK_MARKER , "Error : Bad chunk marker." }, { SFE_BAD_CHUNK_DATA_PTR , "Error : Bad data pointer in SF_CHUNK_INFO struct." }, { SFE_FILENAME_TOO_LONG , "Error : Supplied filename too long." }, { SFE_BAD_HEADER_ALLOC , "Error : Required header allocation is too large." }, { SFE_MAX_ERROR , "Maximum error number." }, { SFE_MAX_ERROR + 1 , NULL } } ; /*------------------------------------------------------------------------------ */ static int format_from_extension (SF_PRIVATE *psf) ; static int guess_file_type (SF_PRIVATE *psf) ; static int validate_sfinfo (SF_INFO *sfinfo) ; static int validate_psf (SF_PRIVATE *psf) ; static void save_header_info (SF_PRIVATE *psf) ; static int copy_filename (SF_PRIVATE *psf, const char *path) ; static int psf_close (SF_PRIVATE *psf) ; static int try_resource_fork (SF_PRIVATE * psf) ; /*------------------------------------------------------------------------------ ** Private (static) variables. */ int sf_errno = 0 ; static char sf_parselog [SF_BUFFER_LEN] = { 0 } ; static char sf_syserr [SF_SYSERR_LEN] = { 0 } ; /*------------------------------------------------------------------------------ */ #define VALIDATE_SNDFILE_AND_ASSIGN_PSF(a, b, c) \ { if ((a) == NULL) \ { sf_errno = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ (b) = (SF_PRIVATE*) (a) ; \ if ((b)->virtual_io == SF_FALSE && \ psf_file_valid (b) == 0) \ { (b)->error = SFE_BAD_FILE_PTR ; \ return 0 ; \ } ; \ if ((b)->Magick != SNDFILE_MAGICK) \ { (b)->error = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ if (c) (b)->error = 0 ; \ } /*------------------------------------------------------------------------------ ** Public functions. */ SNDFILE* sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */ SNDFILE* sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */ SNDFILE* sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. 
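** An illustrative sketch (hypothetical names, shown only as an example) of what a caller
** provides before reaching this point :
**
**     static sf_count_t vio_get_filelen (void *user_data) ;
**     static sf_count_t vio_seek (sf_count_t offset, int whence, void *user_data) ;
**     static sf_count_t vio_read (void *ptr, sf_count_t count, void *user_data) ;
**     static sf_count_t vio_write (const void *ptr, sf_count_t count, void *user_data) ;
**     static sf_count_t vio_tell (void *user_data) ;
**
**     SF_VIRTUAL_IO vio = { vio_get_filelen, vio_seek, vio_read, vio_write, vio_tell } ;
**     SNDFILE *file = sf_open_virtual (&vio, SFM_READ, &sfinfo, &my_state) ;
**
** The checks below reject the open when any callback required for the requested mode is missing.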
*/ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */ int sf_close (SNDFILE *sndfile) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_close (psf) ; } /* sf_close */ void sf_write_sync (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE *) sndfile) == NULL) return ; psf_fsync (psf) ; return ; } /* sf_write_sync */ /*============================================================================== */ const char* sf_error_number (int errnum) { static const char *bad_errnum = "No error defined for this error number. This is a bug in libsndfile." ; int k ; if (errnum == SFE_MAX_ERROR) return SndfileErrors [0].str ; if (errnum < 0 || errnum > SFE_MAX_ERROR) { /* This really shouldn't happen in release versions. */ printf ("Not a valid error number (%d).\n", errnum) ; return bad_errnum ; } ; for (k = 0 ; SndfileErrors [k].str ; k++) if (errnum == SndfileErrors [k].error) return SndfileErrors [k].str ; return bad_errnum ; } /* sf_error_number */ const char* sf_strerror (SNDFILE *sndfile) { SF_PRIVATE *psf = NULL ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; if (errnum == SFE_SYSTEM && sf_syserr [0]) return sf_syserr ; } else { psf = (SF_PRIVATE *) sndfile ; if (psf->Magick != SNDFILE_MAGICK) return "sf_strerror : Bad magic number." 
; errnum = psf->error ; if (errnum == SFE_SYSTEM && psf->syserr [0]) return psf->syserr ; } ; return sf_error_number (errnum) ; } /* sf_strerror */ /*------------------------------------------------------------------------------ */ int sf_error (SNDFILE *sndfile) { SF_PRIVATE *psf ; if (sndfile == NULL) return sf_errno ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; if (psf->error) return psf->error ; return 0 ; } /* sf_error */ /*------------------------------------------------------------------------------ */ int sf_perror (SNDFILE *sndfile) { SF_PRIVATE *psf ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; } else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; fprintf (stderr, "%s\n", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_perror */ /*------------------------------------------------------------------------------ */ int sf_error_str (SNDFILE *sndfile, char *str, size_t maxlen) { SF_PRIVATE *psf ; int errnum ; if (str == NULL) return SFE_INTERNAL ; if (sndfile == NULL) errnum = sf_errno ; else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; snprintf (str, maxlen, "%s", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_error_str */ /*============================================================================== */ int sf_format_check (const SF_INFO *info) { int subformat, endian ; subformat = SF_CODEC (info->format) ; endian = SF_ENDIAN (info->format) ; /* This is the place where each file format can check if the suppiled ** SF_INFO struct is valid. ** Return 0 on failure, 1 ons success. */ if (info->channels < 1 || info->channels > SF_MAX_CHANNELS) return 0 ; if (info->samplerate < 0) return 0 ; switch (SF_CONTAINER (info->format)) { case SF_FORMAT_WAV : /* WAV now allows both endian, RIFF or RIFX (little or big respectively) */ if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_WAVEX : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_AIFF : /* AIFF does allow both endian-nesses for PCM data.*/ if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; /* For other encodings reject any endian-ness setting. 
*/ if (endian != 0) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_IMA_ADPCM && (info->channels == 1 || info->channels == 2)) return 1 ; break ; case SF_FORMAT_AU : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_G721_32 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_24 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_G723_40 && info->channels == 1) return 1 ; break ; case SF_FORMAT_CAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_ALAC_16 || subformat == SF_FORMAT_ALAC_20) return 1 ; if (subformat == SF_FORMAT_ALAC_24 || subformat == SF_FORMAT_ALAC_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_RAW : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; if (subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_ULAW) return 1 ; if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info-> channels == 1) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_VOX_ADPCM && info->channels == 1) return 1 ; break ; case SF_FORMAT_PAF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SVX : /* SVX only supports writing mono SVX files. */ if (info->channels > 1) return 0 ; /* Always big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_NIST : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_IRCAM : if (info->channels > 256) return 0 ; if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_FLOAT) return 1 ; break ; case SF_FORMAT_VOC : if (info->channels > 2) return 0 ; /* VOC is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_W64 : /* W64 is strictly little endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2) return 1 ; if (subformat == SF_FORMAT_GSM610 && info->channels == 1) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT4 : if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_MAT5 : if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; case SF_FORMAT_PVF : if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_XI : if (info->channels != 1) return 0 ; if (subformat == SF_FORMAT_DPCM_8 || subformat == SF_FORMAT_DPCM_16) return 1 ; break ; case SF_FORMAT_HTK : if (info->channels != 1) return 0 ; /* HTK is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_SDS : if (info->channels != 1) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_AVR : if (info->channels > 2) return 0 ; /* SDS is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_FLAC : /* FLAC can't do more than 8 channels. */ if (info->channels > 8) return 0 ; if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24) return 1 ; break ; case SF_FORMAT_SD2 : /* SD2 is strictly big endian. */ if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; break ; case SF_FORMAT_WVE : if (info->channels > 1) return 0 ; /* WVE is strictly big endian. */ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_ALAW) return 1 ; break ; case SF_FORMAT_OGG : if (endian != SF_ENDIAN_FILE) return 0 ; if (subformat == SF_FORMAT_VORBIS) return 1 ; break ; case SF_FORMAT_MPC2K : if (info->channels > 2) return 0 ; /* MPC2000 is strictly little endian. 
*/ if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_16) return 1 ; break ; case SF_FORMAT_RF64 : if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU) return 0 ; if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16) return 1 ; if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32) return 1 ; if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW) return 1 ; if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE) return 1 ; break ; default : break ; } ; return 0 ; } /* sf_format_check */ /*------------------------------------------------------------------------------ */ const char * sf_version_string (void) { #if ENABLE_EXPERIMENTAL_CODE return PACKAGE_NAME "-" PACKAGE_VERSION "-exp" ; #else return PACKAGE_NAME "-" PACKAGE_VERSION ; #endif } /*------------------------------------------------------------------------------ */ int sf_command (SNDFILE *sndfile, int command, void *data, int datasize) { SF_PRIVATE *psf = (SF_PRIVATE *) sndfile ; double quality ; int old_value ; /* This set of commands do not need the sndfile parameter. */ switch (command) { case SFC_GET_LIB_VERSION : if (data == NULL) { if (psf) psf->error = SFE_BAD_COMMAND_PARAM ; return SFE_BAD_COMMAND_PARAM ; } ; snprintf (data, datasize, "%s", sf_version_string ()) ; return strlen (data) ; case SFC_GET_SIMPLE_FORMAT_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_simple_count () ; return 0 ; case SFC_GET_SIMPLE_FORMAT : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_simple (data) ; case SFC_GET_FORMAT_MAJOR_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_major_count () ; return 0 ; case SFC_GET_FORMAT_MAJOR : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_major (data) ; case SFC_GET_FORMAT_SUBTYPE_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_subtype_count () ; return 0 ; case SFC_GET_FORMAT_SUBTYPE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_subtype (data) ; case SFC_GET_FORMAT_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_info (data) ; } ; if (sndfile == NULL && command == SFC_GET_LOG_INFO) { if (data == NULL) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; snprintf (data, datasize, "%s", sf_parselog) ; return strlen (data) ; } ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; switch (command) { case SFC_SET_NORM_FLOAT : old_value = psf->norm_float ; psf->norm_float = (datasize) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_CURRENT_SF_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; memcpy (data, &psf->sf, sizeof (SF_INFO)) ; break ; case SFC_SET_NORM_DOUBLE : old_value = psf->norm_double ; psf->norm_double = (datasize) ? 
SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_NORM_FLOAT : return psf->norm_float ; case SFC_GET_NORM_DOUBLE : return psf->norm_double ; case SFC_SET_SCALE_FLOAT_INT_READ : old_value = psf->float_int_mult ; psf->float_int_mult = (datasize != 0) ? SF_TRUE : SF_FALSE ; if (psf->float_int_mult && psf->float_max < 0.0) /* Scale to prevent wrap-around distortion. */ psf->float_max = (32768.0 / 32767.0) * psf_calc_signal_max (psf, SF_FALSE) ; return old_value ; case SFC_SET_SCALE_INT_FLOAT_WRITE : old_value = psf->scale_int_float ; psf->scale_int_float = (datasize != 0) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_SET_ADD_PEAK_CHUNK : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and AIFF support the PEAK chunk. */ switch (format) { case SF_FORMAT_AIFF : case SF_FORMAT_CAF : case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_RF64 : break ; default : return SF_FALSE ; } ; format = SF_CODEC (psf->sf.format) ; /* Only files containg the following data types support the PEAK chunk. */ if (format != SF_FORMAT_FLOAT && format != SF_FORMAT_DOUBLE) return SF_FALSE ; } ; /* Can only do this is in SFM_WRITE mode. */ if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; /* Everything seems OK, so set psf->has_peak and re-write header. */ if (datasize == SF_FALSE && psf->peak_info != NULL) { free (psf->peak_info) ; psf->peak_info = NULL ; } else if (psf->peak_info == NULL) { psf->peak_info = peak_info_calloc (psf->sf.channels) ; if (psf->peak_info != NULL) psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return datasize ; case SFC_SET_ADD_HEADER_PAD_CHUNK : return SF_FALSE ; case SFC_GET_LOG_INFO : if (data == NULL) return SFE_BAD_COMMAND_PARAM ; snprintf (data, datasize, "%s", psf->parselog.buf) ; break ; case SFC_CALC_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_FALSE) ; break ; case SFC_CALC_NORM_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_TRUE) ; break ; case SFC_CALC_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_FALSE) ; case SFC_CALC_NORM_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_TRUE) ; case SFC_GET_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_signal_max (psf, (double *) data) ; case SFC_GET_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_max_all_channels (psf, (double*) data) ; case SFC_UPDATE_HEADER_NOW : if (psf->write_header) psf->write_header (psf, SF_TRUE) ; break ; case SFC_SET_UPDATE_HEADER_AUTO : psf->auto_header = datasize ? SF_TRUE : SF_FALSE ; return psf->auto_header ; break ; case SFC_SET_ADD_DITHER_ON_WRITE : case SFC_SET_ADD_DITHER_ON_READ : /* ** FIXME ! ** These are obsolete. Just return. 
** Remove some time after version 1.0.8. */ break ; case SFC_SET_DITHER_ON_WRITE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->write_dither, data, sizeof (psf->write_dither)) ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_WRITE) ; break ; case SFC_SET_DITHER_ON_READ : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->read_dither, data, sizeof (psf->read_dither)) ; if (psf->file.mode == SFM_READ || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_READ) ; break ; case SFC_FILE_TRUNCATE : if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_TRUE ; if (datasize != sizeof (sf_count_t)) return SF_TRUE ; if (data == NULL || datasize != sizeof (sf_count_t)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } else { sf_count_t position ; position = *((sf_count_t*) data) ; if (sf_seek (sndfile, position, SEEK_SET) != position) return SF_TRUE ; psf->sf.frames = position ; position = psf_fseek (psf, 0, SEEK_CUR) ; return psf_ftruncate (psf, position) ; } ; break ; case SFC_SET_RAW_START_OFFSET : if (data == NULL || datasize != sizeof (sf_count_t)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) return (psf->error = SFE_BAD_COMMAND_PARAM) ; psf->dataoffset = *((sf_count_t*) data) ; sf_seek (sndfile, 0, SEEK_CUR) ; break ; case SFC_GET_EMBED_FILE_INFO : if (data == NULL || datasize != sizeof (SF_EMBED_FILE_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; ((SF_EMBED_FILE_INFO*) data)->offset = psf->fileoffset ; ((SF_EMBED_FILE_INFO*) data)->length = psf->filelength ; break ; /* Lite remove start */ case SFC_TEST_IEEE_FLOAT_REPLACE : psf->ieee_replace = (datasize) ? SF_TRUE : SF_FALSE ; if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_FLOAT) float32_init (psf) ; else if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_DOUBLE) double64_init (psf) ; else return (psf->error = SFE_BAD_COMMAND_PARAM) ; break ; /* Lite remove end */ case SFC_SET_CLIPPING : psf->add_clipping = (datasize) ? SF_TRUE : SF_FALSE ; return psf->add_clipping ; case SFC_GET_CLIPPING : return psf->add_clipping ; case SFC_GET_LOOP_INFO : if (datasize != sizeof (SF_LOOP_INFO) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->loop_info == NULL) return SF_FALSE ; memcpy (data, psf->loop_info, sizeof (SF_LOOP_INFO)) ; return SF_TRUE ; case SFC_SET_BROADCAST_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 supports the BEXT (Broadcast) chunk. */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_WAVEX && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode. */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. 
*/ if (psf->broadcast_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (broadcast_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_BROADCAST_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return broadcast_var_get (psf, data, datasize) ; case SFC_SET_CART_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 support cart chunk format */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->cart_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (cart_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_CART_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return cart_var_get (psf, data, datasize) ; case SFC_GET_CUE_COUNT : if (datasize != sizeof (uint32_t) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues != NULL) { *((uint32_t *) data) = psf->cues->cue_count ; return SF_TRUE ; } ; return SF_FALSE ; case SFC_GET_CUE : if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL) return SF_FALSE ; psf_get_cues (psf, data, datasize) ; return SF_TRUE ; case SFC_SET_CUE : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL && (psf->cues = psf_cues_dup (data)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; return SF_TRUE ; case SFC_GET_INSTRUMENT : if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL) return SF_FALSE ; memcpy (data, psf->instrument, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_SET_INSTRUMENT : /* If data has already been written this must fail. 
*/ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->instrument, data, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_RAW_DATA_NEEDS_ENDSWAP : return psf->data_endswap ; case SFC_GET_CHANNEL_MAP_INFO : if (psf->channel_map == NULL) return SF_FALSE ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; memcpy (data, psf->channel_map, datasize) ; return SF_TRUE ; case SFC_SET_CHANNEL_MAP_INFO : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; { int *iptr ; for (iptr = data ; iptr < (int*) data + psf->sf.channels ; iptr++) { if (*iptr <= SF_CHANNEL_MAP_INVALID || *iptr >= SF_CHANNEL_MAP_MAX) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; } ; } ; free (psf->channel_map) ; if ((psf->channel_map = malloc (datasize)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->channel_map, data, datasize) ; /* ** Pass the command down to the container's command handler. ** Don't pass user data, use validated psf->channel_map data instead. */ if (psf->command) return psf->command (psf, command, NULL, 0) ; return SF_FALSE ; case SFC_SET_VBR_ENCODING_QUALITY : if (data == NULL || datasize != sizeof (double)) return SF_FALSE ; quality = *((double *) data) ; quality = 1.0 - SF_MAX (0.0, SF_MIN (1.0, quality)) ; return sf_command (sndfile, SFC_SET_COMPRESSION_LEVEL, &quality, sizeof (quality)) ; default : /* Must be a file specific command. Pass it on. */ if (psf->command) return psf->command (psf, command, data, datasize) ; psf_log_printf (psf, "*** sf_command : cmd = 0x%X\n", command) ; return (psf->error = SFE_BAD_COMMAND_PARAM) ; } ; return 0 ; } /* sf_command */ /*------------------------------------------------------------------------------ */ sf_count_t sf_seek (SNDFILE *sndfile, sf_count_t offset, int whence) { SF_PRIVATE *psf ; sf_count_t seek_from_start = 0, retval ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (! psf->sf.seekable) { psf->error = SFE_NOT_SEEKABLE ; return PSF_SEEK_ERROR ; } ; /* If the whence parameter has a mode ORed in, check to see that ** it makes sense. */ if (((whence & SFM_MASK) == SFM_WRITE && psf->file.mode == SFM_READ) || ((whence & SFM_MASK) == SFM_READ && psf->file.mode == SFM_WRITE)) { psf->error = SFE_WRONG_SEEK ; return PSF_SEEK_ERROR ; } ; /* Convert all SEEK_CUR and SEEK_END into seek_from_start to be ** used with SEEK_SET. */ switch (whence) { /* The SEEK_SET behaviour is independant of mode. */ case SEEK_SET : case SEEK_SET | SFM_READ : case SEEK_SET | SFM_WRITE : case SEEK_SET | SFM_RDWR : seek_from_start = offset ; break ; /* The SEEK_CUR is a little more tricky. 
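** As an illustration (not taken from the original source) : in SFM_RDWR mode the read and
** write pointers move independently, so a caller advances only the read pointer by 100
** frames with
**
**     sf_seek (sndfile, 100, SEEK_CUR | SFM_READ) ;
**
** while in plain read or write mode a SEEK_CUR with an offset of zero just reports the
** current read or write position.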
*/ case SEEK_CUR : if (offset == 0) { if (psf->file.mode == SFM_READ) return psf->read_current ; if (psf->file.mode == SFM_WRITE) return psf->write_current ; } ; if (psf->file.mode == SFM_READ) seek_from_start = psf->read_current + offset ; else if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) seek_from_start = psf->write_current + offset ; else psf->error = SFE_AMBIGUOUS_SEEK ; break ; case SEEK_CUR | SFM_READ : if (offset == 0) return psf->read_current ; seek_from_start = psf->read_current + offset ; break ; case SEEK_CUR | SFM_WRITE : if (offset == 0) return psf->write_current ; seek_from_start = psf->write_current + offset ; break ; /* The SEEK_END */ case SEEK_END : case SEEK_END | SFM_READ : case SEEK_END | SFM_WRITE : seek_from_start = psf->sf.frames + offset ; break ; default : psf->error = SFE_BAD_SEEK ; break ; } ; if (psf->error) return PSF_SEEK_ERROR ; if (psf->file.mode == SFM_RDWR || psf->file.mode == SFM_WRITE) { if (seek_from_start < 0) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; } else if (seek_from_start < 0 || seek_from_start > psf->sf.frames) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; if (psf->seek) { int new_mode = (whence & SFM_MASK) ? (whence & SFM_MASK) : psf->file.mode ; retval = psf->seek (psf, new_mode, seek_from_start) ; switch (new_mode) { case SFM_READ : psf->read_current = retval ; break ; case SFM_WRITE : psf->write_current = retval ; break ; case SFM_RDWR : psf->read_current = retval ; psf->write_current = retval ; new_mode = SFM_READ ; break ; } ; psf->last_op = new_mode ; return retval ; } ; psf->error = SFE_AMBIGUOUS_SEEK ; return PSF_SEEK_ERROR ; } /* sf_seek */ /*------------------------------------------------------------------------------ */ const char* sf_get_string (SNDFILE *sndfile, int str_type) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return NULL ; if (psf->Magick != SNDFILE_MAGICK) return NULL ; return psf_get_string (psf, str_type) ; } /* sf_get_string */ int sf_set_string (SNDFILE *sndfile, int str_type, const char* str) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_set_string (psf, str_type, str) ; } /* sf_get_string */ /*------------------------------------------------------------------------------ */ int sf_current_byterate (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return -1 ; if (psf->Magick != SNDFILE_MAGICK) return -1 ; /* This should cover all PCM and floating point formats. */ if (psf->bytewidth) return psf->sf.samplerate * psf->sf.channels * psf->bytewidth ; if (psf->byterate) return psf->byterate (psf) ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_IMA_ADPCM : case SF_FORMAT_MS_ADPCM : case SF_FORMAT_VOX_ADPCM : return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_GSM610 : return (psf->sf.samplerate * psf->sf.channels * 13000) / 8000 ; case SF_FORMAT_G721_32 : /* 32kbs G721 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_G723_24 : /* 24kbs G723 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels * 3) / 8 ; case SF_FORMAT_G723_40 : /* 40kbs G723 ADPCM encoding. 
*/ return (psf->sf.samplerate * psf->sf.channels * 5) / 8 ; default : break ; } ; return -1 ; } /* sf_current_byterate */ /*============================================================================== */ sf_count_t sf_read_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) { SF_PRIVATE *psf ; sf_count_t count, extra ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (bytes < 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, bytes) ; return 0 ; } ; if (bytes % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf_fread (ptr, 1, bytes, psf) ; if (psf->read_current + count / blockwidth <= psf->sf.frames) psf->read_current += count / blockwidth ; else { count = (psf->sf.frames - psf->read_current) * blockwidth ; extra = bytes - count ; psf_memset (((char *) ptr) + count, 0, extra) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_short (SNDFILE *sndfile, short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (short)) ; return 0 ; /* End of file. */ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_short */ sf_count_t sf_readf_short (SNDFILE *sndfile, short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (short)) ; return 0 ; /* End of file. 
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_int */ sf_count_t sf_readf_int (SNDFILE *sndfile, int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, 
psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_float */ sf_count_t sf_readf_float (SNDFILE *sndfile, float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_double (SNDFILE *sndfile, double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_double */ sf_count_t sf_readf_double (SNDFILE *sndfile, double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (double)) ; return 0 ; } ; if (psf->read_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_double (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; 
psf_memset (ptr + count, 0, extra * sizeof (double)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_double */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_raw (SNDFILE *sndfile, const void *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf_fwrite (ptr, 1, len, psf) ; psf->write_current += count / blockwidth ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_short (SNDFILE *sndfile, const short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_short */ sf_count_t sf_writef_short (SNDFILE *sndfile, const short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_short (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_int (SNDFILE *sndfile, const int *ptr, sf_count_t len) { SF_PRIVATE 
*psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_int */ sf_count_t sf_writef_int (SNDFILE *sndfile, const int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_int (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_float (SNDFILE *sndfile, const float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_float (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_float */ sf_count_t sf_writef_float (SNDFILE *sndfile, const float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; 
count = psf->write_float (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_double (SNDFILE *sndfile, const double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_double */ sf_count_t sf_writef_double (SNDFILE *sndfile, const double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_double */ /*========================================================================= ** Private functions. */ static int try_resource_fork (SF_PRIVATE * psf) { int old_error = psf->error ; /* Set READ mode now, to see if resource fork exists. */ psf->rsrc.mode = SFM_READ ; if (psf_open_rsrc (psf) != 0) { psf->error = old_error ; return 0 ; } ; /* More checking here. */ psf_log_printf (psf, "Resource fork : %s\n", psf->rsrc.path.c) ; return SF_FORMAT_SD2 ; } /* try_resource_fork */ static int format_from_extension (SF_PRIVATE *psf) { char *cptr ; char buffer [16] ; int format = 0 ; if ((cptr = strrchr (psf->file.name.c, '.')) == NULL) return 0 ; cptr ++ ; if (strlen (cptr) > sizeof (buffer) - 1) return 0 ; psf_strlcpy (buffer, sizeof (buffer), cptr) ; buffer [sizeof (buffer) - 1] = 0 ; /* Convert everything in the buffer to lower case. 
*/ cptr = buffer ; while (*cptr) { *cptr = tolower (*cptr) ; cptr ++ ; } ; cptr = buffer ; if (strcmp (cptr, "au") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "snd") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "vox") == 0 || strcmp (cptr, "vox8") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "vox6") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 6000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "gsm") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_GSM610 ; } /* For RAW files, make sure the dataoffset if set correctly. */ if ((SF_CONTAINER (format)) == SF_FORMAT_RAW) psf->dataoffset = 0 ; return format ; } /* format_from_extension */ static int guess_file_type (SF_PRIVATE *psf) { uint32_t buffer [3], format ; if (psf_binheader_readf (psf, "b", &buffer, SIGNED_SIZEOF (buffer)) != SIGNED_SIZEOF (buffer)) { psf->error = SFE_BAD_FILE_READ ; return 0 ; } ; if ((buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'F') || buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'X')) && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_WAV ; if (buffer [0] == MAKE_MARKER ('F', 'O', 'R', 'M')) { if (buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'F') || buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'C')) return SF_FORMAT_AIFF ; if (buffer [2] == MAKE_MARKER ('8', 'S', 'V', 'X') || buffer [2] == MAKE_MARKER ('1', '6', 'S', 'V')) return SF_FORMAT_SVX ; return 0 ; } ; if (buffer [0] == MAKE_MARKER ('.', 's', 'n', 'd') || buffer [0] == MAKE_MARKER ('d', 'n', 's', '.')) return SF_FORMAT_AU ; if ((buffer [0] == MAKE_MARKER ('f', 'a', 'p', ' ') || buffer [0] == MAKE_MARKER (' ', 'p', 'a', 'f'))) return SF_FORMAT_PAF ; if (buffer [0] == MAKE_MARKER ('N', 'I', 'S', 'T')) return SF_FORMAT_NIST ; if (buffer [0] == MAKE_MARKER ('C', 'r', 'e', 'a') && buffer [1] == MAKE_MARKER ('t', 'i', 'v', 'e')) return SF_FORMAT_VOC ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0xF8, 0xFF)) == MAKE_MARKER (0x64, 0xA3, 0x00, 0x00) || (buffer [0] & MAKE_MARKER (0xFF, 0xF8, 0xFF, 0xFF)) == MAKE_MARKER (0x00, 0x00, 0xA3, 0x64)) return SF_FORMAT_IRCAM ; if (buffer [0] == MAKE_MARKER ('r', 'i', 'f', 'f')) return SF_FORMAT_W64 ; if (buffer [0] == MAKE_MARKER (0, 0, 0x03, 0xE8) && buffer [1] == MAKE_MARKER (0, 0, 0, 1) && buffer [2] == MAKE_MARKER (0, 0, 0, 1)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER (0, 0, 0, 0) && buffer [1] == MAKE_MARKER (1, 0, 0, 0) && buffer [2] == MAKE_MARKER (1, 0, 0, 0)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER ('M', 'A', 'T', 'L') && buffer [1] == MAKE_MARKER ('A', 'B', ' ', '5')) return SF_FORMAT_MAT5 ; if (buffer [0] == MAKE_MARKER ('P', 'V', 'F', '1')) return SF_FORMAT_PVF ; if (buffer [0] == MAKE_MARKER ('E', 'x', 't', 'e') && buffer [1] == MAKE_MARKER ('n', 'd', 'e', 'd') && buffer [2] == MAKE_MARKER (' ', 'I', 'n', 's')) return SF_FORMAT_XI ; if (buffer [0] == MAKE_MARKER ('c', 'a', 'f', 'f') && buffer [2] == MAKE_MARKER ('d', 'e', 's', 'c')) return SF_FORMAT_CAF ; if (buffer [0] == MAKE_MARKER ('O', 'g', 'g', 'S')) return SF_FORMAT_OGG ; if (buffer [0] == MAKE_MARKER ('A', 'L', 'a', 'w') && buffer [1] == MAKE_MARKER ('S', 'o', 'u', 'n') && buffer [2] == MAKE_MARKER ('d', 'F', 'i', 'l')) return SF_FORMAT_WVE ; if (buffer [0] == MAKE_MARKER ('D', 'i', 'a', 'm') && buffer 
[1] == MAKE_MARKER ('o', 'n', 'd', 'W') && buffer [2] == MAKE_MARKER ('a', 'r', 'e', ' ')) return SF_FORMAT_DWD ; if (buffer [0] == MAKE_MARKER ('L', 'M', '8', '9') || buffer [0] == MAKE_MARKER ('5', '3', 0, 0)) return SF_FORMAT_TXW ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0x80, 0xFF)) == MAKE_MARKER (0xF0, 0x7E, 0, 0x01)) return SF_FORMAT_SDS ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0, 0)) == MAKE_MARKER (1, 4, 0, 0)) return SF_FORMAT_MPC2K ; if (buffer [0] == MAKE_MARKER ('C', 'A', 'T', ' ') && buffer [2] == MAKE_MARKER ('R', 'E', 'X', '2')) return SF_FORMAT_REX2 ; if (buffer [0] == MAKE_MARKER (0x30, 0x26, 0xB2, 0x75) && buffer [1] == MAKE_MARKER (0x8E, 0x66, 0xCF, 0x11)) return 0 /*-SF_FORMAT_WMA-*/ ; /* HMM (Hidden Markov Model) Tool Kit. */ if (buffer [2] == MAKE_MARKER (0, 2, 0, 0) && 2 * ((int64_t) BE2H_32 (buffer [0])) + 12 == psf->filelength) return SF_FORMAT_HTK ; if (buffer [0] == MAKE_MARKER ('f', 'L', 'a', 'C')) return SF_FORMAT_FLAC ; if (buffer [0] == MAKE_MARKER ('2', 'B', 'I', 'T')) return SF_FORMAT_AVR ; if (buffer [0] == MAKE_MARKER ('R', 'F', '6', '4') && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_RF64 ; if (buffer [0] == MAKE_MARKER ('I', 'D', '3', 3)) { psf_log_printf (psf, "Found 'ID3' marker.\n") ; if (id3_skip (psf)) return guess_file_type (psf) ; return 0 ; } ; /* Turtle Beach SMP 16-bit */ if (buffer [0] == MAKE_MARKER ('S', 'O', 'U', 'N') && buffer [1] == MAKE_MARKER ('D', ' ', 'S', 'A')) return 0 ; /* Yamaha sampler format. */ if (buffer [0] == MAKE_MARKER ('S', 'Y', '8', '0') || buffer [0] == MAKE_MARKER ('S', 'Y', '8', '5')) return 0 ; if (buffer [0] == MAKE_MARKER ('a', 'j', 'k', 'g')) return 0 /*-SF_FORMAT_SHN-*/ ; /* This must be the last one. */ if (psf->filelength > 0 && (format = try_resource_fork (psf)) != 0) return format ; return 0 ; } /* guess_file_type */ static int validate_sfinfo (SF_INFO *sfinfo) { if (sfinfo->samplerate < 1) return 0 ; if (sfinfo->frames < 0) return 0 ; if (sfinfo->channels < 1) return 0 ; if ((SF_CONTAINER (sfinfo->format)) == 0) return 0 ; if ((SF_CODEC (sfinfo->format)) == 0) return 0 ; if (sfinfo->sections < 1) return 0 ; return 1 ; } /* validate_sfinfo */ static int validate_psf (SF_PRIVATE *psf) { if (psf->datalength < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : datalength == %D.\n", psf->datalength) ; return 0 ; } ; if (psf->dataoffset < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : dataoffset == %D.\n", psf->dataoffset) ; return 0 ; } ; if (psf->blockwidth && psf->blockwidth != psf->sf.channels * psf->bytewidth) { psf_log_printf (psf, "Invalid SF_PRIVATE field : channels * bytewidth == %d.\n", psf->sf.channels * psf->bytewidth) ; return 0 ; } ; return 1 ; } /* validate_psf */ static void save_header_info (SF_PRIVATE *psf) { snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; } /* save_header_info */ static int copy_filename (SF_PRIVATE *psf, const char *path) { const char *ccptr ; char *cptr ; if (strlen (path) > 1 && strlen (path) - 1 >= sizeof (psf->file.path.c)) { psf->error = SFE_FILENAME_TOO_LONG ; return psf->error ; } ; snprintf (psf->file.path.c, sizeof (psf->file.path.c), "%s", path) ; if ((ccptr = strrchr (path, '/')) || (ccptr = strrchr (path, '\\'))) ccptr ++ ; else ccptr = path ; snprintf (psf->file.name.c, sizeof (psf->file.name.c), "%s", ccptr) ; /* Now grab the directory. 
*/ snprintf (psf->file.dir.c, sizeof (psf->file.dir.c), "%s", path) ; if ((cptr = strrchr (psf->file.dir.c, '/')) || (cptr = strrchr (psf->file.dir.c, '\\'))) cptr [1] = 0 ; else psf->file.dir.c [0] = 0 ; return 0 ; } /* copy_filename */ /*============================================================================== */ static int psf_close (SF_PRIVATE *psf) { uint32_t k ; int error = 0 ; if (psf->codec_close) { error = psf->codec_close (psf) ; /* To prevent it being called in psf->container_close(). */ psf->codec_close = NULL ; } ; if (psf->container_close) error = psf->container_close (psf) ; error = psf_fclose (psf) ; psf_close_rsrc (psf) ; /* For an ISO C compliant implementation it is ok to free a NULL pointer. */ free (psf->header.ptr) ; free (psf->container_data) ; free (psf->codec_data) ; free (psf->interleave) ; free (psf->dither) ; free (psf->peak_info) ; free (psf->broadcast_16k) ; free (psf->loop_info) ; free (psf->instrument) ; free (psf->cues) ; free (psf->channel_map) ; free (psf->format_desc) ; free (psf->strings.storage) ; if (psf->wchunks.chunks) for (k = 0 ; k < psf->wchunks.used ; k++) free (psf->wchunks.chunks [k].data) ; free (psf->rchunks.chunks) ; free (psf->wchunks.chunks) ; free (psf->iterator) ; free (psf->cart_16k) ; memset (psf, 0, sizeof (SF_PRIVATE)) ; free (psf) ; return error ; } /* psf_close */ SNDFILE * psf_open_file (SF_PRIVATE *psf, SF_INFO *sfinfo) { int error, format ; sf_errno = error = 0 ; sf_parselog [0] = 0 ; if (psf->error) { error = psf->error ; goto error_exit ; } ; if (psf->file.mode != SFM_READ && psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) { error = SFE_BAD_OPEN_MODE ; goto error_exit ; } ; if (sfinfo == NULL) { error = SFE_BAD_SF_INFO_PTR ; goto error_exit ; } ; if (psf->file.mode == SFM_READ) { if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_RAW) { if (sf_format_check (sfinfo) == 0) { error = SFE_RAW_BAD_FORMAT ; goto error_exit ; } ; } else memset (sfinfo, 0, sizeof (SF_INFO)) ; } ; memcpy (&psf->sf, sfinfo, sizeof (SF_INFO)) ; psf->Magick = SNDFILE_MAGICK ; psf->norm_float = SF_TRUE ; psf->norm_double = SF_TRUE ; psf->dataoffset = -1 ; psf->datalength = -1 ; psf->read_current = -1 ; psf->write_current = -1 ; psf->auto_header = SF_FALSE ; psf->rwf_endian = SF_ENDIAN_LITTLE ; psf->seek = psf_default_seek ; psf->float_int_mult = 0 ; psf->float_max = -1.0 ; /* An attempt at a per SF_PRIVATE unique id. */ psf->unique_id = psf_rand_int32 () ; psf->sf.sections = 1 ; psf->is_pipe = psf_is_pipe (psf) ; if (psf->is_pipe) { psf->sf.seekable = SF_FALSE ; psf->filelength = SF_COUNT_MAX ; } else { psf->sf.seekable = SF_TRUE ; /* File is open, so get the length. 
*/ psf->filelength = psf_get_filelen (psf) ; } ; if (psf->fileoffset > 0) { switch (psf->file.mode) { case SFM_READ : if (psf->filelength < 44) { psf_log_printf (psf, "Short filelength: %D (fileoffset: %D)\n", psf->filelength, psf->fileoffset) ; error = SFE_BAD_OFFSET ; goto error_exit ; } ; break ; case SFM_WRITE : psf->fileoffset = 0 ; psf_fseek (psf, 0, SEEK_END) ; psf->fileoffset = psf_ftell (psf) ; break ; case SFM_RDWR : error = SFE_NO_EMBEDDED_RDWR ; goto error_exit ; } ; psf_log_printf (psf, "Embedded file offset : %D\n", psf->fileoffset) ; } ; if (psf->filelength == SF_COUNT_MAX) psf_log_printf (psf, "Length : unknown\n") ; else psf_log_printf (psf, "Length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->filelength == 0)) { /* If the file is being opened for write or RDWR and the file is currently ** empty, then the SF_INFO struct must contain valid data. */ if ((SF_CONTAINER (psf->sf.format)) == 0) { error = SFE_ZERO_MAJOR_FORMAT ; goto error_exit ; } ; if ((SF_CODEC (psf->sf.format)) == 0) { error = SFE_ZERO_MINOR_FORMAT ; goto error_exit ; } ; if (sf_format_check (&psf->sf) == 0) { error = SFE_BAD_OPEN_FORMAT ; goto error_exit ; } ; } else if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) { /* If type RAW has not been specified then need to figure out file type. */ psf->sf.format = guess_file_type (psf) ; if (psf->sf.format == 0) psf->sf.format = format_from_extension (psf) ; } ; /* Prevent unnecessary seeks */ psf->last_op = psf->file.mode ; /* Set bytewidth if known. */ switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_S8 : case SF_FORMAT_PCM_U8 : case SF_FORMAT_ULAW : case SF_FORMAT_ALAW : case SF_FORMAT_DPCM_8 : psf->bytewidth = 1 ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_DPCM_16 : psf->bytewidth = 2 ; break ; case SF_FORMAT_PCM_24 : psf->bytewidth = 3 ; break ; case SF_FORMAT_PCM_32 : case SF_FORMAT_FLOAT : psf->bytewidth = 4 ; break ; case SF_FORMAT_DOUBLE : psf->bytewidth = 8 ; break ; } ; /* Call the initialisation function for the relevant file type. 
*/ switch (SF_CONTAINER (psf->sf.format)) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : error = wav_open (psf) ; break ; case SF_FORMAT_AIFF : error = aiff_open (psf) ; break ; case SF_FORMAT_AU : error = au_open (psf) ; break ; case SF_FORMAT_RAW : error = raw_open (psf) ; break ; case SF_FORMAT_W64 : error = w64_open (psf) ; break ; case SF_FORMAT_RF64 : error = rf64_open (psf) ; break ; /* Lite remove start */ case SF_FORMAT_PAF : error = paf_open (psf) ; break ; case SF_FORMAT_SVX : error = svx_open (psf) ; break ; case SF_FORMAT_NIST : error = nist_open (psf) ; break ; case SF_FORMAT_IRCAM : error = ircam_open (psf) ; break ; case SF_FORMAT_VOC : error = voc_open (psf) ; break ; case SF_FORMAT_SDS : error = sds_open (psf) ; break ; case SF_FORMAT_OGG : error = ogg_open (psf) ; break ; case SF_FORMAT_TXW : error = txw_open (psf) ; break ; case SF_FORMAT_WVE : error = wve_open (psf) ; break ; case SF_FORMAT_DWD : error = dwd_open (psf) ; break ; case SF_FORMAT_MAT4 : error = mat4_open (psf) ; break ; case SF_FORMAT_MAT5 : error = mat5_open (psf) ; break ; case SF_FORMAT_PVF : error = pvf_open (psf) ; break ; case SF_FORMAT_XI : error = xi_open (psf) ; break ; case SF_FORMAT_HTK : error = htk_open (psf) ; break ; case SF_FORMAT_SD2 : error = sd2_open (psf) ; break ; case SF_FORMAT_REX2 : error = rx2_open (psf) ; break ; case SF_FORMAT_AVR : error = avr_open (psf) ; break ; case SF_FORMAT_FLAC : error = flac_open (psf) ; break ; case SF_FORMAT_CAF : error = caf_open (psf) ; break ; case SF_FORMAT_MPC2K : error = mpc2k_open (psf) ; break ; /* Lite remove end */ default : error = SFE_UNKNOWN_FORMAT ; } ; if (error) goto error_exit ; /* For now, check whether embedding is supported. */ format = SF_CONTAINER (psf->sf.format) ; if (psf->fileoffset > 0) { switch (format) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_AIFF : case SF_FORMAT_AU : /* Actual embedded files. */ break ; case SF_FORMAT_FLAC : /* Flac with an ID3v2 header? */ break ; default : error = SFE_NO_EMBED_SUPPORT ; goto error_exit ; } ; } ; if (psf->fileoffset > 0) psf_log_printf (psf, "Embedded file length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_RDWR && sf_format_check (&psf->sf) == 0) { error = SFE_BAD_MODE_RW ; goto error_exit ; } ; if (validate_sfinfo (&psf->sf) == 0) { psf_log_SF_INFO (psf) ; save_header_info (psf) ; error = SFE_BAD_SF_INFO ; goto error_exit ; } ; if (validate_psf (psf) == 0) { save_header_info (psf) ; error = SFE_INTERNAL ; goto error_exit ; } ; psf->read_current = 0 ; psf->write_current = 0 ; if (psf->file.mode == SFM_RDWR) { psf->write_current = psf->sf.frames ; psf->have_written = psf->sf.frames > 0 ? SF_TRUE : SF_FALSE ; } ; memcpy (sfinfo, &psf->sf, sizeof (SF_INFO)) ; if (psf->file.mode == SFM_WRITE) { /* Zero out these fields. */ sfinfo->frames = 0 ; sfinfo->sections = 0 ; sfinfo->seekable = 0 ; } ; return (SNDFILE *) psf ; error_exit : sf_errno = error ; if (error == SFE_SYSTEM) snprintf (sf_syserr, sizeof (sf_syserr), "%s", psf->syserr) ; snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; switch (error) { case SF_ERR_SYSTEM : case SF_ERR_UNSUPPORTED_ENCODING : case SFE_UNIMPLEMENTED : break ; case SFE_RAW_BAD_FORMAT : break ; default : if (psf->file.mode == SFM_READ) { psf_log_printf (psf, "Parse error : %s\n", sf_error_number (error)) ; error = SF_ERR_MALFORMED_FILE ; } ; } ; psf_close (psf) ; return NULL ; } /* psf_open_file */ /*============================================================================== ** Chunk getting and setting. 
** This works for AIFF, CAF, RF64 and WAV. ** It doesn't work for W64 because W64 uses weird GUID style chunk markers. */ int sf_set_chunk (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->set_chunk) return psf->set_chunk (psf, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_set_chunk */ SF_CHUNK_ITERATOR * sf_get_chunk_iterator (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info) return psf_get_chunk_iterator (psf, chunk_info->id) ; return psf_get_chunk_iterator (psf, NULL) ; } /* sf_get_chunk_iterator */ SF_CHUNK_ITERATOR * sf_next_chunk_iterator (SF_CHUNK_ITERATOR * iterator) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->next_chunk_iterator) return psf->next_chunk_iterator (psf, iterator) ; return NULL ; } /* sf_get_chunk_iterator_next */ int sf_get_chunk_size (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_size) return psf->get_chunk_size (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; return 0 ; } /* sf_get_chunk_size */ int sf_get_chunk_data (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_data) return psf->get_chunk_data (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_get_chunk_data */
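The chunk accessors that close this file (sf_set_chunk, sf_get_chunk_iterator, sf_next_chunk_iterator, sf_get_chunk_size and sf_get_chunk_data) are easiest to read next to a caller-side sketch. The code below is illustrative only and not part of this record; it assumes the public <sndfile.h> declarations of SF_CHUNK_INFO (id, id_size, datalen, data) and an already opened SNDFILE handle, and the dump_chunks name is invented here.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sndfile.h>

/* Print the id and size of every chunk in an open file and fetch each payload. */
static void
dump_chunks (SNDFILE *file)
{	SF_CHUNK_INFO info ;
	SF_CHUNK_ITERATOR *iter ;

	/* Passing NULL for chunk_info iterates over every chunk (see sf_get_chunk_iterator above). */
	for (iter = sf_get_chunk_iterator (file, NULL) ; iter != NULL ; iter = sf_next_chunk_iterator (iter))
	{	memset (&info, 0, sizeof (info)) ;
		if (sf_get_chunk_size (iter, &info) != SF_ERR_NO_ERROR)
			continue ;
		printf ("chunk '%.*s' : %u bytes\n", (int) info.id_size, info.id, info.datalen) ;

		if ((info.data = malloc (info.datalen)) == NULL)
			continue ;
		if (sf_get_chunk_data (iter, &info) == SF_ERR_NO_ERROR && info.datalen > 0)
			printf ("    first byte : 0x%02x\n", ((unsigned char *) info.data) [0]) ;
		free (info.data) ;
		} ;
} /* dump_chunks */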
sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data)
{	SF_PRIVATE *psf ;

	/* Make sure we have a valid set of virtual pointers. */
	if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)
	{	sf_errno = SFE_MALLOC_FAILED ;
		return NULL ;
		} ;

	psf_init_files (psf) ;

	psf->virtual_io = SF_TRUE ;
	psf->vio = *sfvirtual ;
	psf->vio_user_data = user_data ;

	psf->file.mode = mode ;

	return psf_open_file (psf, sfinfo) ;
} /* sf_open_virtual */
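The pre-fix sf_open_virtual () above and the post-fix version below validate exactly the same SF_VIRTUAL_IO callbacks (get_filelen, seek and tell always, read and/or write depending on mode) and differ only in how the SF_PRIVATE struct is allocated. For context, a minimal caller-side callback set that would pass those checks could look like the sketch below. It is illustrative only: the MEM_SOURCE type and the mem_* names are invented here, and the callback signatures are the ones published in <sndfile.h>, so they should be checked against the headers actually in use.

#include <stdio.h>	/* SEEK_SET, SEEK_CUR, SEEK_END */
#include <string.h>
#include <sndfile.h>

typedef struct
{	const unsigned char *data ;
	sf_count_t length ;
	sf_count_t offset ;
} MEM_SOURCE ;

static sf_count_t
mem_get_filelen (void *user_data)
{	return ((MEM_SOURCE *) user_data)->length ;
} /* mem_get_filelen */

static sf_count_t
mem_seek (sf_count_t offset, int whence, void *user_data)
{	MEM_SOURCE *src = user_data ;
	sf_count_t pos = (whence == SEEK_SET) ? offset :
			(whence == SEEK_CUR) ? src->offset + offset : src->length + offset ;

	if (pos < 0 || pos > src->length)
		return -1 ;
	return src->offset = pos ;
} /* mem_seek */

static sf_count_t
mem_read (void *ptr, sf_count_t count, void *user_data)
{	MEM_SOURCE *src = user_data ;

	if (count > src->length - src->offset)
		count = src->length - src->offset ;
	memcpy (ptr, src->data + src->offset, count) ;
	src->offset += count ;
	return count ;
} /* mem_read */

static sf_count_t
mem_tell (void *user_data)
{	return ((MEM_SOURCE *) user_data)->offset ;
} /* mem_tell */

/* Usage sketch : SF_VIRTUAL_IO vio = { mem_get_filelen, mem_seek, mem_read, NULL, mem_tell } ;
** then sf_open_virtual (&vio, SFM_READ, &sfinfo, &src) ; a NULL write callback is
** acceptable for SFM_READ according to the checks above.
*/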
sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data)
{	SF_PRIVATE *psf ;

	/* Make sure we have a valid set of virtual pointers. */
	if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((psf = psf_allocate ()) == NULL)
	{	sf_errno = SFE_MALLOC_FAILED ;
		return NULL ;
		} ;

	psf_init_files (psf) ;

	psf->virtual_io = SF_TRUE ;
	psf->vio = *sfvirtual ;
	psf->vio_user_data = user_data ;

	psf->file.mode = mode ;

	return psf_open_file (psf, sfinfo) ;
} /* sf_open_virtual */
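psf_allocate () itself is not included in this record, so its body can only be guessed at. Given the new "Required header allocation is too large" error and the free (psf->header.ptr) added to psf_close () (both visible in the diff that follows), a plausible shape is an allocator that creates the SF_PRIVATE struct together with an initial, growable header buffer instead of relying on a fixed-size array. The sketch below is a hedged guess, not the library's code: INITIAL_HEADER_SIZE and the header.len field name are assumptions, and the real helper would live beside the library's other private functions.

#define INITIAL_HEADER_SIZE	(1 << 12)	/* assumed starting size, for illustration only */

SF_PRIVATE *
psf_allocate (void)
{	SF_PRIVATE *psf ;

	if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)
		return NULL ;

	/* Assumption : the header buffer is heap allocated so it can grow on demand ;
	** growth past some maximum presumably fails with SFE_BAD_HEADER_ALLOC.
	*/
	if ((psf->header.ptr = calloc (1, INITIAL_HEADER_SIZE)) == NULL)
	{	free (psf) ;
		return NULL ;
		} ;
	psf->header.len = INITIAL_HEADER_SIZE ;

	return psf ;
} /* psf_allocate */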
{'added': [(270, '\t{\tSFE_BAD_HEADER_ALLOC \t, "Error : Required header allocation is too large." },'), (329, '\tif ((psf = psf_allocate ()) == NULL)'), (361, '\tif ((psf = psf_allocate ()) == NULL)'), (403, '\tif ((psf = psf_allocate ()) == NULL)'), (2691, '\tfree (psf->header.ptr) ;')], 'deleted': [(270, ''), (329, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)'), (361, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)'), (403, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)')]}
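The diff above captures the whole change recorded for CVE-2017-7586: the three allocation sites in the open paths (including sf_open_virtual shown earlier) switch from a bare calloc to psf_allocate (), a "Required header allocation is too large" error is added to the error table, and psf_close () frees psf->header.ptr. As a generic illustration of the CWE-119 pattern being addressed, not libsndfile's actual code, the difference between writing header bytes past a fixed array and into a bounds-checked, growable buffer looks roughly like the sketch below; the HEADER_BUF type, the MAX_HEADER_ALLOC cap and the function name are invented for the example.

#include <stdlib.h>
#include <string.h>

#define MAX_HEADER_ALLOC	(1 << 20)	/* assumed cap ; compare the new error string above */

typedef struct
{	unsigned char *ptr ;
	size_t len ;	/* bytes allocated */
	size_t indx ;	/* bytes used */
} HEADER_BUF ;

/* Append count bytes, growing the buffer as needed instead of writing past a fixed array. */
static int
header_put_bytes (HEADER_BUF *hdr, const void *data, size_t count)
{	if (count > MAX_HEADER_ALLOC - hdr->indx)
		return -1 ;	/* caller maps this to an allocation-too-large error */

	if (hdr->indx + count > hdr->len)
	{	size_t newlen = hdr->len ? hdr->len * 2 : 256 ;
		unsigned char *tmp ;

		while (newlen < hdr->indx + count)
			newlen *= 2 ;
		if ((tmp = realloc (hdr->ptr, newlen)) == NULL)
			return -1 ;
		hdr->ptr = tmp ;
		hdr->len = newlen ;
		} ;

	memcpy (hdr->ptr + hdr->indx, data, count) ;
	hdr->indx += count ;
	return 0 ;
} /* header_put_bytes */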
5
4
2,307
15,935
https://github.com/erikd/libsndfile
CVE-2017-7586
['CWE-119']
aiff.c
aiff_read_chanmap
/* ** Copyright (C) 1999-2017 Erik de Castro Lopo <erikd@mega-nerd.com> ** Copyright (C) 2005 David Viens <davidv@plogue.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <ctype.h> #include <inttypes.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #include "chanmap.h" /*------------------------------------------------------------------------------ * Macros to handle big/little endian issues. */ #define FORM_MARKER (MAKE_MARKER ('F', 'O', 'R', 'M')) #define AIFF_MARKER (MAKE_MARKER ('A', 'I', 'F', 'F')) #define AIFC_MARKER (MAKE_MARKER ('A', 'I', 'F', 'C')) #define COMM_MARKER (MAKE_MARKER ('C', 'O', 'M', 'M')) #define SSND_MARKER (MAKE_MARKER ('S', 'S', 'N', 'D')) #define MARK_MARKER (MAKE_MARKER ('M', 'A', 'R', 'K')) #define INST_MARKER (MAKE_MARKER ('I', 'N', 'S', 'T')) #define APPL_MARKER (MAKE_MARKER ('A', 'P', 'P', 'L')) #define CHAN_MARKER (MAKE_MARKER ('C', 'H', 'A', 'N')) #define c_MARKER (MAKE_MARKER ('(', 'c', ')', ' ')) #define NAME_MARKER (MAKE_MARKER ('N', 'A', 'M', 'E')) #define AUTH_MARKER (MAKE_MARKER ('A', 'U', 'T', 'H')) #define ANNO_MARKER (MAKE_MARKER ('A', 'N', 'N', 'O')) #define COMT_MARKER (MAKE_MARKER ('C', 'O', 'M', 'T')) #define FVER_MARKER (MAKE_MARKER ('F', 'V', 'E', 'R')) #define SFX_MARKER (MAKE_MARKER ('S', 'F', 'X', '!')) #define PEAK_MARKER (MAKE_MARKER ('P', 'E', 'A', 'K')) #define basc_MARKER (MAKE_MARKER ('b', 'a', 's', 'c')) /* Supported AIFC encodings.*/ #define NONE_MARKER (MAKE_MARKER ('N', 'O', 'N', 'E')) #define sowt_MARKER (MAKE_MARKER ('s', 'o', 'w', 't')) #define twos_MARKER (MAKE_MARKER ('t', 'w', 'o', 's')) #define raw_MARKER (MAKE_MARKER ('r', 'a', 'w', ' ')) #define in24_MARKER (MAKE_MARKER ('i', 'n', '2', '4')) #define ni24_MARKER (MAKE_MARKER ('4', '2', 'n', '1')) #define in32_MARKER (MAKE_MARKER ('i', 'n', '3', '2')) #define ni32_MARKER (MAKE_MARKER ('2', '3', 'n', 'i')) #define fl32_MARKER (MAKE_MARKER ('f', 'l', '3', '2')) #define FL32_MARKER (MAKE_MARKER ('F', 'L', '3', '2')) #define fl64_MARKER (MAKE_MARKER ('f', 'l', '6', '4')) #define FL64_MARKER (MAKE_MARKER ('F', 'L', '6', '4')) #define ulaw_MARKER (MAKE_MARKER ('u', 'l', 'a', 'w')) #define ULAW_MARKER (MAKE_MARKER ('U', 'L', 'A', 'W')) #define alaw_MARKER (MAKE_MARKER ('a', 'l', 'a', 'w')) #define ALAW_MARKER (MAKE_MARKER ('A', 'L', 'A', 'W')) #define DWVW_MARKER (MAKE_MARKER ('D', 'W', 'V', 'W')) #define GSM_MARKER (MAKE_MARKER ('G', 'S', 'M', ' ')) #define ima4_MARKER (MAKE_MARKER ('i', 'm', 'a', '4')) /* ** This value is officially assigned to Mega Nerd Pty Ltd by Apple ** Corportation as the Application marker for libsndfile. 
** ** See : http://developer.apple.com/faq/datatype.html */ #define m3ga_MARKER (MAKE_MARKER ('m', '3', 'g', 'a')) /* Unsupported AIFC encodings.*/ #define MAC3_MARKER (MAKE_MARKER ('M', 'A', 'C', '3')) #define MAC6_MARKER (MAKE_MARKER ('M', 'A', 'C', '6')) #define ADP4_MARKER (MAKE_MARKER ('A', 'D', 'P', '4')) /* Predfined chunk sizes. */ #define SIZEOF_AIFF_COMM 18 #define SIZEOF_AIFC_COMM_MIN 22 #define SIZEOF_AIFC_COMM 24 #define SIZEOF_SSND_CHUNK 8 #define SIZEOF_INST_CHUNK 20 /* Is it constant? */ /* AIFC/IMA4 defines. */ #define AIFC_IMA4_BLOCK_LEN 34 #define AIFC_IMA4_SAMPLES_PER_BLOCK 64 #define AIFF_PEAK_CHUNK_SIZE(ch) (2 * sizeof (int) + ch * (sizeof (float) + sizeof (int))) /*------------------------------------------------------------------------------ * Typedefs for file chunks. */ enum { HAVE_FORM = 0x01, HAVE_AIFF = 0x02, HAVE_AIFC = 0x04, HAVE_FVER = 0x08, HAVE_COMM = 0x10, HAVE_SSND = 0x20 } ; typedef struct { uint32_t size ; int16_t numChannels ; uint32_t numSampleFrames ; int16_t sampleSize ; uint8_t sampleRate [10] ; uint32_t encoding ; char zero_bytes [2] ; } COMM_CHUNK ; typedef struct { uint32_t offset ; uint32_t blocksize ; } SSND_CHUNK ; typedef struct { int16_t playMode ; uint16_t beginLoop ; uint16_t endLoop ; } INST_LOOP ; typedef struct { int8_t baseNote ; /* all notes are MIDI note numbers */ int8_t detune ; /* cents off, only -50 to +50 are significant */ int8_t lowNote ; int8_t highNote ; int8_t lowVelocity ; /* 1 to 127 */ int8_t highVelocity ; /* 1 to 127 */ int16_t gain ; /* in dB, 0 is normal */ INST_LOOP sustain_loop ; INST_LOOP release_loop ; } INST_CHUNK ; enum { basc_SCALE_MINOR = 1, basc_SCALE_MAJOR, basc_SCALE_NEITHER, basc_SCALE_BOTH } ; enum { basc_TYPE_LOOP = 0, basc_TYPE_ONE_SHOT } ; typedef struct { uint32_t version ; uint32_t numBeats ; uint16_t rootNote ; uint16_t scaleType ; uint16_t sigNumerator ; uint16_t sigDenominator ; uint16_t loopType ; } basc_CHUNK ; typedef struct { uint16_t markerID ; uint32_t position ; } MARK_ID_POS ; typedef struct { sf_count_t comm_offset ; sf_count_t ssnd_offset ; int32_t chanmap_tag ; MARK_ID_POS *markstr ; } AIFF_PRIVATE ; /*------------------------------------------------------------------------------ * Private static functions. 
*/ static int aiff_close (SF_PRIVATE *psf) ; static int tenbytefloat2int (uint8_t *bytes) ; static void uint2tenbytefloat (uint32_t num, uint8_t *bytes) ; static int aiff_read_comm_chunk (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt) ; static int aiff_read_header (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt) ; static int aiff_write_header (SF_PRIVATE *psf, int calc_length) ; static int aiff_write_tailer (SF_PRIVATE *psf) ; static void aiff_write_strings (SF_PRIVATE *psf, int location) ; static int aiff_command (SF_PRIVATE *psf, int command, void *data, int datasize) ; static const char *get_loop_mode_str (int16_t mode) ; static int16_t get_loop_mode (int16_t mode) ; static int aiff_read_basc_chunk (SF_PRIVATE * psf, int) ; static int aiff_read_chanmap (SF_PRIVATE * psf, unsigned dword) ; static uint32_t marker_to_position (const MARK_ID_POS *m, uint16_t n, int marksize) ; static int aiff_set_chunk (SF_PRIVATE *psf, const SF_CHUNK_INFO * chunk_info) ; static SF_CHUNK_ITERATOR * aiff_next_chunk_iterator (SF_PRIVATE *psf, SF_CHUNK_ITERATOR * iterator) ; static int aiff_get_chunk_size (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) ; static int aiff_get_chunk_data (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) ; /*------------------------------------------------------------------------------ ** Public function. */ int aiff_open (SF_PRIVATE *psf) { COMM_CHUNK comm_fmt ; int error, subformat ; memset (&comm_fmt, 0, sizeof (comm_fmt)) ; subformat = SF_CODEC (psf->sf.format) ; if ((psf->container_data = calloc (1, sizeof (AIFF_PRIVATE))) == NULL) return SFE_MALLOC_FAILED ; if (psf->file.mode == SFM_READ || (psf->file.mode == SFM_RDWR && psf->filelength > 0)) { if ((error = aiff_read_header (psf, &comm_fmt))) return error ; psf->next_chunk_iterator = aiff_next_chunk_iterator ; psf->get_chunk_size = aiff_get_chunk_size ; psf->get_chunk_data = aiff_get_chunk_data ; psf_fseek (psf, psf->dataoffset, SEEK_SET) ; } ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) { if (psf->is_pipe) return SFE_NO_PIPE_WRITE ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_AIFF) return SFE_BAD_OPEN_FORMAT ; if (psf->file.mode == SFM_WRITE && (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)) { if ((psf->peak_info = peak_info_calloc (psf->sf.channels)) == NULL) return SFE_MALLOC_FAILED ; psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->file.mode != SFM_RDWR || psf->filelength < 40) { psf->filelength = 0 ; psf->datalength = 0 ; psf->dataoffset = 0 ; psf->sf.frames = 0 ; } ; psf->strings.flags = SF_STR_ALLOW_START | SF_STR_ALLOW_END ; if ((error = aiff_write_header (psf, SF_FALSE))) return error ; psf->write_header = aiff_write_header ; psf->set_chunk = aiff_set_chunk ; } ; psf->container_close = aiff_close ; psf->command = aiff_command ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_U8 : error = pcm_init (psf) ; break ; case SF_FORMAT_PCM_S8 : error = pcm_init (psf) ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_PCM_24 : case SF_FORMAT_PCM_32 : error = pcm_init (psf) ; break ; case SF_FORMAT_ULAW : error = ulaw_init (psf) ; break ; case SF_FORMAT_ALAW : error = alaw_init (psf) ; break ; /* Lite remove start */ case SF_FORMAT_FLOAT : error = float32_init (psf) ; break ; case SF_FORMAT_DOUBLE : error = double64_init (psf) ; break ; case SF_FORMAT_DWVW_12 : if (psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; case SF_FORMAT_DWVW_16 : error = dwvw_init (psf, 16) ; if 
(psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; case SF_FORMAT_DWVW_24 : error = dwvw_init (psf, 24) ; if (psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; case SF_FORMAT_DWVW_N : if (psf->file.mode != SFM_READ) { error = SFE_DWVW_BAD_BITWIDTH ; break ; } ; if (comm_fmt.sampleSize >= 8 && comm_fmt.sampleSize < 24) { error = dwvw_init (psf, comm_fmt.sampleSize) ; if (psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; } ; psf_log_printf (psf, "AIFC/DWVW : Bad bitwidth %d\n", comm_fmt.sampleSize) ; error = SFE_DWVW_BAD_BITWIDTH ; break ; case SF_FORMAT_IMA_ADPCM : /* ** IMA ADPCM encoded AIFF files always have a block length ** of 34 which decodes to 64 samples. */ error = aiff_ima_init (psf, AIFC_IMA4_BLOCK_LEN, AIFC_IMA4_SAMPLES_PER_BLOCK) ; break ; /* Lite remove end */ case SF_FORMAT_GSM610 : error = gsm610_init (psf) ; if (psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; default : return SFE_UNIMPLEMENTED ; } ; if (psf->file.mode != SFM_WRITE && psf->sf.frames - comm_fmt.numSampleFrames != 0) { psf_log_printf (psf, "*** Frame count read from 'COMM' chunk (%u) not equal to frame count\n" "*** calculated from length of 'SSND' chunk (%u).\n", comm_fmt.numSampleFrames, (uint32_t) psf->sf.frames) ; } ; return error ; } /* aiff_open */ /*========================================================================================== ** Private functions. */ /* This function ought to check size */ static uint32_t marker_to_position (const MARK_ID_POS *m, uint16_t n, int marksize) { int i ; for (i = 0 ; i < marksize ; i++) if (m [i].markerID == n) return m [i].position ; return 0 ; } /* marker_to_position */ static int aiff_read_header (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt) { SSND_CHUNK ssnd_fmt ; AIFF_PRIVATE *paiff ; BUF_UNION ubuf ; uint32_t chunk_size = 0, FORMsize, SSNDsize, bytesread, mark_count = 0 ; int k, found_chunk = 0, done = 0, error = 0 ; char *cptr ; int instr_found = 0, mark_found = 0 ; if (psf->filelength > SF_PLATFORM_S64 (0xffffffff)) psf_log_printf (psf, "Warning : filelength > 0xffffffff. This is bad!!!!\n") ; if ((paiff = psf->container_data) == NULL) return SFE_INTERNAL ; paiff->comm_offset = 0 ; paiff->ssnd_offset = 0 ; /* Set position to start of file to begin reading header. */ psf_binheader_readf (psf, "p", 0) ; memset (comm_fmt, 0, sizeof (COMM_CHUNK)) ; /* Until recently AIF* file were all BIG endian. */ psf->endian = SF_ENDIAN_BIG ; /* AIFF files can apparently have their chunks in any order. However, they ** must have a FORM chunk. Approach here is to read all the chunks one by ** one and then check for the mandatory chunks at the end. */ while (! done) { unsigned marker ; size_t jump = chunk_size & 1 ; marker = chunk_size = 0 ; psf_binheader_readf (psf, "Ejm4", jump, &marker, &chunk_size) ; if (marker == 0) { sf_count_t pos = psf_ftell (psf) ; psf_log_printf (psf, "Have 0 marker at position %D (0x%x).\n", pos, pos) ; break ; } ; if (psf->file.mode == SFM_RDWR && (found_chunk & HAVE_SSND)) return SFE_AIFF_RW_SSND_NOT_LAST ; psf_store_read_chunk_u32 (&psf->rchunks, marker, psf_ftell (psf), chunk_size) ; switch (marker) { case FORM_MARKER : if (found_chunk) return SFE_AIFF_NO_FORM ; FORMsize = chunk_size ; found_chunk |= HAVE_FORM ; psf_binheader_readf (psf, "m", &marker) ; switch (marker) { case AIFC_MARKER : case AIFF_MARKER : found_chunk |= (marker == AIFC_MARKER) ? 
(HAVE_AIFC | HAVE_AIFF) : HAVE_AIFF ; break ; default : break ; } ; if (psf->fileoffset > 0 && psf->filelength > FORMsize + 8) { /* Set file length. */ psf->filelength = FORMsize + 8 ; psf_log_printf (psf, "FORM : %u\n %M\n", FORMsize, marker) ; } else if (FORMsize != psf->filelength - 2 * SIGNED_SIZEOF (chunk_size)) { chunk_size = psf->filelength - 2 * sizeof (chunk_size) ; psf_log_printf (psf, "FORM : %u (should be %u)\n %M\n", FORMsize, chunk_size, marker) ; FORMsize = chunk_size ; } else psf_log_printf (psf, "FORM : %u\n %M\n", FORMsize, marker) ; /* Set this to 0, so we don't jump a byte when parsing the next marker. */ chunk_size = 0 ; break ; case COMM_MARKER : paiff->comm_offset = psf_ftell (psf) - 8 ; chunk_size += chunk_size & 1 ; comm_fmt->size = chunk_size ; if ((error = aiff_read_comm_chunk (psf, comm_fmt)) != 0) return error ; found_chunk |= HAVE_COMM ; break ; case PEAK_MARKER : /* Must have COMM chunk before PEAK chunk. */ if ((found_chunk & (HAVE_FORM | HAVE_AIFF | HAVE_COMM)) != (HAVE_FORM | HAVE_AIFF | HAVE_COMM)) return SFE_AIFF_PEAK_B4_COMM ; psf_log_printf (psf, "%M : %d\n", marker, chunk_size) ; if (chunk_size != AIFF_PEAK_CHUNK_SIZE (psf->sf.channels)) { psf_binheader_readf (psf, "j", chunk_size) ; psf_log_printf (psf, "*** File PEAK chunk too big.\n") ; return SFE_WAV_BAD_PEAK ; } ; if ((psf->peak_info = peak_info_calloc (psf->sf.channels)) == NULL) return SFE_MALLOC_FAILED ; /* read in rest of PEAK chunk. */ psf_binheader_readf (psf, "E44", &(psf->peak_info->version), &(psf->peak_info->timestamp)) ; if (psf->peak_info->version != 1) psf_log_printf (psf, " version : %d *** (should be version 1)\n", psf->peak_info->version) ; else psf_log_printf (psf, " version : %d\n", psf->peak_info->version) ; psf_log_printf (psf, " time stamp : %d\n", psf->peak_info->timestamp) ; psf_log_printf (psf, " Ch Position Value\n") ; cptr = ubuf.cbuf ; for (k = 0 ; k < psf->sf.channels ; k++) { float value ; uint32_t position ; psf_binheader_readf (psf, "Ef4", &value, &position) ; psf->peak_info->peaks [k].value = value ; psf->peak_info->peaks [k].position = position ; snprintf (cptr, sizeof (ubuf.scbuf), " %2d %-12" PRId64 " %g\n", k, psf->peak_info->peaks [k].position, psf->peak_info->peaks [k].value) ; cptr [sizeof (ubuf.scbuf) - 1] = 0 ; psf_log_printf (psf, "%s", cptr) ; } ; psf->peak_info->peak_loc = ((found_chunk & HAVE_SSND) == 0) ? 
SF_PEAK_START : SF_PEAK_END ; break ; case SSND_MARKER : if ((found_chunk & HAVE_AIFC) && (found_chunk & HAVE_FVER) == 0) psf_log_printf (psf, "*** Valid AIFC files should have an FVER chunk.\n") ; paiff->ssnd_offset = psf_ftell (psf) - 8 ; SSNDsize = chunk_size ; psf_binheader_readf (psf, "E44", &(ssnd_fmt.offset), &(ssnd_fmt.blocksize)) ; psf->datalength = SSNDsize - sizeof (ssnd_fmt) ; psf->dataoffset = psf_ftell (psf) ; if (psf->datalength > psf->filelength - psf->dataoffset || psf->datalength < 0) { psf_log_printf (psf, " SSND : %u (should be %D)\n", SSNDsize, psf->filelength - psf->dataoffset + sizeof (SSND_CHUNK)) ; psf->datalength = psf->filelength - psf->dataoffset ; } else psf_log_printf (psf, " SSND : %u\n", SSNDsize) ; if (ssnd_fmt.offset == 0 || psf->dataoffset + ssnd_fmt.offset == ssnd_fmt.blocksize) { psf_log_printf (psf, " Offset : %u\n", ssnd_fmt.offset) ; psf_log_printf (psf, " Block Size : %u\n", ssnd_fmt.blocksize) ; psf->dataoffset += ssnd_fmt.offset ; psf->datalength -= ssnd_fmt.offset ; } else { psf_log_printf (psf, " Offset : %u\n", ssnd_fmt.offset) ; psf_log_printf (psf, " Block Size : %u ???\n", ssnd_fmt.blocksize) ; psf->dataoffset += ssnd_fmt.offset ; psf->datalength -= ssnd_fmt.offset ; } ; /* Only set dataend if there really is data at the end. */ if (psf->datalength + psf->dataoffset < psf->filelength) psf->dataend = psf->datalength + psf->dataoffset ; found_chunk |= HAVE_SSND ; if (! psf->sf.seekable) break ; /* Seek to end of SSND chunk. */ psf_fseek (psf, psf->dataoffset + psf->datalength, SEEK_SET) ; break ; case c_MARKER : if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf)) { psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ; cptr [chunk_size] = 0 ; psf_sanitize_string (cptr, chunk_size) ; psf_log_printf (psf, " %M : %s\n", marker, cptr) ; psf_store_string (psf, SF_STR_COPYRIGHT, cptr) ; chunk_size += chunk_size & 1 ; break ; case AUTH_MARKER : if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 1) { psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ; cptr [chunk_size] = 0 ; psf_log_printf (psf, " %M : %s\n", marker, cptr) ; psf_store_string (psf, SF_STR_ARTIST, cptr) ; chunk_size += chunk_size & 1 ; break ; case COMT_MARKER : { uint16_t count, id, len ; uint32_t timestamp, bytes ; if (chunk_size == 0) break ; bytes = chunk_size ; bytes -= psf_binheader_readf (psf, "E2", &count) ; psf_log_printf (psf, " %M : %d\n count : %d\n", marker, chunk_size, count) ; for (k = 0 ; k < count ; k++) { bytes -= psf_binheader_readf (psf, "E422", &timestamp, &id, &len) ; psf_log_printf (psf, " time : 0x%x\n marker : %x\n length : %d\n", timestamp, id, len) ; if (len + 1 > SIGNED_SIZEOF (ubuf.scbuf)) { psf_log_printf (psf, "\nError : string length (%d) too big.\n", len) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; bytes -= psf_binheader_readf (psf, "b", cptr, len) ; cptr [len] = 0 ; psf_log_printf (psf, " string : %s\n", cptr) ; } ; if (bytes > 0) psf_binheader_readf (psf, "j", bytes) ; } ; break ; case APPL_MARKER : { unsigned appl_marker ; if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 1) { psf_log_printf (psf, " %M : %u (too big, skipping)\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size + (chunk_size & 1)) ; break ; 
} ; if (chunk_size < 4) { psf_log_printf (psf, " %M : %d (too small, skipping)\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size + (chunk_size & 1)) ; break ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "mb", &appl_marker, cptr, chunk_size + (chunk_size & 1) - 4) ; cptr [chunk_size] = 0 ; for (k = 0 ; k < (int) chunk_size ; k++) if (! psf_isprint (cptr [k])) { cptr [k] = 0 ; break ; } ; psf_log_printf (psf, " %M : %d\n AppSig : %M\n Name : %s\n", marker, chunk_size, appl_marker, cptr) ; psf_store_string (psf, SF_STR_SOFTWARE, cptr) ; chunk_size += chunk_size & 1 ; } ; break ; case NAME_MARKER : if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 2) { psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ; cptr [chunk_size] = 0 ; psf_log_printf (psf, " %M : %s\n", marker, cptr) ; psf_store_string (psf, SF_STR_TITLE, cptr) ; chunk_size += chunk_size & 1 ; break ; case ANNO_MARKER : if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 2) { psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ; cptr [chunk_size] = 0 ; psf_log_printf (psf, " %M : %s\n", marker, cptr) ; psf_store_string (psf, SF_STR_COMMENT, cptr) ; chunk_size += chunk_size & 1 ; break ; case INST_MARKER : if (chunk_size != SIZEOF_INST_CHUNK) { psf_log_printf (psf, " %M : %d (should be %d)\n", marker, chunk_size, SIZEOF_INST_CHUNK) ; psf_binheader_readf (psf, "j", chunk_size) ; break ; } ; psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; { uint8_t bytes [6] ; int16_t gain ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) return SFE_MALLOC_FAILED ; psf_binheader_readf (psf, "b", bytes, 6) ; psf_log_printf (psf, " Base Note : %u\n Detune : %u\n" " Low Note : %u\n High Note : %u\n" " Low Vel. : %u\n High Vel. 
: %u\n", bytes [0], bytes [1], bytes [2], bytes [3], bytes [4], bytes [5]) ; psf->instrument->basenote = bytes [0] ; psf->instrument->detune = bytes [1] ; psf->instrument->key_lo = bytes [2] ; psf->instrument->key_hi = bytes [3] ; psf->instrument->velocity_lo = bytes [4] ; psf->instrument->velocity_hi = bytes [5] ; psf_binheader_readf (psf, "E2", &gain) ; psf->instrument->gain = gain ; psf_log_printf (psf, " Gain (dB) : %d\n", gain) ; } ; { int16_t mode ; /* 0 - no loop, 1 - forward looping, 2 - backward looping */ const char *loop_mode ; uint16_t begin, end ; psf_binheader_readf (psf, "E222", &mode, &begin, &end) ; loop_mode = get_loop_mode_str (mode) ; mode = get_loop_mode (mode) ; if (mode == SF_LOOP_NONE) { psf->instrument->loop_count = 0 ; psf->instrument->loops [0].mode = SF_LOOP_NONE ; } else { psf->instrument->loop_count = 1 ; psf->instrument->loops [0].mode = SF_LOOP_FORWARD ; psf->instrument->loops [0].start = begin ; psf->instrument->loops [0].end = end ; psf->instrument->loops [0].count = 0 ; } ; psf_log_printf (psf, " Sustain\n mode : %d => %s\n begin : %u\n end : %u\n", mode, loop_mode, begin, end) ; psf_binheader_readf (psf, "E222", &mode, &begin, &end) ; loop_mode = get_loop_mode_str (mode) ; mode = get_loop_mode (mode) ; if (mode == SF_LOOP_NONE) psf->instrument->loops [1].mode = SF_LOOP_NONE ; else { psf->instrument->loop_count += 1 ; psf->instrument->loops [1].mode = SF_LOOP_FORWARD ; psf->instrument->loops [1].start = begin ; psf->instrument->loops [1].end = end ; psf->instrument->loops [1].count = 0 ; } ; psf_log_printf (psf, " Release\n mode : %d => %s\n begin : %u\n end : %u\n", mode, loop_mode, begin, end) ; } ; instr_found++ ; break ; case basc_MARKER : psf_log_printf (psf, " basc : %u\n", chunk_size) ; if ((error = aiff_read_basc_chunk (psf, chunk_size))) return error ; break ; case MARK_MARKER : psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; { uint16_t mark_id, n = 0 ; uint32_t position ; bytesread = psf_binheader_readf (psf, "E2", &n) ; mark_count = n ; psf_log_printf (psf, " Count : %u\n", mark_count) ; if (paiff->markstr != NULL) { psf_log_printf (psf, "*** Second MARK chunk found. Throwing away the first.\n") ; free (paiff->markstr) ; } ; paiff->markstr = calloc (mark_count, sizeof (MARK_ID_POS)) ; if (paiff->markstr == NULL) return SFE_MALLOC_FAILED ; if (mark_count > 1000) { psf_log_printf (psf, " More than 1000 markers, skipping!\n") ; psf_binheader_readf (psf, "j", chunk_size - bytesread) ; break ; } ; if ((psf->cues = psf_cues_alloc (mark_count)) == NULL) return SFE_MALLOC_FAILED ; for (n = 0 ; n < mark_count && bytesread < chunk_size ; n++) { uint32_t pstr_len ; uint8_t ch ; bytesread += psf_binheader_readf (psf, "E241", &mark_id, &position, &ch) ; psf_log_printf (psf, " Mark ID : %u\n Position : %u\n", mark_id, position) ; psf->cues->cue_points [n].indx = mark_id ; psf->cues->cue_points [n].position = 0 ; psf->cues->cue_points [n].fcc_chunk = MAKE_MARKER ('d', 'a', 't', 'a') ; /* always data */ psf->cues->cue_points [n].chunk_start = 0 ; psf->cues->cue_points [n].block_start = 0 ; psf->cues->cue_points [n].sample_offset = position ; pstr_len = (ch & 1) ? 
ch : ch + 1 ; if (pstr_len < sizeof (ubuf.scbuf) - 1) { bytesread += psf_binheader_readf (psf, "b", ubuf.scbuf, pstr_len) ; ubuf.scbuf [pstr_len] = 0 ; } else { uint32_t read_len = pstr_len - (sizeof (ubuf.scbuf) - 1) ; bytesread += psf_binheader_readf (psf, "bj", ubuf.scbuf, read_len, pstr_len - read_len) ; ubuf.scbuf [sizeof (ubuf.scbuf) - 1] = 0 ; } psf_log_printf (psf, " Name : %s\n", ubuf.scbuf) ; psf_strlcpy (psf->cues->cue_points [n].name, sizeof (psf->cues->cue_points [n].name), ubuf.cbuf) ; paiff->markstr [n].markerID = mark_id ; paiff->markstr [n].position = position ; /* ** TODO if ubuf.scbuf is equal to ** either Beg_loop, Beg loop or beg loop and spam ** if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) ** return SFE_MALLOC_FAILED ; */ } ; } ; mark_found++ ; psf_binheader_readf (psf, "j", chunk_size - bytesread) ; break ; case FVER_MARKER : found_chunk |= HAVE_FVER ; /* Fall through to next case. */ case SFX_MARKER : psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size) ; break ; case NONE_MARKER : /* Fix for broken AIFC files with incorrect COMM chunk length. */ chunk_size = (chunk_size >> 24) - 3 ; psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", make_size_t (chunk_size)) ; break ; case CHAN_MARKER : if (chunk_size < 12) { psf_log_printf (psf, " %M : %d (should be >= 12)\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size) ; break ; } psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; if ((error = aiff_read_chanmap (psf, chunk_size))) return error ; break ; default : if (chunk_size >= 0xffff0000) { done = SF_TRUE ; psf_log_printf (psf, "*** Unknown chunk marker (%X) at position %D with length %u. Exiting parser.\n", marker, psf_ftell (psf) - 8, chunk_size) ; break ; } ; if (psf_isprint ((marker >> 24) & 0xFF) && psf_isprint ((marker >> 16) & 0xFF) && psf_isprint ((marker >> 8) & 0xFF) && psf_isprint (marker & 0xFF)) { psf_log_printf (psf, " %M : %u (unknown marker)\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size) ; break ; } ; if (psf_ftell (psf) & 0x03) { psf_log_printf (psf, " Unknown chunk marker at position %D. Resynching.\n", psf_ftell (psf) - 8) ; psf_binheader_readf (psf, "j", -3) ; break ; } ; psf_log_printf (psf, "*** Unknown chunk marker %X at position %D. Exiting parser.\n", marker, psf_ftell (psf)) ; done = SF_TRUE ; break ; } ; /* switch (marker) */ if (chunk_size >= psf->filelength) { psf_log_printf (psf, "*** Chunk size %u > file length %D. Exiting parser.\n", chunk_size, psf->filelength) ; break ; } ; if ((! 
psf->sf.seekable) && (found_chunk & HAVE_SSND)) break ; if (psf_ftell (psf) >= psf->filelength - (2 * SIGNED_SIZEOF (int32_t))) break ; } ; /* while (1) */ if (instr_found && mark_found) { int ji, str_index ; /* Next loop will convert markers to loop positions for internal handling */ for (ji = 0 ; ji < psf->instrument->loop_count ; ji ++) { if (ji < ARRAY_LEN (psf->instrument->loops)) { psf->instrument->loops [ji].start = marker_to_position (paiff->markstr, psf->instrument->loops [ji].start, mark_count) ; psf->instrument->loops [ji].end = marker_to_position (paiff->markstr, psf->instrument->loops [ji].end, mark_count) ; psf->instrument->loops [ji].mode = SF_LOOP_FORWARD ; } ; } ; /* The markers that correspond to loop positions can now be removed from cues struct */ if (psf->cues->cue_count > (uint32_t) (psf->instrument->loop_count * 2)) { uint32_t j ; for (j = 0 ; j < psf->cues->cue_count - (uint32_t) (psf->instrument->loop_count * 2) ; j ++) { /* This simply copies the information in cues above loop positions and writes it at current count instead */ psf->cues->cue_points [j].indx = psf->cues->cue_points [j + psf->instrument->loop_count * 2].indx ; psf->cues->cue_points [j].position = psf->cues->cue_points [j + psf->instrument->loop_count * 2].position ; psf->cues->cue_points [j].fcc_chunk = psf->cues->cue_points [j + psf->instrument->loop_count * 2].fcc_chunk ; psf->cues->cue_points [j].chunk_start = psf->cues->cue_points [j + psf->instrument->loop_count * 2].chunk_start ; psf->cues->cue_points [j].block_start = psf->cues->cue_points [j + psf->instrument->loop_count * 2].block_start ; psf->cues->cue_points [j].sample_offset = psf->cues->cue_points [j + psf->instrument->loop_count * 2].sample_offset ; for (str_index = 0 ; str_index < 256 ; str_index++) psf->cues->cue_points [j].name [str_index] = psf->cues->cue_points [j + psf->instrument->loop_count * 2].name [str_index] ; } ; psf->cues->cue_count -= psf->instrument->loop_count * 2 ; } else { /* All the cues were in fact loop positions so we can actually remove the cues altogether */ free (psf->cues) ; psf->cues = NULL ; } } ; if (psf->sf.channels < 1) return SFE_CHANNEL_COUNT_ZERO ; if (psf->sf.channels >= SF_MAX_CHANNELS) return SFE_CHANNEL_COUNT ; if (! (found_chunk & HAVE_FORM)) return SFE_AIFF_NO_FORM ; if (! (found_chunk & HAVE_AIFF)) return SFE_AIFF_COMM_NO_FORM ; if (! (found_chunk & HAVE_COMM)) return SFE_AIFF_SSND_NO_COMM ; if (! psf->dataoffset) return SFE_AIFF_NO_DATA ; return 0 ; } /* aiff_read_header */ static int aiff_close (SF_PRIVATE *psf) { AIFF_PRIVATE *paiff = psf->container_data ; if (paiff != NULL && paiff->markstr != NULL) { free (paiff->markstr) ; paiff->markstr = NULL ; } ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) { aiff_write_tailer (psf) ; aiff_write_header (psf, SF_TRUE) ; } ; return 0 ; } /* aiff_close */ static int aiff_read_comm_chunk (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt) { BUF_UNION ubuf ; int subformat, samplerate ; ubuf.scbuf [0] = 0 ; /* The COMM chunk has an int aligned to an odd word boundary. Some ** procesors are not able to deal with this (ie bus fault) so we have ** to take special care. 
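** (In the chunk payload numChannels occupies offset 0 and numSampleFrames
** offset 2, so the 32 bit frame count is 2-byte but not 4-byte aligned ;
** reading the fields one at a time through psf_binheader_readf sidesteps
** any unaligned 32 bit load.)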
*/ psf_binheader_readf (psf, "E242b", &(comm_fmt->numChannels), &(comm_fmt->numSampleFrames), &(comm_fmt->sampleSize), &(comm_fmt->sampleRate), SIGNED_SIZEOF (comm_fmt->sampleRate)) ; if (comm_fmt->size > 0x10000 && (comm_fmt->size & 0xffff) == 0) { psf_log_printf (psf, " COMM : %d (0x%x) *** should be ", comm_fmt->size, comm_fmt->size) ; comm_fmt->size = ENDSWAP_32 (comm_fmt->size) ; psf_log_printf (psf, "%d (0x%x)\n", comm_fmt->size, comm_fmt->size) ; } else psf_log_printf (psf, " COMM : %d\n", comm_fmt->size) ; if (comm_fmt->size == SIZEOF_AIFF_COMM) comm_fmt->encoding = NONE_MARKER ; else if (comm_fmt->size == SIZEOF_AIFC_COMM_MIN) psf_binheader_readf (psf, "Em", &(comm_fmt->encoding)) ; else if (comm_fmt->size >= SIZEOF_AIFC_COMM) { uint8_t encoding_len ; unsigned read_len ; psf_binheader_readf (psf, "Em1", &(comm_fmt->encoding), &encoding_len) ; comm_fmt->size = SF_MIN (sizeof (ubuf.scbuf), make_size_t (comm_fmt->size)) ; memset (ubuf.scbuf, 0, comm_fmt->size) ; read_len = comm_fmt->size - SIZEOF_AIFC_COMM + 1 ; psf_binheader_readf (psf, "b", ubuf.scbuf, read_len) ; ubuf.scbuf [read_len + 1] = 0 ; } ; samplerate = tenbytefloat2int (comm_fmt->sampleRate) ; psf_log_printf (psf, " Sample Rate : %d\n", samplerate) ; psf_log_printf (psf, " Frames : %u%s\n", comm_fmt->numSampleFrames, (comm_fmt->numSampleFrames == 0 && psf->filelength > 104) ? " (Should not be 0)" : "") ; if (comm_fmt->numChannels < 1 || comm_fmt->numChannels >= SF_MAX_CHANNELS) { psf_log_printf (psf, " Channels : %d (should be >= 1 and < %d)\n", comm_fmt->numChannels, SF_MAX_CHANNELS) ; return SFE_CHANNEL_COUNT_BAD ; } ; psf_log_printf (psf, " Channels : %d\n", comm_fmt->numChannels) ; /* Found some broken 'fl32' files with comm.samplesize == 16. Fix it here. */ if ((comm_fmt->encoding == fl32_MARKER || comm_fmt->encoding == FL32_MARKER) && comm_fmt->sampleSize != 32) { psf_log_printf (psf, " Sample Size : %d (should be 32)\n", comm_fmt->sampleSize) ; comm_fmt->sampleSize = 32 ; } else if ((comm_fmt->encoding == fl64_MARKER || comm_fmt->encoding == FL64_MARKER) && comm_fmt->sampleSize != 64) { psf_log_printf (psf, " Sample Size : %d (should be 64)\n", comm_fmt->sampleSize) ; comm_fmt->sampleSize = 64 ; } else psf_log_printf (psf, " Sample Size : %d\n", comm_fmt->sampleSize) ; subformat = s_bitwidth_to_subformat (comm_fmt->sampleSize) ; psf->sf.samplerate = samplerate ; psf->sf.frames = comm_fmt->numSampleFrames ; psf->sf.channels = comm_fmt->numChannels ; psf->bytewidth = BITWIDTH2BYTES (comm_fmt->sampleSize) ; psf->endian = SF_ENDIAN_BIG ; switch (comm_fmt->encoding) { case NONE_MARKER : psf->sf.format = (SF_FORMAT_AIFF | subformat) ; break ; case twos_MARKER : case in24_MARKER : case in32_MARKER : psf->sf.format = (SF_ENDIAN_BIG | SF_FORMAT_AIFF | subformat) ; break ; case sowt_MARKER : case ni24_MARKER : case ni32_MARKER : psf->endian = SF_ENDIAN_LITTLE ; psf->sf.format = (SF_ENDIAN_LITTLE | SF_FORMAT_AIFF | subformat) ; break ; case fl32_MARKER : case FL32_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_FLOAT) ; break ; case ulaw_MARKER : case ULAW_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_ULAW) ; break ; case alaw_MARKER : case ALAW_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_ALAW) ; break ; case fl64_MARKER : case FL64_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_DOUBLE) ; break ; case raw_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_PCM_U8) ; break ; case DWVW_MARKER : psf->sf.format = SF_FORMAT_AIFF ; switch (comm_fmt->sampleSize) { case 12 : psf->sf.format |= 
SF_FORMAT_DWVW_12 ; break ; case 16 : psf->sf.format |= SF_FORMAT_DWVW_16 ; break ; case 24 : psf->sf.format |= SF_FORMAT_DWVW_24 ; break ; default : psf->sf.format |= SF_FORMAT_DWVW_N ; break ; } ; break ; case GSM_MARKER : psf->sf.format = SF_FORMAT_AIFF ; psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_GSM610) ; break ; case ima4_MARKER : psf->endian = SF_ENDIAN_BIG ; psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_IMA_ADPCM) ; break ; default : psf_log_printf (psf, "AIFC : Unimplemented format : %M\n", comm_fmt->encoding) ; return SFE_UNIMPLEMENTED ; } ; if (! ubuf.scbuf [0]) psf_log_printf (psf, " Encoding : %M\n", comm_fmt->encoding) ; else psf_log_printf (psf, " Encoding : %M => %s\n", comm_fmt->encoding, ubuf.scbuf) ; return 0 ; } /* aiff_read_comm_chunk */ /*========================================================================================== */ static void aiff_rewrite_header (SF_PRIVATE *psf) { /* Assuming here that the header has already been written and just ** needs to be corrected for new data length. That means that we ** only change the length fields of the FORM and SSND chunks ; ** everything else can be skipped over. */ int k, ch, comm_size, comm_frames ; psf_fseek (psf, 0, SEEK_SET) ; psf_fread (psf->header.ptr, psf->dataoffset, 1, psf) ; psf->header.indx = 0 ; /* FORM chunk. */ psf_binheader_writef (psf, "Etm8", FORM_MARKER, psf->filelength - 8) ; /* COMM chunk. */ if ((k = psf_find_read_chunk_m32 (&psf->rchunks, COMM_MARKER)) >= 0) { psf->header.indx = psf->rchunks.chunks [k].offset - 8 ; comm_frames = psf->sf.frames ; comm_size = psf->rchunks.chunks [k].len ; psf_binheader_writef (psf, "Em42t4", COMM_MARKER, comm_size, psf->sf.channels, comm_frames) ; } ; /* PEAK chunk. */ if ((k = psf_find_read_chunk_m32 (&psf->rchunks, PEAK_MARKER)) >= 0) { psf->header.indx = psf->rchunks.chunks [k].offset - 8 ; psf_binheader_writef (psf, "Em4", PEAK_MARKER, AIFF_PEAK_CHUNK_SIZE (psf->sf.channels)) ; psf_binheader_writef (psf, "E44", 1, time (NULL)) ; for (ch = 0 ; ch < psf->sf.channels ; ch++) psf_binheader_writef (psf, "Eft8", (float) psf->peak_info->peaks [ch].value, psf->peak_info->peaks [ch].position) ; } ; /* SSND chunk. */ if ((k = psf_find_read_chunk_m32 (&psf->rchunks, SSND_MARKER)) >= 0) { psf->header.indx = psf->rchunks.chunks [k].offset - 8 ; psf_binheader_writef (psf, "Etm8", SSND_MARKER, psf->datalength + SIZEOF_SSND_CHUNK) ; } ; /* Header mangling complete so write it out. 
*/ psf_fseek (psf, 0, SEEK_SET) ; psf_fwrite (psf->header.ptr, psf->header.indx, 1, psf) ; return ; } /* aiff_rewrite_header */ static int aiff_write_header (SF_PRIVATE *psf, int calc_length) { sf_count_t current ; AIFF_PRIVATE *paiff ; uint8_t comm_sample_rate [10], comm_zero_bytes [2] = { 0, 0 } ; uint32_t comm_type, comm_size, comm_encoding, comm_frames = 0, uk ; int k, endian, has_data = SF_FALSE ; int16_t bit_width ; if ((paiff = psf->container_data) == NULL) return SFE_INTERNAL ; current = psf_ftell (psf) ; if (current > psf->dataoffset) has_data = SF_TRUE ; if (calc_length) { psf->filelength = psf_get_filelen (psf) ; psf->datalength = psf->filelength - psf->dataoffset ; if (psf->dataend) psf->datalength -= psf->filelength - psf->dataend ; if (psf->bytewidth > 0) psf->sf.frames = psf->datalength / (psf->bytewidth * psf->sf.channels) ; } ; if (psf->file.mode == SFM_RDWR && psf->dataoffset > 0 && psf->rchunks.count > 0) { aiff_rewrite_header (psf) ; if (current > 0) psf_fseek (psf, current, SEEK_SET) ; return 0 ; } ; endian = SF_ENDIAN (psf->sf.format) ; if (CPU_IS_LITTLE_ENDIAN && endian == SF_ENDIAN_CPU) endian = SF_ENDIAN_LITTLE ; /* Standard value here. */ bit_width = psf->bytewidth * 8 ; comm_frames = (psf->sf.frames > 0xFFFFFFFF) ? 0xFFFFFFFF : psf->sf.frames ; switch (SF_CODEC (psf->sf.format) | endian) { case SF_FORMAT_PCM_S8 | SF_ENDIAN_BIG : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = twos_MARKER ; break ; case SF_FORMAT_PCM_S8 | SF_ENDIAN_LITTLE : psf->endian = SF_ENDIAN_LITTLE ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = sowt_MARKER ; break ; case SF_FORMAT_PCM_16 | SF_ENDIAN_BIG : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = twos_MARKER ; break ; case SF_FORMAT_PCM_16 | SF_ENDIAN_LITTLE : psf->endian = SF_ENDIAN_LITTLE ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = sowt_MARKER ; break ; case SF_FORMAT_PCM_24 | SF_ENDIAN_BIG : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = in24_MARKER ; break ; case SF_FORMAT_PCM_24 | SF_ENDIAN_LITTLE : psf->endian = SF_ENDIAN_LITTLE ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = ni24_MARKER ; break ; case SF_FORMAT_PCM_32 | SF_ENDIAN_BIG : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = in32_MARKER ; break ; case SF_FORMAT_PCM_32 | SF_ENDIAN_LITTLE : psf->endian = SF_ENDIAN_LITTLE ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = ni32_MARKER ; break ; case SF_FORMAT_PCM_S8 : /* SF_ENDIAN_FILE */ case SF_FORMAT_PCM_16 : case SF_FORMAT_PCM_24 : case SF_FORMAT_PCM_32 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFF_MARKER ; comm_size = SIZEOF_AIFF_COMM ; comm_encoding = 0 ; break ; case SF_FORMAT_FLOAT : /* Big endian floating point. */ psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = FL32_MARKER ; /* Use 'FL32' because its easier to read. */ break ; case SF_FORMAT_DOUBLE : /* Big endian double precision floating point. */ psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = FL64_MARKER ; /* Use 'FL64' because its easier to read. 
*/ break ; case SF_FORMAT_ULAW : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = ulaw_MARKER ; break ; case SF_FORMAT_ALAW : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = alaw_MARKER ; break ; case SF_FORMAT_PCM_U8 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = raw_MARKER ; break ; case SF_FORMAT_DWVW_12 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = DWVW_MARKER ; /* Override standard value here.*/ bit_width = 12 ; break ; case SF_FORMAT_DWVW_16 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = DWVW_MARKER ; /* Override standard value here.*/ bit_width = 16 ; break ; case SF_FORMAT_DWVW_24 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = DWVW_MARKER ; /* Override standard value here.*/ bit_width = 24 ; break ; case SF_FORMAT_GSM610 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = GSM_MARKER ; /* Override standard value here.*/ bit_width = 16 ; break ; case SF_FORMAT_IMA_ADPCM : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = ima4_MARKER ; /* Override standard value here.*/ bit_width = 16 ; comm_frames = psf->sf.frames / AIFC_IMA4_SAMPLES_PER_BLOCK ; break ; default : return SFE_BAD_OPEN_FORMAT ; } ; /* Reset the current header length to zero. */ psf->header.ptr [0] = 0 ; psf->header.indx = 0 ; psf_fseek (psf, 0, SEEK_SET) ; psf_binheader_writef (psf, "Etm8", FORM_MARKER, psf->filelength - 8) ; /* Write AIFF/AIFC marker and COM chunk. */ if (comm_type == AIFC_MARKER) /* AIFC must have an FVER chunk. */ psf_binheader_writef (psf, "Emm44", comm_type, FVER_MARKER, 4, 0xA2805140) ; else psf_binheader_writef (psf, "Em", comm_type) ; paiff->comm_offset = psf->header.indx - 8 ; memset (comm_sample_rate, 0, sizeof (comm_sample_rate)) ; uint2tenbytefloat (psf->sf.samplerate, comm_sample_rate) ; psf_binheader_writef (psf, "Em42t42", COMM_MARKER, comm_size, psf->sf.channels, comm_frames, bit_width) ; psf_binheader_writef (psf, "b", comm_sample_rate, sizeof (comm_sample_rate)) ; /* AIFC chunks have some extra data. */ if (comm_type == AIFC_MARKER) psf_binheader_writef (psf, "mb", comm_encoding, comm_zero_bytes, sizeof (comm_zero_bytes)) ; if (psf->channel_map && paiff->chanmap_tag) psf_binheader_writef (psf, "Em4444", CHAN_MARKER, 12, paiff->chanmap_tag, 0, 0) ; /* Check if there's a INST chunk to write */ if (psf->instrument != NULL && psf->cues != NULL) { /* Huge chunk of code removed here because it had egregious errors that were ** not detected by either the compiler or the tests. It was found when updating ** the way psf_binheader_writef works. */ } else if (psf->instrument == NULL && psf->cues != NULL) { /* There are cues but no loops */ uint32_t idx ; int totalStringLength = 0, stringLength ; /* Here we count how many bytes will the pascal strings need */ for (idx = 0 ; idx < psf->cues->cue_count ; idx++) { stringLength = strlen (psf->cues->cue_points [idx].name) + 1 ; /* We'll count the first byte also of every pascal string */ totalStringLength += stringLength + (stringLength % 2 == 0 ? 
0 : 1) ; } ; psf_binheader_writef (psf, "Em42", MARK_MARKER, 2 + psf->cues->cue_count * (2 + 4) + totalStringLength, psf->cues->cue_count) ; for (idx = 0 ; idx < psf->cues->cue_count ; idx++) psf_binheader_writef (psf, "E24p", psf->cues->cue_points [idx].indx, psf->cues->cue_points [idx].sample_offset, psf->cues->cue_points [idx].name) ; } ; if (psf->strings.flags & SF_STR_LOCATE_START) aiff_write_strings (psf, SF_STR_LOCATE_START) ; if (psf->peak_info != NULL && psf->peak_info->peak_loc == SF_PEAK_START) { psf_binheader_writef (psf, "Em4", PEAK_MARKER, AIFF_PEAK_CHUNK_SIZE (psf->sf.channels)) ; psf_binheader_writef (psf, "E44", 1, time (NULL)) ; for (k = 0 ; k < psf->sf.channels ; k++) psf_binheader_writef (psf, "Eft8", (float) psf->peak_info->peaks [k].value, psf->peak_info->peaks [k].position) ; } ; /* Write custom headers. */ for (uk = 0 ; uk < psf->wchunks.used ; uk++) psf_binheader_writef (psf, "Em4b", psf->wchunks.chunks [uk].mark32, psf->wchunks.chunks [uk].len, psf->wchunks.chunks [uk].data, make_size_t (psf->wchunks.chunks [uk].len)) ; /* Write SSND chunk. */ paiff->ssnd_offset = psf->header.indx ; psf_binheader_writef (psf, "Etm844", SSND_MARKER, psf->datalength + SIZEOF_SSND_CHUNK, 0, 0) ; /* Header construction complete so write it out. */ psf_fwrite (psf->header.ptr, psf->header.indx, 1, psf) ; if (psf->error) return psf->error ; if (has_data && psf->dataoffset != psf->header.indx) return psf->error = SFE_INTERNAL ; psf->dataoffset = psf->header.indx ; if (! has_data) psf_fseek (psf, psf->dataoffset, SEEK_SET) ; else if (current > 0) psf_fseek (psf, current, SEEK_SET) ; return psf->error ; } /* aiff_write_header */ static int aiff_write_tailer (SF_PRIVATE *psf) { int k ; /* Reset the current header length to zero. */ psf->header.ptr [0] = 0 ; psf->header.indx = 0 ; psf->dataend = psf_fseek (psf, 0, SEEK_END) ; /* Make sure tailer data starts at even byte offset. Pad if necessary. */ if (psf->dataend % 2 == 1) { psf_fwrite (psf->header.ptr, 1, 1, psf) ; psf->dataend ++ ; } ; if (psf->peak_info != NULL && psf->peak_info->peak_loc == SF_PEAK_END) { psf_binheader_writef (psf, "Em4", PEAK_MARKER, AIFF_PEAK_CHUNK_SIZE (psf->sf.channels)) ; psf_binheader_writef (psf, "E44", 1, time (NULL)) ; for (k = 0 ; k < psf->sf.channels ; k++) psf_binheader_writef (psf, "Eft8", (float) psf->peak_info->peaks [k].value, psf->peak_info->peaks [k].position) ; } ; if (psf->strings.flags & SF_STR_LOCATE_END) aiff_write_strings (psf, SF_STR_LOCATE_END) ; /* Write the tailer. 
*/ if (psf->header.indx > 0) psf_fwrite (psf->header.ptr, psf->header.indx, 1, psf) ; return 0 ; } /* aiff_write_tailer */ static void aiff_write_strings (SF_PRIVATE *psf, int location) { int k, slen ; for (k = 0 ; k < SF_MAX_STRINGS ; k++) { if (psf->strings.data [k].type == 0) break ; if (psf->strings.data [k].flags != location) continue ; switch (psf->strings.data [k].type) { case SF_STR_SOFTWARE : slen = strlen (psf->strings.storage + psf->strings.data [k].offset) ; psf_binheader_writef (psf, "Em4mb", APPL_MARKER, slen + 4, m3ga_MARKER, psf->strings.storage + psf->strings.data [k].offset, make_size_t (slen + (slen & 1))) ; break ; case SF_STR_TITLE : psf_binheader_writef (psf, "EmS", NAME_MARKER, psf->strings.storage + psf->strings.data [k].offset) ; break ; case SF_STR_COPYRIGHT : psf_binheader_writef (psf, "EmS", c_MARKER, psf->strings.storage + psf->strings.data [k].offset) ; break ; case SF_STR_ARTIST : psf_binheader_writef (psf, "EmS", AUTH_MARKER, psf->strings.storage + psf->strings.data [k].offset) ; break ; case SF_STR_COMMENT : psf_binheader_writef (psf, "EmS", ANNO_MARKER, psf->strings.storage + psf->strings.data [k].offset) ; break ; /* case SF_STR_DATE : psf_binheader_writef (psf, "Ems", ICRD_MARKER, psf->strings.data [k].str) ; break ; */ } ; } ; return ; } /* aiff_write_strings */ static int aiff_command (SF_PRIVATE * psf, int command, void * UNUSED (data), int UNUSED (datasize)) { AIFF_PRIVATE *paiff ; if ((paiff = psf->container_data) == NULL) return SFE_INTERNAL ; switch (command) { case SFC_SET_CHANNEL_MAP_INFO : paiff->chanmap_tag = aiff_caf_find_channel_layout_tag (psf->channel_map, psf->sf.channels) ; return (paiff->chanmap_tag != 0) ; default : break ; } ; return 0 ; } /* aiff_command */ static const char* get_loop_mode_str (int16_t mode) { switch (mode) { case 0 : return "none" ; case 1 : return "forward" ; case 2 : return "backward" ; } ; return "*** unknown" ; } /* get_loop_mode_str */ static int16_t get_loop_mode (int16_t mode) { switch (mode) { case 0 : return SF_LOOP_NONE ; case 1 : return SF_LOOP_FORWARD ; case 2 : return SF_LOOP_BACKWARD ; } ; return SF_LOOP_NONE ; } /* get_loop_mode */ /*========================================================================================== ** Rough hack at converting from 80 bit IEEE float in AIFF header to an int and ** back again. It assumes that all sample rates are between 1 and 800MHz, which ** should be OK as other sound file formats use a 32 bit integer to store sample ** rate. ** There is another (probably better) version in the source code to the SoX but it ** has a copyright which probably prevents it from being allowable as GPL/LGPL. */ static int tenbytefloat2int (uint8_t *bytes) { int val = 3 ; if (bytes [0] & 0x80) /* Negative number. */ return 0 ; if (bytes [0] <= 0x3F) /* Less than 1. */ return 1 ; if (bytes [0] > 0x40) /* Way too big. */ return 0x4000000 ; if (bytes [0] == 0x40 && bytes [1] > 0x1C) /* Too big. */ return 800000000 ; /* Ok, can handle it. */ val = (bytes [2] << 23) | (bytes [3] << 15) | (bytes [4] << 7) | (bytes [5] >> 1) ; val >>= (29 - bytes [1]) ; return val ; } /* tenbytefloat2int */ static void uint2tenbytefloat (uint32_t num, uint8_t *bytes) { uint32_t mask = 0x40000000 ; int count ; if (num <= 1) { bytes [0] = 0x3F ; bytes [1] = 0xFF ; bytes [2] = 0x80 ; return ; } ; bytes [0] = 0x40 ; if (num >= mask) { bytes [1] = 0x1D ; return ; } ; for (count = 0 ; count < 32 ; count ++) { if (num & mask) break ; mask >>= 1 ; } ; num = count < 31 ? 
num << (count + 1) : 0 ; bytes [1] = 29 - count ; bytes [2] = (num >> 24) & 0xFF ; bytes [3] = (num >> 16) & 0xFF ; bytes [4] = (num >> 8) & 0xFF ; bytes [5] = num & 0xFF ; } /* uint2tenbytefloat */ static int aiff_read_basc_chunk (SF_PRIVATE * psf, int datasize) { const char * type_str ; basc_CHUNK bc ; int count ; count = psf_binheader_readf (psf, "E442", &bc.version, &bc.numBeats, &bc.rootNote) ; count += psf_binheader_readf (psf, "E222", &bc.scaleType, &bc.sigNumerator, &bc.sigDenominator) ; count += psf_binheader_readf (psf, "E2j", &bc.loopType, datasize - sizeof (bc)) ; psf_log_printf (psf, " Version ? : %u\n Num Beats : %u\n Root Note : 0x%x\n", bc.version, bc.numBeats, bc.rootNote) ; switch (bc.scaleType) { case basc_SCALE_MINOR : type_str = "MINOR" ; break ; case basc_SCALE_MAJOR : type_str = "MAJOR" ; break ; case basc_SCALE_NEITHER : type_str = "NEITHER" ; break ; case basc_SCALE_BOTH : type_str = "BOTH" ; break ; default : type_str = "!!WRONG!!" ; break ; } ; psf_log_printf (psf, " ScaleType : 0x%x (%s)\n", bc.scaleType, type_str) ; psf_log_printf (psf, " Time Sig : %d/%d\n", bc.sigNumerator, bc.sigDenominator) ; switch (bc.loopType) { case basc_TYPE_ONE_SHOT : type_str = "One Shot" ; break ; case basc_TYPE_LOOP : type_str = "Loop" ; break ; default: type_str = "!!WRONG!!" ; break ; } ; psf_log_printf (psf, " Loop Type : 0x%x (%s)\n", bc.loopType, type_str) ; if ((psf->loop_info = calloc (1, sizeof (SF_LOOP_INFO))) == NULL) return SFE_MALLOC_FAILED ; psf->loop_info->time_sig_num = bc.sigNumerator ; psf->loop_info->time_sig_den = bc.sigDenominator ; psf->loop_info->loop_mode = (bc.loopType == basc_TYPE_ONE_SHOT) ? SF_LOOP_NONE : SF_LOOP_FORWARD ; psf->loop_info->num_beats = bc.numBeats ; /* Can always be recalculated from other known fields. 
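** The bpm assignment below is purely arithmetic on those fields : numBeats
** converted to quarter notes (* 4.0 / sigDenominator), divided by the clip
** length in seconds (frames / samplerate), then scaled per minute (* 60).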
*/ psf->loop_info->bpm = (1.0 / psf->sf.frames) * psf->sf.samplerate * ((bc.numBeats * 4.0) / bc.sigDenominator) * 60.0 ; psf->loop_info->root_key = bc.rootNote ; if (count < datasize) psf_binheader_readf (psf, "j", datasize - count) ; return 0 ; } /* aiff_read_basc_chunk */ static int aiff_read_chanmap (SF_PRIVATE * psf, unsigned dword) { const AIFF_CAF_CHANNEL_MAP * map_info ; unsigned channel_bitmap, channel_decriptions, bytesread ; int layout_tag ; bytesread = psf_binheader_readf (psf, "444", &layout_tag, &channel_bitmap, &channel_decriptions) ; if ((map_info = aiff_caf_of_channel_layout_tag (layout_tag)) == NULL) return 0 ; psf_log_printf (psf, " Tag : %x\n", layout_tag) ; if (map_info) psf_log_printf (psf, " Layout : %s\n", map_info->name) ; if (bytesread < dword) psf_binheader_readf (psf, "j", dword - bytesread) ; if (map_info->channel_map != NULL) { size_t chanmap_size = psf->sf.channels * sizeof (psf->channel_map [0]) ; free (psf->channel_map) ; if ((psf->channel_map = malloc (chanmap_size)) == NULL) return SFE_MALLOC_FAILED ; memcpy (psf->channel_map, map_info->channel_map, chanmap_size) ; } ; return 0 ; } /* aiff_read_chanmap */ /*============================================================================== */ static int aiff_set_chunk (SF_PRIVATE *psf, const SF_CHUNK_INFO * chunk_info) { return psf_save_write_chunk (&psf->wchunks, chunk_info) ; } /* aiff_set_chunk */ static SF_CHUNK_ITERATOR * aiff_next_chunk_iterator (SF_PRIVATE *psf, SF_CHUNK_ITERATOR * iterator) { return psf_next_chunk_iterator (&psf->rchunks, iterator) ; } /* aiff_next_chunk_iterator */ static int aiff_get_chunk_size (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { int indx ; if ((indx = psf_find_read_chunk_iterator (&psf->rchunks, iterator)) < 0) return SFE_UNKNOWN_CHUNK ; chunk_info->datalen = psf->rchunks.chunks [indx].len ; return SFE_NO_ERROR ; } /* aiff_get_chunk_size */ static int aiff_get_chunk_data (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { sf_count_t pos ; int indx ; if ((indx = psf_find_read_chunk_iterator (&psf->rchunks, iterator)) < 0) return SFE_UNKNOWN_CHUNK ; if (chunk_info->data == NULL) return SFE_BAD_CHUNK_DATA_PTR ; chunk_info->id_size = psf->rchunks.chunks [indx].id_size ; memcpy (chunk_info->id, psf->rchunks.chunks [indx].id, sizeof (chunk_info->id) / sizeof (*chunk_info->id)) ; pos = psf_ftell (psf) ; psf_fseek (psf, psf->rchunks.chunks [indx].offset, SEEK_SET) ; psf_fread (chunk_info->data, SF_MIN (chunk_info->datalen, psf->rchunks.chunks [indx].len), 1, psf) ; psf_fseek (psf, pos, SEEK_SET) ; return SFE_NO_ERROR ; } /* aiff_get_chunk_data */
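/*
** Illustrative sketch (not part of libsndfile) : the COMM chunk stores the
** sample rate as an 80 bit IEEE 754 extended float -- 1 sign bit, a 15 bit
** exponent biased by 16383, and a 64 bit mantissa with an explicit leading
** 1 bit. uint2tenbytefloat () and tenbytefloat2int () above are, as their
** own comment says, rough hacks over that layout ; the stand-alone encoder
** below (rate_to_extended80, a helper name invented here) builds the same
** ten bytes directly from the definition, handling only non-zero integer
** rates. For example 44100 Hz encodes as 40 0E AC 44 00 00 00 00 00 00.
*/

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void rate_to_extended80 (uint32_t rate, uint8_t bytes [10])
{	int exponent ;
	uint64_t mantissa ;

	memset (bytes, 0, 10) ;
	if (rate == 0)
		return ;

	/* Position of the highest set bit gives the unbiased exponent. */
	for (exponent = 31 ; (rate & (1u << exponent)) == 0 ; exponent --)
		;

	/* Shift the value so its leading 1 bit lands in mantissa bit 63. */
	mantissa = ((uint64_t) rate) << (63 - exponent) ;

	/* Sign bit is zero ; exponent field is biased by 16383. */
	bytes [0] = ((16383 + exponent) >> 8) & 0x7F ;
	bytes [1] = (16383 + exponent) & 0xFF ;
	for (int k = 0 ; k < 8 ; k++)
		bytes [2 + k] = (mantissa >> (56 - 8 * k)) & 0xFF ;
} /* rate_to_extended80 */

int main (void)
{	uint8_t bytes [10] ;

	rate_to_extended80 (44100, bytes) ;
	for (int k = 0 ; k < 10 ; k++)
		printf ("%02X ", bytes [k]) ;
	putchar ('\n') ;	/* Prints : 40 0E AC 44 00 00 00 00 00 00 */
	return 0 ;
} /* main */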
/* ** Copyright (C) 1999-2017 Erik de Castro Lopo <erikd@mega-nerd.com> ** Copyright (C) 2005 David Viens <davidv@plogue.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <ctype.h> #include <inttypes.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #include "chanmap.h" /*------------------------------------------------------------------------------ * Macros to handle big/little endian issues. */ #define FORM_MARKER (MAKE_MARKER ('F', 'O', 'R', 'M')) #define AIFF_MARKER (MAKE_MARKER ('A', 'I', 'F', 'F')) #define AIFC_MARKER (MAKE_MARKER ('A', 'I', 'F', 'C')) #define COMM_MARKER (MAKE_MARKER ('C', 'O', 'M', 'M')) #define SSND_MARKER (MAKE_MARKER ('S', 'S', 'N', 'D')) #define MARK_MARKER (MAKE_MARKER ('M', 'A', 'R', 'K')) #define INST_MARKER (MAKE_MARKER ('I', 'N', 'S', 'T')) #define APPL_MARKER (MAKE_MARKER ('A', 'P', 'P', 'L')) #define CHAN_MARKER (MAKE_MARKER ('C', 'H', 'A', 'N')) #define c_MARKER (MAKE_MARKER ('(', 'c', ')', ' ')) #define NAME_MARKER (MAKE_MARKER ('N', 'A', 'M', 'E')) #define AUTH_MARKER (MAKE_MARKER ('A', 'U', 'T', 'H')) #define ANNO_MARKER (MAKE_MARKER ('A', 'N', 'N', 'O')) #define COMT_MARKER (MAKE_MARKER ('C', 'O', 'M', 'T')) #define FVER_MARKER (MAKE_MARKER ('F', 'V', 'E', 'R')) #define SFX_MARKER (MAKE_MARKER ('S', 'F', 'X', '!')) #define PEAK_MARKER (MAKE_MARKER ('P', 'E', 'A', 'K')) #define basc_MARKER (MAKE_MARKER ('b', 'a', 's', 'c')) /* Supported AIFC encodings.*/ #define NONE_MARKER (MAKE_MARKER ('N', 'O', 'N', 'E')) #define sowt_MARKER (MAKE_MARKER ('s', 'o', 'w', 't')) #define twos_MARKER (MAKE_MARKER ('t', 'w', 'o', 's')) #define raw_MARKER (MAKE_MARKER ('r', 'a', 'w', ' ')) #define in24_MARKER (MAKE_MARKER ('i', 'n', '2', '4')) #define ni24_MARKER (MAKE_MARKER ('4', '2', 'n', '1')) #define in32_MARKER (MAKE_MARKER ('i', 'n', '3', '2')) #define ni32_MARKER (MAKE_MARKER ('2', '3', 'n', 'i')) #define fl32_MARKER (MAKE_MARKER ('f', 'l', '3', '2')) #define FL32_MARKER (MAKE_MARKER ('F', 'L', '3', '2')) #define fl64_MARKER (MAKE_MARKER ('f', 'l', '6', '4')) #define FL64_MARKER (MAKE_MARKER ('F', 'L', '6', '4')) #define ulaw_MARKER (MAKE_MARKER ('u', 'l', 'a', 'w')) #define ULAW_MARKER (MAKE_MARKER ('U', 'L', 'A', 'W')) #define alaw_MARKER (MAKE_MARKER ('a', 'l', 'a', 'w')) #define ALAW_MARKER (MAKE_MARKER ('A', 'L', 'A', 'W')) #define DWVW_MARKER (MAKE_MARKER ('D', 'W', 'V', 'W')) #define GSM_MARKER (MAKE_MARKER ('G', 'S', 'M', ' ')) #define ima4_MARKER (MAKE_MARKER ('i', 'm', 'a', '4')) /* ** This value is officially assigned to Mega Nerd Pty Ltd by Apple ** Corportation as the Application marker for libsndfile. 
** ** See : http://developer.apple.com/faq/datatype.html */ #define m3ga_MARKER (MAKE_MARKER ('m', '3', 'g', 'a')) /* Unsupported AIFC encodings.*/ #define MAC3_MARKER (MAKE_MARKER ('M', 'A', 'C', '3')) #define MAC6_MARKER (MAKE_MARKER ('M', 'A', 'C', '6')) #define ADP4_MARKER (MAKE_MARKER ('A', 'D', 'P', '4')) /* Predfined chunk sizes. */ #define SIZEOF_AIFF_COMM 18 #define SIZEOF_AIFC_COMM_MIN 22 #define SIZEOF_AIFC_COMM 24 #define SIZEOF_SSND_CHUNK 8 #define SIZEOF_INST_CHUNK 20 /* Is it constant? */ /* AIFC/IMA4 defines. */ #define AIFC_IMA4_BLOCK_LEN 34 #define AIFC_IMA4_SAMPLES_PER_BLOCK 64 #define AIFF_PEAK_CHUNK_SIZE(ch) (2 * sizeof (int) + ch * (sizeof (float) + sizeof (int))) /*------------------------------------------------------------------------------ * Typedefs for file chunks. */ enum { HAVE_FORM = 0x01, HAVE_AIFF = 0x02, HAVE_AIFC = 0x04, HAVE_FVER = 0x08, HAVE_COMM = 0x10, HAVE_SSND = 0x20 } ; typedef struct { uint32_t size ; int16_t numChannels ; uint32_t numSampleFrames ; int16_t sampleSize ; uint8_t sampleRate [10] ; uint32_t encoding ; char zero_bytes [2] ; } COMM_CHUNK ; typedef struct { uint32_t offset ; uint32_t blocksize ; } SSND_CHUNK ; typedef struct { int16_t playMode ; uint16_t beginLoop ; uint16_t endLoop ; } INST_LOOP ; typedef struct { int8_t baseNote ; /* all notes are MIDI note numbers */ int8_t detune ; /* cents off, only -50 to +50 are significant */ int8_t lowNote ; int8_t highNote ; int8_t lowVelocity ; /* 1 to 127 */ int8_t highVelocity ; /* 1 to 127 */ int16_t gain ; /* in dB, 0 is normal */ INST_LOOP sustain_loop ; INST_LOOP release_loop ; } INST_CHUNK ; enum { basc_SCALE_MINOR = 1, basc_SCALE_MAJOR, basc_SCALE_NEITHER, basc_SCALE_BOTH } ; enum { basc_TYPE_LOOP = 0, basc_TYPE_ONE_SHOT } ; typedef struct { uint32_t version ; uint32_t numBeats ; uint16_t rootNote ; uint16_t scaleType ; uint16_t sigNumerator ; uint16_t sigDenominator ; uint16_t loopType ; } basc_CHUNK ; typedef struct { uint16_t markerID ; uint32_t position ; } MARK_ID_POS ; typedef struct { sf_count_t comm_offset ; sf_count_t ssnd_offset ; int32_t chanmap_tag ; MARK_ID_POS *markstr ; } AIFF_PRIVATE ; /*------------------------------------------------------------------------------ * Private static functions. 
*/ static int aiff_close (SF_PRIVATE *psf) ; static int tenbytefloat2int (uint8_t *bytes) ; static void uint2tenbytefloat (uint32_t num, uint8_t *bytes) ; static int aiff_read_comm_chunk (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt) ; static int aiff_read_header (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt) ; static int aiff_write_header (SF_PRIVATE *psf, int calc_length) ; static int aiff_write_tailer (SF_PRIVATE *psf) ; static void aiff_write_strings (SF_PRIVATE *psf, int location) ; static int aiff_command (SF_PRIVATE *psf, int command, void *data, int datasize) ; static const char *get_loop_mode_str (int16_t mode) ; static int16_t get_loop_mode (int16_t mode) ; static int aiff_read_basc_chunk (SF_PRIVATE * psf, int) ; static int aiff_read_chanmap (SF_PRIVATE * psf, unsigned dword) ; static uint32_t marker_to_position (const MARK_ID_POS *m, uint16_t n, int marksize) ; static int aiff_set_chunk (SF_PRIVATE *psf, const SF_CHUNK_INFO * chunk_info) ; static SF_CHUNK_ITERATOR * aiff_next_chunk_iterator (SF_PRIVATE *psf, SF_CHUNK_ITERATOR * iterator) ; static int aiff_get_chunk_size (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) ; static int aiff_get_chunk_data (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) ; /*------------------------------------------------------------------------------ ** Public function. */ int aiff_open (SF_PRIVATE *psf) { COMM_CHUNK comm_fmt ; int error, subformat ; memset (&comm_fmt, 0, sizeof (comm_fmt)) ; subformat = SF_CODEC (psf->sf.format) ; if ((psf->container_data = calloc (1, sizeof (AIFF_PRIVATE))) == NULL) return SFE_MALLOC_FAILED ; if (psf->file.mode == SFM_READ || (psf->file.mode == SFM_RDWR && psf->filelength > 0)) { if ((error = aiff_read_header (psf, &comm_fmt))) return error ; psf->next_chunk_iterator = aiff_next_chunk_iterator ; psf->get_chunk_size = aiff_get_chunk_size ; psf->get_chunk_data = aiff_get_chunk_data ; psf_fseek (psf, psf->dataoffset, SEEK_SET) ; } ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) { if (psf->is_pipe) return SFE_NO_PIPE_WRITE ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_AIFF) return SFE_BAD_OPEN_FORMAT ; if (psf->file.mode == SFM_WRITE && (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)) { if ((psf->peak_info = peak_info_calloc (psf->sf.channels)) == NULL) return SFE_MALLOC_FAILED ; psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->file.mode != SFM_RDWR || psf->filelength < 40) { psf->filelength = 0 ; psf->datalength = 0 ; psf->dataoffset = 0 ; psf->sf.frames = 0 ; } ; psf->strings.flags = SF_STR_ALLOW_START | SF_STR_ALLOW_END ; if ((error = aiff_write_header (psf, SF_FALSE))) return error ; psf->write_header = aiff_write_header ; psf->set_chunk = aiff_set_chunk ; } ; psf->container_close = aiff_close ; psf->command = aiff_command ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_U8 : error = pcm_init (psf) ; break ; case SF_FORMAT_PCM_S8 : error = pcm_init (psf) ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_PCM_24 : case SF_FORMAT_PCM_32 : error = pcm_init (psf) ; break ; case SF_FORMAT_ULAW : error = ulaw_init (psf) ; break ; case SF_FORMAT_ALAW : error = alaw_init (psf) ; break ; /* Lite remove start */ case SF_FORMAT_FLOAT : error = float32_init (psf) ; break ; case SF_FORMAT_DOUBLE : error = double64_init (psf) ; break ; case SF_FORMAT_DWVW_12 : if (psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; case SF_FORMAT_DWVW_16 : error = dwvw_init (psf, 16) ; if 
(psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; case SF_FORMAT_DWVW_24 : error = dwvw_init (psf, 24) ; if (psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; case SF_FORMAT_DWVW_N : if (psf->file.mode != SFM_READ) { error = SFE_DWVW_BAD_BITWIDTH ; break ; } ; if (comm_fmt.sampleSize >= 8 && comm_fmt.sampleSize < 24) { error = dwvw_init (psf, comm_fmt.sampleSize) ; if (psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; } ; psf_log_printf (psf, "AIFC/DWVW : Bad bitwidth %d\n", comm_fmt.sampleSize) ; error = SFE_DWVW_BAD_BITWIDTH ; break ; case SF_FORMAT_IMA_ADPCM : /* ** IMA ADPCM encoded AIFF files always have a block length ** of 34 which decodes to 64 samples. */ error = aiff_ima_init (psf, AIFC_IMA4_BLOCK_LEN, AIFC_IMA4_SAMPLES_PER_BLOCK) ; break ; /* Lite remove end */ case SF_FORMAT_GSM610 : error = gsm610_init (psf) ; if (psf->sf.frames > comm_fmt.numSampleFrames) psf->sf.frames = comm_fmt.numSampleFrames ; break ; default : return SFE_UNIMPLEMENTED ; } ; if (psf->file.mode != SFM_WRITE && psf->sf.frames - comm_fmt.numSampleFrames != 0) { psf_log_printf (psf, "*** Frame count read from 'COMM' chunk (%u) not equal to frame count\n" "*** calculated from length of 'SSND' chunk (%u).\n", comm_fmt.numSampleFrames, (uint32_t) psf->sf.frames) ; } ; return error ; } /* aiff_open */ /*========================================================================================== ** Private functions. */ /* This function ought to check size */ static uint32_t marker_to_position (const MARK_ID_POS *m, uint16_t n, int marksize) { int i ; for (i = 0 ; i < marksize ; i++) if (m [i].markerID == n) return m [i].position ; return 0 ; } /* marker_to_position */ static int aiff_read_header (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt) { SSND_CHUNK ssnd_fmt ; AIFF_PRIVATE *paiff ; BUF_UNION ubuf ; uint32_t chunk_size = 0, FORMsize, SSNDsize, bytesread, mark_count = 0 ; int k, found_chunk = 0, done = 0, error = 0 ; char *cptr ; int instr_found = 0, mark_found = 0 ; if (psf->filelength > SF_PLATFORM_S64 (0xffffffff)) psf_log_printf (psf, "Warning : filelength > 0xffffffff. This is bad!!!!\n") ; if ((paiff = psf->container_data) == NULL) return SFE_INTERNAL ; paiff->comm_offset = 0 ; paiff->ssnd_offset = 0 ; /* Set position to start of file to begin reading header. */ psf_binheader_readf (psf, "p", 0) ; memset (comm_fmt, 0, sizeof (COMM_CHUNK)) ; /* Until recently AIF* file were all BIG endian. */ psf->endian = SF_ENDIAN_BIG ; /* AIFF files can apparently have their chunks in any order. However, they ** must have a FORM chunk. Approach here is to read all the chunks one by ** one and then check for the mandatory chunks at the end. */ while (! done) { unsigned marker ; size_t jump = chunk_size & 1 ; marker = chunk_size = 0 ; psf_binheader_readf (psf, "Ejm4", jump, &marker, &chunk_size) ; if (marker == 0) { sf_count_t pos = psf_ftell (psf) ; psf_log_printf (psf, "Have 0 marker at position %D (0x%x).\n", pos, pos) ; break ; } ; if (psf->file.mode == SFM_RDWR && (found_chunk & HAVE_SSND)) return SFE_AIFF_RW_SSND_NOT_LAST ; psf_store_read_chunk_u32 (&psf->rchunks, marker, psf_ftell (psf), chunk_size) ; switch (marker) { case FORM_MARKER : if (found_chunk) return SFE_AIFF_NO_FORM ; FORMsize = chunk_size ; found_chunk |= HAVE_FORM ; psf_binheader_readf (psf, "m", &marker) ; switch (marker) { case AIFC_MARKER : case AIFF_MARKER : found_chunk |= (marker == AIFC_MARKER) ? 
(HAVE_AIFC | HAVE_AIFF) : HAVE_AIFF ; break ; default : break ; } ; if (psf->fileoffset > 0 && psf->filelength > FORMsize + 8) { /* Set file length. */ psf->filelength = FORMsize + 8 ; psf_log_printf (psf, "FORM : %u\n %M\n", FORMsize, marker) ; } else if (FORMsize != psf->filelength - 2 * SIGNED_SIZEOF (chunk_size)) { chunk_size = psf->filelength - 2 * sizeof (chunk_size) ; psf_log_printf (psf, "FORM : %u (should be %u)\n %M\n", FORMsize, chunk_size, marker) ; FORMsize = chunk_size ; } else psf_log_printf (psf, "FORM : %u\n %M\n", FORMsize, marker) ; /* Set this to 0, so we don't jump a byte when parsing the next marker. */ chunk_size = 0 ; break ; case COMM_MARKER : paiff->comm_offset = psf_ftell (psf) - 8 ; chunk_size += chunk_size & 1 ; comm_fmt->size = chunk_size ; if ((error = aiff_read_comm_chunk (psf, comm_fmt)) != 0) return error ; found_chunk |= HAVE_COMM ; break ; case PEAK_MARKER : /* Must have COMM chunk before PEAK chunk. */ if ((found_chunk & (HAVE_FORM | HAVE_AIFF | HAVE_COMM)) != (HAVE_FORM | HAVE_AIFF | HAVE_COMM)) return SFE_AIFF_PEAK_B4_COMM ; psf_log_printf (psf, "%M : %d\n", marker, chunk_size) ; if (chunk_size != AIFF_PEAK_CHUNK_SIZE (psf->sf.channels)) { psf_binheader_readf (psf, "j", chunk_size) ; psf_log_printf (psf, "*** File PEAK chunk too big.\n") ; return SFE_WAV_BAD_PEAK ; } ; if ((psf->peak_info = peak_info_calloc (psf->sf.channels)) == NULL) return SFE_MALLOC_FAILED ; /* read in rest of PEAK chunk. */ psf_binheader_readf (psf, "E44", &(psf->peak_info->version), &(psf->peak_info->timestamp)) ; if (psf->peak_info->version != 1) psf_log_printf (psf, " version : %d *** (should be version 1)\n", psf->peak_info->version) ; else psf_log_printf (psf, " version : %d\n", psf->peak_info->version) ; psf_log_printf (psf, " time stamp : %d\n", psf->peak_info->timestamp) ; psf_log_printf (psf, " Ch Position Value\n") ; cptr = ubuf.cbuf ; for (k = 0 ; k < psf->sf.channels ; k++) { float value ; uint32_t position ; psf_binheader_readf (psf, "Ef4", &value, &position) ; psf->peak_info->peaks [k].value = value ; psf->peak_info->peaks [k].position = position ; snprintf (cptr, sizeof (ubuf.scbuf), " %2d %-12" PRId64 " %g\n", k, psf->peak_info->peaks [k].position, psf->peak_info->peaks [k].value) ; cptr [sizeof (ubuf.scbuf) - 1] = 0 ; psf_log_printf (psf, "%s", cptr) ; } ; psf->peak_info->peak_loc = ((found_chunk & HAVE_SSND) == 0) ? 
SF_PEAK_START : SF_PEAK_END ; break ; case SSND_MARKER : if ((found_chunk & HAVE_AIFC) && (found_chunk & HAVE_FVER) == 0) psf_log_printf (psf, "*** Valid AIFC files should have an FVER chunk.\n") ; paiff->ssnd_offset = psf_ftell (psf) - 8 ; SSNDsize = chunk_size ; psf_binheader_readf (psf, "E44", &(ssnd_fmt.offset), &(ssnd_fmt.blocksize)) ; psf->datalength = SSNDsize - sizeof (ssnd_fmt) ; psf->dataoffset = psf_ftell (psf) ; if (psf->datalength > psf->filelength - psf->dataoffset || psf->datalength < 0) { psf_log_printf (psf, " SSND : %u (should be %D)\n", SSNDsize, psf->filelength - psf->dataoffset + sizeof (SSND_CHUNK)) ; psf->datalength = psf->filelength - psf->dataoffset ; } else psf_log_printf (psf, " SSND : %u\n", SSNDsize) ; if (ssnd_fmt.offset == 0 || psf->dataoffset + ssnd_fmt.offset == ssnd_fmt.blocksize) { psf_log_printf (psf, " Offset : %u\n", ssnd_fmt.offset) ; psf_log_printf (psf, " Block Size : %u\n", ssnd_fmt.blocksize) ; psf->dataoffset += ssnd_fmt.offset ; psf->datalength -= ssnd_fmt.offset ; } else { psf_log_printf (psf, " Offset : %u\n", ssnd_fmt.offset) ; psf_log_printf (psf, " Block Size : %u ???\n", ssnd_fmt.blocksize) ; psf->dataoffset += ssnd_fmt.offset ; psf->datalength -= ssnd_fmt.offset ; } ; /* Only set dataend if there really is data at the end. */ if (psf->datalength + psf->dataoffset < psf->filelength) psf->dataend = psf->datalength + psf->dataoffset ; found_chunk |= HAVE_SSND ; if (! psf->sf.seekable) break ; /* Seek to end of SSND chunk. */ psf_fseek (psf, psf->dataoffset + psf->datalength, SEEK_SET) ; break ; case c_MARKER : if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf)) { psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ; cptr [chunk_size] = 0 ; psf_sanitize_string (cptr, chunk_size) ; psf_log_printf (psf, " %M : %s\n", marker, cptr) ; psf_store_string (psf, SF_STR_COPYRIGHT, cptr) ; chunk_size += chunk_size & 1 ; break ; case AUTH_MARKER : if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 1) { psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ; cptr [chunk_size] = 0 ; psf_log_printf (psf, " %M : %s\n", marker, cptr) ; psf_store_string (psf, SF_STR_ARTIST, cptr) ; chunk_size += chunk_size & 1 ; break ; case COMT_MARKER : { uint16_t count, id, len ; uint32_t timestamp, bytes ; if (chunk_size == 0) break ; bytes = chunk_size ; bytes -= psf_binheader_readf (psf, "E2", &count) ; psf_log_printf (psf, " %M : %d\n count : %d\n", marker, chunk_size, count) ; for (k = 0 ; k < count ; k++) { bytes -= psf_binheader_readf (psf, "E422", &timestamp, &id, &len) ; psf_log_printf (psf, " time : 0x%x\n marker : %x\n length : %d\n", timestamp, id, len) ; if (len + 1 > SIGNED_SIZEOF (ubuf.scbuf)) { psf_log_printf (psf, "\nError : string length (%d) too big.\n", len) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; bytes -= psf_binheader_readf (psf, "b", cptr, len) ; cptr [len] = 0 ; psf_log_printf (psf, " string : %s\n", cptr) ; } ; if (bytes > 0) psf_binheader_readf (psf, "j", bytes) ; } ; break ; case APPL_MARKER : { unsigned appl_marker ; if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 1) { psf_log_printf (psf, " %M : %u (too big, skipping)\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size + (chunk_size & 1)) ; break ; 
} ; if (chunk_size < 4) { psf_log_printf (psf, " %M : %d (too small, skipping)\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size + (chunk_size & 1)) ; break ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "mb", &appl_marker, cptr, chunk_size + (chunk_size & 1) - 4) ; cptr [chunk_size] = 0 ; for (k = 0 ; k < (int) chunk_size ; k++) if (! psf_isprint (cptr [k])) { cptr [k] = 0 ; break ; } ; psf_log_printf (psf, " %M : %d\n AppSig : %M\n Name : %s\n", marker, chunk_size, appl_marker, cptr) ; psf_store_string (psf, SF_STR_SOFTWARE, cptr) ; chunk_size += chunk_size & 1 ; } ; break ; case NAME_MARKER : if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 2) { psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ; cptr [chunk_size] = 0 ; psf_log_printf (psf, " %M : %s\n", marker, cptr) ; psf_store_string (psf, SF_STR_TITLE, cptr) ; chunk_size += chunk_size & 1 ; break ; case ANNO_MARKER : if (chunk_size == 0) break ; if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 2) { psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ; return SFE_INTERNAL ; } ; cptr = ubuf.cbuf ; psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ; cptr [chunk_size] = 0 ; psf_log_printf (psf, " %M : %s\n", marker, cptr) ; psf_store_string (psf, SF_STR_COMMENT, cptr) ; chunk_size += chunk_size & 1 ; break ; case INST_MARKER : if (chunk_size != SIZEOF_INST_CHUNK) { psf_log_printf (psf, " %M : %d (should be %d)\n", marker, chunk_size, SIZEOF_INST_CHUNK) ; psf_binheader_readf (psf, "j", chunk_size) ; break ; } ; psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; { uint8_t bytes [6] ; int16_t gain ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) return SFE_MALLOC_FAILED ; psf_binheader_readf (psf, "b", bytes, 6) ; psf_log_printf (psf, " Base Note : %u\n Detune : %u\n" " Low Note : %u\n High Note : %u\n" " Low Vel. : %u\n High Vel. 
: %u\n", bytes [0], bytes [1], bytes [2], bytes [3], bytes [4], bytes [5]) ; psf->instrument->basenote = bytes [0] ; psf->instrument->detune = bytes [1] ; psf->instrument->key_lo = bytes [2] ; psf->instrument->key_hi = bytes [3] ; psf->instrument->velocity_lo = bytes [4] ; psf->instrument->velocity_hi = bytes [5] ; psf_binheader_readf (psf, "E2", &gain) ; psf->instrument->gain = gain ; psf_log_printf (psf, " Gain (dB) : %d\n", gain) ; } ; { int16_t mode ; /* 0 - no loop, 1 - forward looping, 2 - backward looping */ const char *loop_mode ; uint16_t begin, end ; psf_binheader_readf (psf, "E222", &mode, &begin, &end) ; loop_mode = get_loop_mode_str (mode) ; mode = get_loop_mode (mode) ; if (mode == SF_LOOP_NONE) { psf->instrument->loop_count = 0 ; psf->instrument->loops [0].mode = SF_LOOP_NONE ; } else { psf->instrument->loop_count = 1 ; psf->instrument->loops [0].mode = SF_LOOP_FORWARD ; psf->instrument->loops [0].start = begin ; psf->instrument->loops [0].end = end ; psf->instrument->loops [0].count = 0 ; } ; psf_log_printf (psf, " Sustain\n mode : %d => %s\n begin : %u\n end : %u\n", mode, loop_mode, begin, end) ; psf_binheader_readf (psf, "E222", &mode, &begin, &end) ; loop_mode = get_loop_mode_str (mode) ; mode = get_loop_mode (mode) ; if (mode == SF_LOOP_NONE) psf->instrument->loops [1].mode = SF_LOOP_NONE ; else { psf->instrument->loop_count += 1 ; psf->instrument->loops [1].mode = SF_LOOP_FORWARD ; psf->instrument->loops [1].start = begin ; psf->instrument->loops [1].end = end ; psf->instrument->loops [1].count = 0 ; } ; psf_log_printf (psf, " Release\n mode : %d => %s\n begin : %u\n end : %u\n", mode, loop_mode, begin, end) ; } ; instr_found++ ; break ; case basc_MARKER : psf_log_printf (psf, " basc : %u\n", chunk_size) ; if ((error = aiff_read_basc_chunk (psf, chunk_size))) return error ; break ; case MARK_MARKER : psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; { uint16_t mark_id, n = 0 ; uint32_t position ; bytesread = psf_binheader_readf (psf, "E2", &n) ; mark_count = n ; psf_log_printf (psf, " Count : %u\n", mark_count) ; if (paiff->markstr != NULL) { psf_log_printf (psf, "*** Second MARK chunk found. Throwing away the first.\n") ; free (paiff->markstr) ; } ; paiff->markstr = calloc (mark_count, sizeof (MARK_ID_POS)) ; if (paiff->markstr == NULL) return SFE_MALLOC_FAILED ; if (mark_count > 1000) { psf_log_printf (psf, " More than 1000 markers, skipping!\n") ; psf_binheader_readf (psf, "j", chunk_size - bytesread) ; break ; } ; if ((psf->cues = psf_cues_alloc (mark_count)) == NULL) return SFE_MALLOC_FAILED ; for (n = 0 ; n < mark_count && bytesread < chunk_size ; n++) { uint32_t pstr_len ; uint8_t ch ; bytesread += psf_binheader_readf (psf, "E241", &mark_id, &position, &ch) ; psf_log_printf (psf, " Mark ID : %u\n Position : %u\n", mark_id, position) ; psf->cues->cue_points [n].indx = mark_id ; psf->cues->cue_points [n].position = 0 ; psf->cues->cue_points [n].fcc_chunk = MAKE_MARKER ('d', 'a', 't', 'a') ; /* always data */ psf->cues->cue_points [n].chunk_start = 0 ; psf->cues->cue_points [n].block_start = 0 ; psf->cues->cue_points [n].sample_offset = position ; pstr_len = (ch & 1) ? 
ch : ch + 1 ; if (pstr_len < sizeof (ubuf.scbuf) - 1) { bytesread += psf_binheader_readf (psf, "b", ubuf.scbuf, pstr_len) ; ubuf.scbuf [pstr_len] = 0 ; } else { uint32_t read_len = pstr_len - (sizeof (ubuf.scbuf) - 1) ; bytesread += psf_binheader_readf (psf, "bj", ubuf.scbuf, read_len, pstr_len - read_len) ; ubuf.scbuf [sizeof (ubuf.scbuf) - 1] = 0 ; } psf_log_printf (psf, " Name : %s\n", ubuf.scbuf) ; psf_strlcpy (psf->cues->cue_points [n].name, sizeof (psf->cues->cue_points [n].name), ubuf.cbuf) ; paiff->markstr [n].markerID = mark_id ; paiff->markstr [n].position = position ; /* ** TODO if ubuf.scbuf is equal to ** either Beg_loop, Beg loop or beg loop and spam ** if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) ** return SFE_MALLOC_FAILED ; */ } ; } ; mark_found++ ; psf_binheader_readf (psf, "j", chunk_size - bytesread) ; break ; case FVER_MARKER : found_chunk |= HAVE_FVER ; /* Fall through to next case. */ case SFX_MARKER : psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size) ; break ; case NONE_MARKER : /* Fix for broken AIFC files with incorrect COMM chunk length. */ chunk_size = (chunk_size >> 24) - 3 ; psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", make_size_t (chunk_size)) ; break ; case CHAN_MARKER : if (chunk_size < 12) { psf_log_printf (psf, " %M : %d (should be >= 12)\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size) ; break ; } psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ; if ((error = aiff_read_chanmap (psf, chunk_size))) return error ; break ; default : if (chunk_size >= 0xffff0000) { done = SF_TRUE ; psf_log_printf (psf, "*** Unknown chunk marker (%X) at position %D with length %u. Exiting parser.\n", marker, psf_ftell (psf) - 8, chunk_size) ; break ; } ; if (psf_isprint ((marker >> 24) & 0xFF) && psf_isprint ((marker >> 16) & 0xFF) && psf_isprint ((marker >> 8) & 0xFF) && psf_isprint (marker & 0xFF)) { psf_log_printf (psf, " %M : %u (unknown marker)\n", marker, chunk_size) ; psf_binheader_readf (psf, "j", chunk_size) ; break ; } ; if (psf_ftell (psf) & 0x03) { psf_log_printf (psf, " Unknown chunk marker at position %D. Resynching.\n", psf_ftell (psf) - 8) ; psf_binheader_readf (psf, "j", -3) ; break ; } ; psf_log_printf (psf, "*** Unknown chunk marker %X at position %D. Exiting parser.\n", marker, psf_ftell (psf)) ; done = SF_TRUE ; break ; } ; /* switch (marker) */ if (chunk_size >= psf->filelength) { psf_log_printf (psf, "*** Chunk size %u > file length %D. Exiting parser.\n", chunk_size, psf->filelength) ; break ; } ; if ((! 
psf->sf.seekable) && (found_chunk & HAVE_SSND)) break ; if (psf_ftell (psf) >= psf->filelength - (2 * SIGNED_SIZEOF (int32_t))) break ; } ; /* while (1) */ if (instr_found && mark_found) { int ji, str_index ; /* Next loop will convert markers to loop positions for internal handling */ for (ji = 0 ; ji < psf->instrument->loop_count ; ji ++) { if (ji < ARRAY_LEN (psf->instrument->loops)) { psf->instrument->loops [ji].start = marker_to_position (paiff->markstr, psf->instrument->loops [ji].start, mark_count) ; psf->instrument->loops [ji].end = marker_to_position (paiff->markstr, psf->instrument->loops [ji].end, mark_count) ; psf->instrument->loops [ji].mode = SF_LOOP_FORWARD ; } ; } ; /* The markers that correspond to loop positions can now be removed from cues struct */ if (psf->cues->cue_count > (uint32_t) (psf->instrument->loop_count * 2)) { uint32_t j ; for (j = 0 ; j < psf->cues->cue_count - (uint32_t) (psf->instrument->loop_count * 2) ; j ++) { /* This simply copies the information in cues above loop positions and writes it at current count instead */ psf->cues->cue_points [j].indx = psf->cues->cue_points [j + psf->instrument->loop_count * 2].indx ; psf->cues->cue_points [j].position = psf->cues->cue_points [j + psf->instrument->loop_count * 2].position ; psf->cues->cue_points [j].fcc_chunk = psf->cues->cue_points [j + psf->instrument->loop_count * 2].fcc_chunk ; psf->cues->cue_points [j].chunk_start = psf->cues->cue_points [j + psf->instrument->loop_count * 2].chunk_start ; psf->cues->cue_points [j].block_start = psf->cues->cue_points [j + psf->instrument->loop_count * 2].block_start ; psf->cues->cue_points [j].sample_offset = psf->cues->cue_points [j + psf->instrument->loop_count * 2].sample_offset ; for (str_index = 0 ; str_index < 256 ; str_index++) psf->cues->cue_points [j].name [str_index] = psf->cues->cue_points [j + psf->instrument->loop_count * 2].name [str_index] ; } ; psf->cues->cue_count -= psf->instrument->loop_count * 2 ; } else { /* All the cues were in fact loop positions so we can actually remove the cues altogether */ free (psf->cues) ; psf->cues = NULL ; } } ; if (psf->sf.channels < 1) return SFE_CHANNEL_COUNT_ZERO ; if (psf->sf.channels >= SF_MAX_CHANNELS) return SFE_CHANNEL_COUNT ; if (! (found_chunk & HAVE_FORM)) return SFE_AIFF_NO_FORM ; if (! (found_chunk & HAVE_AIFF)) return SFE_AIFF_COMM_NO_FORM ; if (! (found_chunk & HAVE_COMM)) return SFE_AIFF_SSND_NO_COMM ; if (! psf->dataoffset) return SFE_AIFF_NO_DATA ; return 0 ; } /* aiff_read_header */ static int aiff_close (SF_PRIVATE *psf) { AIFF_PRIVATE *paiff = psf->container_data ; if (paiff != NULL && paiff->markstr != NULL) { free (paiff->markstr) ; paiff->markstr = NULL ; } ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) { aiff_write_tailer (psf) ; aiff_write_header (psf, SF_TRUE) ; } ; return 0 ; } /* aiff_close */ static int aiff_read_comm_chunk (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt) { BUF_UNION ubuf ; int subformat, samplerate ; ubuf.scbuf [0] = 0 ; /* The COMM chunk has an int aligned to an odd word boundary. Some ** procesors are not able to deal with this (ie bus fault) so we have ** to take special care. 
*/ psf_binheader_readf (psf, "E242b", &(comm_fmt->numChannels), &(comm_fmt->numSampleFrames), &(comm_fmt->sampleSize), &(comm_fmt->sampleRate), SIGNED_SIZEOF (comm_fmt->sampleRate)) ; if (comm_fmt->size > 0x10000 && (comm_fmt->size & 0xffff) == 0) { psf_log_printf (psf, " COMM : %d (0x%x) *** should be ", comm_fmt->size, comm_fmt->size) ; comm_fmt->size = ENDSWAP_32 (comm_fmt->size) ; psf_log_printf (psf, "%d (0x%x)\n", comm_fmt->size, comm_fmt->size) ; } else psf_log_printf (psf, " COMM : %d\n", comm_fmt->size) ; if (comm_fmt->size == SIZEOF_AIFF_COMM) comm_fmt->encoding = NONE_MARKER ; else if (comm_fmt->size == SIZEOF_AIFC_COMM_MIN) psf_binheader_readf (psf, "Em", &(comm_fmt->encoding)) ; else if (comm_fmt->size >= SIZEOF_AIFC_COMM) { uint8_t encoding_len ; unsigned read_len ; psf_binheader_readf (psf, "Em1", &(comm_fmt->encoding), &encoding_len) ; comm_fmt->size = SF_MIN (sizeof (ubuf.scbuf), make_size_t (comm_fmt->size)) ; memset (ubuf.scbuf, 0, comm_fmt->size) ; read_len = comm_fmt->size - SIZEOF_AIFC_COMM + 1 ; psf_binheader_readf (psf, "b", ubuf.scbuf, read_len) ; ubuf.scbuf [read_len + 1] = 0 ; } ; samplerate = tenbytefloat2int (comm_fmt->sampleRate) ; psf_log_printf (psf, " Sample Rate : %d\n", samplerate) ; psf_log_printf (psf, " Frames : %u%s\n", comm_fmt->numSampleFrames, (comm_fmt->numSampleFrames == 0 && psf->filelength > 104) ? " (Should not be 0)" : "") ; if (comm_fmt->numChannels < 1 || comm_fmt->numChannels >= SF_MAX_CHANNELS) { psf_log_printf (psf, " Channels : %d (should be >= 1 and < %d)\n", comm_fmt->numChannels, SF_MAX_CHANNELS) ; return SFE_CHANNEL_COUNT_BAD ; } ; psf_log_printf (psf, " Channels : %d\n", comm_fmt->numChannels) ; /* Found some broken 'fl32' files with comm.samplesize == 16. Fix it here. */ if ((comm_fmt->encoding == fl32_MARKER || comm_fmt->encoding == FL32_MARKER) && comm_fmt->sampleSize != 32) { psf_log_printf (psf, " Sample Size : %d (should be 32)\n", comm_fmt->sampleSize) ; comm_fmt->sampleSize = 32 ; } else if ((comm_fmt->encoding == fl64_MARKER || comm_fmt->encoding == FL64_MARKER) && comm_fmt->sampleSize != 64) { psf_log_printf (psf, " Sample Size : %d (should be 64)\n", comm_fmt->sampleSize) ; comm_fmt->sampleSize = 64 ; } else psf_log_printf (psf, " Sample Size : %d\n", comm_fmt->sampleSize) ; subformat = s_bitwidth_to_subformat (comm_fmt->sampleSize) ; psf->sf.samplerate = samplerate ; psf->sf.frames = comm_fmt->numSampleFrames ; psf->sf.channels = comm_fmt->numChannels ; psf->bytewidth = BITWIDTH2BYTES (comm_fmt->sampleSize) ; psf->endian = SF_ENDIAN_BIG ; switch (comm_fmt->encoding) { case NONE_MARKER : psf->sf.format = (SF_FORMAT_AIFF | subformat) ; break ; case twos_MARKER : case in24_MARKER : case in32_MARKER : psf->sf.format = (SF_ENDIAN_BIG | SF_FORMAT_AIFF | subformat) ; break ; case sowt_MARKER : case ni24_MARKER : case ni32_MARKER : psf->endian = SF_ENDIAN_LITTLE ; psf->sf.format = (SF_ENDIAN_LITTLE | SF_FORMAT_AIFF | subformat) ; break ; case fl32_MARKER : case FL32_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_FLOAT) ; break ; case ulaw_MARKER : case ULAW_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_ULAW) ; break ; case alaw_MARKER : case ALAW_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_ALAW) ; break ; case fl64_MARKER : case FL64_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_DOUBLE) ; break ; case raw_MARKER : psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_PCM_U8) ; break ; case DWVW_MARKER : psf->sf.format = SF_FORMAT_AIFF ; switch (comm_fmt->sampleSize) { case 12 : psf->sf.format |= 
SF_FORMAT_DWVW_12 ; break ; case 16 : psf->sf.format |= SF_FORMAT_DWVW_16 ; break ; case 24 : psf->sf.format |= SF_FORMAT_DWVW_24 ; break ; default : psf->sf.format |= SF_FORMAT_DWVW_N ; break ; } ; break ; case GSM_MARKER : psf->sf.format = SF_FORMAT_AIFF ; psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_GSM610) ; break ; case ima4_MARKER : psf->endian = SF_ENDIAN_BIG ; psf->sf.format = (SF_FORMAT_AIFF | SF_FORMAT_IMA_ADPCM) ; break ; default : psf_log_printf (psf, "AIFC : Unimplemented format : %M\n", comm_fmt->encoding) ; return SFE_UNIMPLEMENTED ; } ; if (! ubuf.scbuf [0]) psf_log_printf (psf, " Encoding : %M\n", comm_fmt->encoding) ; else psf_log_printf (psf, " Encoding : %M => %s\n", comm_fmt->encoding, ubuf.scbuf) ; return 0 ; } /* aiff_read_comm_chunk */ /*========================================================================================== */ static void aiff_rewrite_header (SF_PRIVATE *psf) { /* Assuming here that the header has already been written and just ** needs to be corrected for new data length. That means that we ** only change the length fields of the FORM and SSND chunks ; ** everything else can be skipped over. */ int k, ch, comm_size, comm_frames ; psf_fseek (psf, 0, SEEK_SET) ; psf_fread (psf->header.ptr, psf->dataoffset, 1, psf) ; psf->header.indx = 0 ; /* FORM chunk. */ psf_binheader_writef (psf, "Etm8", FORM_MARKER, psf->filelength - 8) ; /* COMM chunk. */ if ((k = psf_find_read_chunk_m32 (&psf->rchunks, COMM_MARKER)) >= 0) { psf->header.indx = psf->rchunks.chunks [k].offset - 8 ; comm_frames = psf->sf.frames ; comm_size = psf->rchunks.chunks [k].len ; psf_binheader_writef (psf, "Em42t4", COMM_MARKER, comm_size, psf->sf.channels, comm_frames) ; } ; /* PEAK chunk. */ if ((k = psf_find_read_chunk_m32 (&psf->rchunks, PEAK_MARKER)) >= 0) { psf->header.indx = psf->rchunks.chunks [k].offset - 8 ; psf_binheader_writef (psf, "Em4", PEAK_MARKER, AIFF_PEAK_CHUNK_SIZE (psf->sf.channels)) ; psf_binheader_writef (psf, "E44", 1, time (NULL)) ; for (ch = 0 ; ch < psf->sf.channels ; ch++) psf_binheader_writef (psf, "Eft8", (float) psf->peak_info->peaks [ch].value, psf->peak_info->peaks [ch].position) ; } ; /* SSND chunk. */ if ((k = psf_find_read_chunk_m32 (&psf->rchunks, SSND_MARKER)) >= 0) { psf->header.indx = psf->rchunks.chunks [k].offset - 8 ; psf_binheader_writef (psf, "Etm8", SSND_MARKER, psf->datalength + SIZEOF_SSND_CHUNK) ; } ; /* Header mangling complete so write it out. 
*/ psf_fseek (psf, 0, SEEK_SET) ; psf_fwrite (psf->header.ptr, psf->header.indx, 1, psf) ; return ; } /* aiff_rewrite_header */ static int aiff_write_header (SF_PRIVATE *psf, int calc_length) { sf_count_t current ; AIFF_PRIVATE *paiff ; uint8_t comm_sample_rate [10], comm_zero_bytes [2] = { 0, 0 } ; uint32_t comm_type, comm_size, comm_encoding, comm_frames = 0, uk ; int k, endian, has_data = SF_FALSE ; int16_t bit_width ; if ((paiff = psf->container_data) == NULL) return SFE_INTERNAL ; current = psf_ftell (psf) ; if (current > psf->dataoffset) has_data = SF_TRUE ; if (calc_length) { psf->filelength = psf_get_filelen (psf) ; psf->datalength = psf->filelength - psf->dataoffset ; if (psf->dataend) psf->datalength -= psf->filelength - psf->dataend ; if (psf->bytewidth > 0) psf->sf.frames = psf->datalength / (psf->bytewidth * psf->sf.channels) ; } ; if (psf->file.mode == SFM_RDWR && psf->dataoffset > 0 && psf->rchunks.count > 0) { aiff_rewrite_header (psf) ; if (current > 0) psf_fseek (psf, current, SEEK_SET) ; return 0 ; } ; endian = SF_ENDIAN (psf->sf.format) ; if (CPU_IS_LITTLE_ENDIAN && endian == SF_ENDIAN_CPU) endian = SF_ENDIAN_LITTLE ; /* Standard value here. */ bit_width = psf->bytewidth * 8 ; comm_frames = (psf->sf.frames > 0xFFFFFFFF) ? 0xFFFFFFFF : psf->sf.frames ; switch (SF_CODEC (psf->sf.format) | endian) { case SF_FORMAT_PCM_S8 | SF_ENDIAN_BIG : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = twos_MARKER ; break ; case SF_FORMAT_PCM_S8 | SF_ENDIAN_LITTLE : psf->endian = SF_ENDIAN_LITTLE ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = sowt_MARKER ; break ; case SF_FORMAT_PCM_16 | SF_ENDIAN_BIG : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = twos_MARKER ; break ; case SF_FORMAT_PCM_16 | SF_ENDIAN_LITTLE : psf->endian = SF_ENDIAN_LITTLE ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = sowt_MARKER ; break ; case SF_FORMAT_PCM_24 | SF_ENDIAN_BIG : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = in24_MARKER ; break ; case SF_FORMAT_PCM_24 | SF_ENDIAN_LITTLE : psf->endian = SF_ENDIAN_LITTLE ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = ni24_MARKER ; break ; case SF_FORMAT_PCM_32 | SF_ENDIAN_BIG : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = in32_MARKER ; break ; case SF_FORMAT_PCM_32 | SF_ENDIAN_LITTLE : psf->endian = SF_ENDIAN_LITTLE ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = ni32_MARKER ; break ; case SF_FORMAT_PCM_S8 : /* SF_ENDIAN_FILE */ case SF_FORMAT_PCM_16 : case SF_FORMAT_PCM_24 : case SF_FORMAT_PCM_32 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFF_MARKER ; comm_size = SIZEOF_AIFF_COMM ; comm_encoding = 0 ; break ; case SF_FORMAT_FLOAT : /* Big endian floating point. */ psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = FL32_MARKER ; /* Use 'FL32' because its easier to read. */ break ; case SF_FORMAT_DOUBLE : /* Big endian double precision floating point. */ psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = FL64_MARKER ; /* Use 'FL64' because its easier to read. 
*/ break ; case SF_FORMAT_ULAW : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = ulaw_MARKER ; break ; case SF_FORMAT_ALAW : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = alaw_MARKER ; break ; case SF_FORMAT_PCM_U8 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = raw_MARKER ; break ; case SF_FORMAT_DWVW_12 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = DWVW_MARKER ; /* Override standard value here.*/ bit_width = 12 ; break ; case SF_FORMAT_DWVW_16 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = DWVW_MARKER ; /* Override standard value here.*/ bit_width = 16 ; break ; case SF_FORMAT_DWVW_24 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = DWVW_MARKER ; /* Override standard value here.*/ bit_width = 24 ; break ; case SF_FORMAT_GSM610 : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = GSM_MARKER ; /* Override standard value here.*/ bit_width = 16 ; break ; case SF_FORMAT_IMA_ADPCM : psf->endian = SF_ENDIAN_BIG ; comm_type = AIFC_MARKER ; comm_size = SIZEOF_AIFC_COMM ; comm_encoding = ima4_MARKER ; /* Override standard value here.*/ bit_width = 16 ; comm_frames = psf->sf.frames / AIFC_IMA4_SAMPLES_PER_BLOCK ; break ; default : return SFE_BAD_OPEN_FORMAT ; } ; /* Reset the current header length to zero. */ psf->header.ptr [0] = 0 ; psf->header.indx = 0 ; psf_fseek (psf, 0, SEEK_SET) ; psf_binheader_writef (psf, "Etm8", FORM_MARKER, psf->filelength - 8) ; /* Write AIFF/AIFC marker and COM chunk. */ if (comm_type == AIFC_MARKER) /* AIFC must have an FVER chunk. */ psf_binheader_writef (psf, "Emm44", comm_type, FVER_MARKER, 4, 0xA2805140) ; else psf_binheader_writef (psf, "Em", comm_type) ; paiff->comm_offset = psf->header.indx - 8 ; memset (comm_sample_rate, 0, sizeof (comm_sample_rate)) ; uint2tenbytefloat (psf->sf.samplerate, comm_sample_rate) ; psf_binheader_writef (psf, "Em42t42", COMM_MARKER, comm_size, psf->sf.channels, comm_frames, bit_width) ; psf_binheader_writef (psf, "b", comm_sample_rate, sizeof (comm_sample_rate)) ; /* AIFC chunks have some extra data. */ if (comm_type == AIFC_MARKER) psf_binheader_writef (psf, "mb", comm_encoding, comm_zero_bytes, sizeof (comm_zero_bytes)) ; if (psf->channel_map && paiff->chanmap_tag) psf_binheader_writef (psf, "Em4444", CHAN_MARKER, 12, paiff->chanmap_tag, 0, 0) ; /* Check if there's a INST chunk to write */ if (psf->instrument != NULL && psf->cues != NULL) { /* Huge chunk of code removed here because it had egregious errors that were ** not detected by either the compiler or the tests. It was found when updating ** the way psf_binheader_writef works. */ } else if (psf->instrument == NULL && psf->cues != NULL) { /* There are cues but no loops */ uint32_t idx ; int totalStringLength = 0, stringLength ; /* Here we count how many bytes will the pascal strings need */ for (idx = 0 ; idx < psf->cues->cue_count ; idx++) { stringLength = strlen (psf->cues->cue_points [idx].name) + 1 ; /* We'll count the first byte also of every pascal string */ totalStringLength += stringLength + (stringLength % 2 == 0 ? 
0 : 1) ; } ; psf_binheader_writef (psf, "Em42", MARK_MARKER, 2 + psf->cues->cue_count * (2 + 4) + totalStringLength, psf->cues->cue_count) ; for (idx = 0 ; idx < psf->cues->cue_count ; idx++) psf_binheader_writef (psf, "E24p", psf->cues->cue_points [idx].indx, psf->cues->cue_points [idx].sample_offset, psf->cues->cue_points [idx].name) ; } ; if (psf->strings.flags & SF_STR_LOCATE_START) aiff_write_strings (psf, SF_STR_LOCATE_START) ; if (psf->peak_info != NULL && psf->peak_info->peak_loc == SF_PEAK_START) { psf_binheader_writef (psf, "Em4", PEAK_MARKER, AIFF_PEAK_CHUNK_SIZE (psf->sf.channels)) ; psf_binheader_writef (psf, "E44", 1, time (NULL)) ; for (k = 0 ; k < psf->sf.channels ; k++) psf_binheader_writef (psf, "Eft8", (float) psf->peak_info->peaks [k].value, psf->peak_info->peaks [k].position) ; } ; /* Write custom headers. */ for (uk = 0 ; uk < psf->wchunks.used ; uk++) psf_binheader_writef (psf, "Em4b", psf->wchunks.chunks [uk].mark32, psf->wchunks.chunks [uk].len, psf->wchunks.chunks [uk].data, make_size_t (psf->wchunks.chunks [uk].len)) ; /* Write SSND chunk. */ paiff->ssnd_offset = psf->header.indx ; psf_binheader_writef (psf, "Etm844", SSND_MARKER, psf->datalength + SIZEOF_SSND_CHUNK, 0, 0) ; /* Header construction complete so write it out. */ psf_fwrite (psf->header.ptr, psf->header.indx, 1, psf) ; if (psf->error) return psf->error ; if (has_data && psf->dataoffset != psf->header.indx) return psf->error = SFE_INTERNAL ; psf->dataoffset = psf->header.indx ; if (! has_data) psf_fseek (psf, psf->dataoffset, SEEK_SET) ; else if (current > 0) psf_fseek (psf, current, SEEK_SET) ; return psf->error ; } /* aiff_write_header */ static int aiff_write_tailer (SF_PRIVATE *psf) { int k ; /* Reset the current header length to zero. */ psf->header.ptr [0] = 0 ; psf->header.indx = 0 ; psf->dataend = psf_fseek (psf, 0, SEEK_END) ; /* Make sure tailer data starts at even byte offset. Pad if necessary. */ if (psf->dataend % 2 == 1) { psf_fwrite (psf->header.ptr, 1, 1, psf) ; psf->dataend ++ ; } ; if (psf->peak_info != NULL && psf->peak_info->peak_loc == SF_PEAK_END) { psf_binheader_writef (psf, "Em4", PEAK_MARKER, AIFF_PEAK_CHUNK_SIZE (psf->sf.channels)) ; psf_binheader_writef (psf, "E44", 1, time (NULL)) ; for (k = 0 ; k < psf->sf.channels ; k++) psf_binheader_writef (psf, "Eft8", (float) psf->peak_info->peaks [k].value, psf->peak_info->peaks [k].position) ; } ; if (psf->strings.flags & SF_STR_LOCATE_END) aiff_write_strings (psf, SF_STR_LOCATE_END) ; /* Write the tailer. 
*/ if (psf->header.indx > 0) psf_fwrite (psf->header.ptr, psf->header.indx, 1, psf) ; return 0 ; } /* aiff_write_tailer */ static void aiff_write_strings (SF_PRIVATE *psf, int location) { int k, slen ; for (k = 0 ; k < SF_MAX_STRINGS ; k++) { if (psf->strings.data [k].type == 0) break ; if (psf->strings.data [k].flags != location) continue ; switch (psf->strings.data [k].type) { case SF_STR_SOFTWARE : slen = strlen (psf->strings.storage + psf->strings.data [k].offset) ; psf_binheader_writef (psf, "Em4mb", APPL_MARKER, slen + 4, m3ga_MARKER, psf->strings.storage + psf->strings.data [k].offset, make_size_t (slen + (slen & 1))) ; break ; case SF_STR_TITLE : psf_binheader_writef (psf, "EmS", NAME_MARKER, psf->strings.storage + psf->strings.data [k].offset) ; break ; case SF_STR_COPYRIGHT : psf_binheader_writef (psf, "EmS", c_MARKER, psf->strings.storage + psf->strings.data [k].offset) ; break ; case SF_STR_ARTIST : psf_binheader_writef (psf, "EmS", AUTH_MARKER, psf->strings.storage + psf->strings.data [k].offset) ; break ; case SF_STR_COMMENT : psf_binheader_writef (psf, "EmS", ANNO_MARKER, psf->strings.storage + psf->strings.data [k].offset) ; break ; /* case SF_STR_DATE : psf_binheader_writef (psf, "Ems", ICRD_MARKER, psf->strings.data [k].str) ; break ; */ } ; } ; return ; } /* aiff_write_strings */ static int aiff_command (SF_PRIVATE * psf, int command, void * UNUSED (data), int UNUSED (datasize)) { AIFF_PRIVATE *paiff ; if ((paiff = psf->container_data) == NULL) return SFE_INTERNAL ; switch (command) { case SFC_SET_CHANNEL_MAP_INFO : paiff->chanmap_tag = aiff_caf_find_channel_layout_tag (psf->channel_map, psf->sf.channels) ; return (paiff->chanmap_tag != 0) ; default : break ; } ; return 0 ; } /* aiff_command */ static const char* get_loop_mode_str (int16_t mode) { switch (mode) { case 0 : return "none" ; case 1 : return "forward" ; case 2 : return "backward" ; } ; return "*** unknown" ; } /* get_loop_mode_str */ static int16_t get_loop_mode (int16_t mode) { switch (mode) { case 0 : return SF_LOOP_NONE ; case 1 : return SF_LOOP_FORWARD ; case 2 : return SF_LOOP_BACKWARD ; } ; return SF_LOOP_NONE ; } /* get_loop_mode */ /*========================================================================================== ** Rough hack at converting from 80 bit IEEE float in AIFF header to an int and ** back again. It assumes that all sample rates are between 1 and 800MHz, which ** should be OK as other sound file formats use a 32 bit integer to store sample ** rate. ** There is another (probably better) version in the source code to the SoX but it ** has a copyright which probably prevents it from being allowable as GPL/LGPL. */ static int tenbytefloat2int (uint8_t *bytes) { int val = 3 ; if (bytes [0] & 0x80) /* Negative number. */ return 0 ; if (bytes [0] <= 0x3F) /* Less than 1. */ return 1 ; if (bytes [0] > 0x40) /* Way too big. */ return 0x4000000 ; if (bytes [0] == 0x40 && bytes [1] > 0x1C) /* Too big. */ return 800000000 ; /* Ok, can handle it. */ val = (bytes [2] << 23) | (bytes [3] << 15) | (bytes [4] << 7) | (bytes [5] >> 1) ; val >>= (29 - bytes [1]) ; return val ; } /* tenbytefloat2int */ static void uint2tenbytefloat (uint32_t num, uint8_t *bytes) { uint32_t mask = 0x40000000 ; int count ; if (num <= 1) { bytes [0] = 0x3F ; bytes [1] = 0xFF ; bytes [2] = 0x80 ; return ; } ; bytes [0] = 0x40 ; if (num >= mask) { bytes [1] = 0x1D ; return ; } ; for (count = 0 ; count < 32 ; count ++) { if (num & mask) break ; mask >>= 1 ; } ; num = count < 31 ? 
num << (count + 1) : 0 ; bytes [1] = 29 - count ; bytes [2] = (num >> 24) & 0xFF ; bytes [3] = (num >> 16) & 0xFF ; bytes [4] = (num >> 8) & 0xFF ; bytes [5] = num & 0xFF ; } /* uint2tenbytefloat */ static int aiff_read_basc_chunk (SF_PRIVATE * psf, int datasize) { const char * type_str ; basc_CHUNK bc ; int count ; count = psf_binheader_readf (psf, "E442", &bc.version, &bc.numBeats, &bc.rootNote) ; count += psf_binheader_readf (psf, "E222", &bc.scaleType, &bc.sigNumerator, &bc.sigDenominator) ; count += psf_binheader_readf (psf, "E2j", &bc.loopType, datasize - sizeof (bc)) ; psf_log_printf (psf, " Version ? : %u\n Num Beats : %u\n Root Note : 0x%x\n", bc.version, bc.numBeats, bc.rootNote) ; switch (bc.scaleType) { case basc_SCALE_MINOR : type_str = "MINOR" ; break ; case basc_SCALE_MAJOR : type_str = "MAJOR" ; break ; case basc_SCALE_NEITHER : type_str = "NEITHER" ; break ; case basc_SCALE_BOTH : type_str = "BOTH" ; break ; default : type_str = "!!WRONG!!" ; break ; } ; psf_log_printf (psf, " ScaleType : 0x%x (%s)\n", bc.scaleType, type_str) ; psf_log_printf (psf, " Time Sig : %d/%d\n", bc.sigNumerator, bc.sigDenominator) ; switch (bc.loopType) { case basc_TYPE_ONE_SHOT : type_str = "One Shot" ; break ; case basc_TYPE_LOOP : type_str = "Loop" ; break ; default: type_str = "!!WRONG!!" ; break ; } ; psf_log_printf (psf, " Loop Type : 0x%x (%s)\n", bc.loopType, type_str) ; if ((psf->loop_info = calloc (1, sizeof (SF_LOOP_INFO))) == NULL) return SFE_MALLOC_FAILED ; psf->loop_info->time_sig_num = bc.sigNumerator ; psf->loop_info->time_sig_den = bc.sigDenominator ; psf->loop_info->loop_mode = (bc.loopType == basc_TYPE_ONE_SHOT) ? SF_LOOP_NONE : SF_LOOP_FORWARD ; psf->loop_info->num_beats = bc.numBeats ; /* Can always be recalculated from other known fields. 
*/ psf->loop_info->bpm = (1.0 / psf->sf.frames) * psf->sf.samplerate * ((bc.numBeats * 4.0) / bc.sigDenominator) * 60.0 ; psf->loop_info->root_key = bc.rootNote ; if (count < datasize) psf_binheader_readf (psf, "j", datasize - count) ; return 0 ; } /* aiff_read_basc_chunk */ static int aiff_read_chanmap (SF_PRIVATE * psf, unsigned dword) { const AIFF_CAF_CHANNEL_MAP * map_info ; unsigned channel_bitmap, channel_decriptions, bytesread ; int layout_tag ; bytesread = psf_binheader_readf (psf, "444", &layout_tag, &channel_bitmap, &channel_decriptions) ; if ((map_info = aiff_caf_of_channel_layout_tag (layout_tag)) == NULL) return 0 ; psf_log_printf (psf, " Tag : %x\n", layout_tag) ; if (map_info) psf_log_printf (psf, " Layout : %s\n", map_info->name) ; if (bytesread < dword) psf_binheader_readf (psf, "j", dword - bytesread) ; if (map_info->channel_map != NULL) { size_t chanmap_size = SF_MIN (psf->sf.channels, layout_tag & 0xffff) * sizeof (psf->channel_map [0]) ; free (psf->channel_map) ; if ((psf->channel_map = malloc (chanmap_size)) == NULL) return SFE_MALLOC_FAILED ; memcpy (psf->channel_map, map_info->channel_map, chanmap_size) ; } ; return 0 ; } /* aiff_read_chanmap */ /*============================================================================== */ static int aiff_set_chunk (SF_PRIVATE *psf, const SF_CHUNK_INFO * chunk_info) { return psf_save_write_chunk (&psf->wchunks, chunk_info) ; } /* aiff_set_chunk */ static SF_CHUNK_ITERATOR * aiff_next_chunk_iterator (SF_PRIVATE *psf, SF_CHUNK_ITERATOR * iterator) { return psf_next_chunk_iterator (&psf->rchunks, iterator) ; } /* aiff_next_chunk_iterator */ static int aiff_get_chunk_size (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { int indx ; if ((indx = psf_find_read_chunk_iterator (&psf->rchunks, iterator)) < 0) return SFE_UNKNOWN_CHUNK ; chunk_info->datalen = psf->rchunks.chunks [indx].len ; return SFE_NO_ERROR ; } /* aiff_get_chunk_size */ static int aiff_get_chunk_data (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { sf_count_t pos ; int indx ; if ((indx = psf_find_read_chunk_iterator (&psf->rchunks, iterator)) < 0) return SFE_UNKNOWN_CHUNK ; if (chunk_info->data == NULL) return SFE_BAD_CHUNK_DATA_PTR ; chunk_info->id_size = psf->rchunks.chunks [indx].id_size ; memcpy (chunk_info->id, psf->rchunks.chunks [indx].id, sizeof (chunk_info->id) / sizeof (*chunk_info->id)) ; pos = psf_ftell (psf) ; psf_fseek (psf, psf->rchunks.chunks [indx].offset, SEEK_SET) ; psf_fread (chunk_info->data, SF_MIN (chunk_info->datalen, psf->rchunks.chunks [indx].len), 1, psf) ; psf_fseek (psf, pos, SEEK_SET) ; return SFE_NO_ERROR ; } /* aiff_get_chunk_data */
aiff_read_chanmap (SF_PRIVATE * psf, unsigned dword) { const AIFF_CAF_CHANNEL_MAP * map_info ; unsigned channel_bitmap, channel_decriptions, bytesread ; int layout_tag ; bytesread = psf_binheader_readf (psf, "444", &layout_tag, &channel_bitmap, &channel_decriptions) ; if ((map_info = aiff_caf_of_channel_layout_tag (layout_tag)) == NULL) return 0 ; psf_log_printf (psf, " Tag : %x\n", layout_tag) ; if (map_info) psf_log_printf (psf, " Layout : %s\n", map_info->name) ; if (bytesread < dword) psf_binheader_readf (psf, "j", dword - bytesread) ; if (map_info->channel_map != NULL) { size_t chanmap_size = psf->sf.channels * sizeof (psf->channel_map [0]) ; free (psf->channel_map) ; if ((psf->channel_map = malloc (chanmap_size)) == NULL) return SFE_MALLOC_FAILED ; memcpy (psf->channel_map, map_info->channel_map, chanmap_size) ; } ; return 0 ; } /* aiff_read_chanmap */
aiff_read_chanmap (SF_PRIVATE * psf, unsigned dword) { const AIFF_CAF_CHANNEL_MAP * map_info ; unsigned channel_bitmap, channel_decriptions, bytesread ; int layout_tag ; bytesread = psf_binheader_readf (psf, "444", &layout_tag, &channel_bitmap, &channel_decriptions) ; if ((map_info = aiff_caf_of_channel_layout_tag (layout_tag)) == NULL) return 0 ; psf_log_printf (psf, " Tag : %x\n", layout_tag) ; if (map_info) psf_log_printf (psf, " Layout : %s\n", map_info->name) ; if (bytesread < dword) psf_binheader_readf (psf, "j", dword - bytesread) ; if (map_info->channel_map != NULL) { size_t chanmap_size = SF_MIN (psf->sf.channels, layout_tag & 0xffff) * sizeof (psf->channel_map [0]) ; free (psf->channel_map) ; if ((psf->channel_map = malloc (chanmap_size)) == NULL) return SFE_MALLOC_FAILED ; memcpy (psf->channel_map, map_info->channel_map, chanmap_size) ; } ; return 0 ; } /* aiff_read_chanmap */
{'added': [(1762, '\t{\tsize_t chanmap_size = SF_MIN (psf->sf.channels, layout_tag & 0xffff) * sizeof (psf->channel_map [0]) ;')], 'deleted': [(1762, '\t{\tsize_t chanmap_size = psf->sf.channels * sizeof (psf->channel_map [0]) ;')]}
1
1
1,277
9,668
https://github.com/erikd/libsndfile
CVE-2017-6892
['CWE-119']
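The record above (libsndfile, CVE-2017-6892, CWE-119) shows a one-line fix in aiff_read_chanmap: the allocation/copy size, previously derived only from the file-declared channel count, is clamped with SF_MIN against the channel count encoded in layout_tag. The following is a minimal, self-contained sketch of that out-of-bounds pattern and its clamped fix; it is not libsndfile code, and the names (layout_table, file_channels, MIN) are illustrative only.

```c
/*
 * Sketch of the CWE-119 pattern fixed in the record above: the copy
 * length comes from an untrusted file header, but the source table has
 * a fixed length, so the copy must be clamped to the smaller of the two
 * (mirroring the SF_MIN() added in the diff). Hypothetical names.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Fixed-size source table, e.g. a predefined channel layout. */
	const int layout_table[2] = { 1, 2 };
	const size_t layout_len = 2;

	/* Channel count as declared by an untrusted file header. */
	size_t file_channels = 8;

	/* Clamp the copy length to what the source actually provides. */
	size_t n = MIN(file_channels, layout_len);

	int *map = malloc(n * sizeof *map);
	if (map == NULL)
		return 1;
	memcpy(map, layout_table, n * sizeof *map);

	printf("copied %zu of %zu requested entries\n", n, file_channels);
	free(map);
	return 0;
}
```

Without the clamp, the memcpy would read (and the caller would later use) memory past the end of the fixed layout table whenever the file declares more channels than the layout defines.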
request_key_auth.c
key_get_instantiation_authkey
/* Request key authorisation token key definition. * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * See Documentation/security/keys-request-key.txt */ #include <linux/module.h> #include <linux/sched.h> #include <linux/err.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/uaccess.h> #include "internal.h" #include <keys/user-type.h> static int request_key_auth_preparse(struct key_preparsed_payload *); static void request_key_auth_free_preparse(struct key_preparsed_payload *); static int request_key_auth_instantiate(struct key *, struct key_preparsed_payload *); static void request_key_auth_describe(const struct key *, struct seq_file *); static void request_key_auth_revoke(struct key *); static void request_key_auth_destroy(struct key *); static long request_key_auth_read(const struct key *, char __user *, size_t); /* * The request-key authorisation key type definition. */ struct key_type key_type_request_key_auth = { .name = ".request_key_auth", .def_datalen = sizeof(struct request_key_auth), .preparse = request_key_auth_preparse, .free_preparse = request_key_auth_free_preparse, .instantiate = request_key_auth_instantiate, .describe = request_key_auth_describe, .revoke = request_key_auth_revoke, .destroy = request_key_auth_destroy, .read = request_key_auth_read, }; static int request_key_auth_preparse(struct key_preparsed_payload *prep) { return 0; } static void request_key_auth_free_preparse(struct key_preparsed_payload *prep) { } /* * Instantiate a request-key authorisation key. */ static int request_key_auth_instantiate(struct key *key, struct key_preparsed_payload *prep) { key->payload.data = (struct request_key_auth *)prep->data; return 0; } /* * Describe an authorisation token. */ static void request_key_auth_describe(const struct key *key, struct seq_file *m) { struct request_key_auth *rka = key->payload.data; seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); } /* * Read the callout_info data (retrieves the callout information). * - the key's semaphore is read-locked */ static long request_key_auth_read(const struct key *key, char __user *buffer, size_t buflen) { struct request_key_auth *rka = key->payload.data; size_t datalen; long ret; datalen = rka->callout_len; ret = datalen; /* we can return the data as is */ if (buffer && buflen > 0) { if (buflen > datalen) buflen = datalen; if (copy_to_user(buffer, rka->callout_info, buflen) != 0) ret = -EFAULT; } return ret; } /* * Handle revocation of an authorisation token key. * * Called with the key sem write-locked. */ static void request_key_auth_revoke(struct key *key) { struct request_key_auth *rka = key->payload.data; kenter("{%d}", key->serial); if (rka->cred) { put_cred(rka->cred); rka->cred = NULL; } } /* * Destroy an instantiation authorisation token key. 
*/ static void request_key_auth_destroy(struct key *key) { struct request_key_auth *rka = key->payload.data; kenter("{%d}", key->serial); if (rka->cred) { put_cred(rka->cred); rka->cred = NULL; } key_put(rka->target_key); key_put(rka->dest_keyring); kfree(rka->callout_info); kfree(rka); } /* * Create an authorisation token for /sbin/request-key or whoever to gain * access to the caller's security data. */ struct key *request_key_auth_new(struct key *target, const void *callout_info, size_t callout_len, struct key *dest_keyring) { struct request_key_auth *rka, *irka; const struct cred *cred = current->cred; struct key *authkey = NULL; char desc[20]; int ret; kenter("%d,", target->serial); /* allocate a auth record */ rka = kmalloc(sizeof(*rka), GFP_KERNEL); if (!rka) { kleave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } rka->callout_info = kmalloc(callout_len, GFP_KERNEL); if (!rka->callout_info) { kleave(" = -ENOMEM"); kfree(rka); return ERR_PTR(-ENOMEM); } /* see if the calling process is already servicing the key request of * another process */ if (cred->request_key_auth) { /* it is - use that instantiation context here too */ down_read(&cred->request_key_auth->sem); /* if the auth key has been revoked, then the key we're * servicing is already instantiated */ if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags)) goto auth_key_revoked; irka = cred->request_key_auth->payload.data; rka->cred = get_cred(irka->cred); rka->pid = irka->pid; up_read(&cred->request_key_auth->sem); } else { /* it isn't - use this process as the context */ rka->cred = get_cred(cred); rka->pid = current->pid; } rka->target_key = key_get(target); rka->dest_keyring = key_get(dest_keyring); memcpy(rka->callout_info, callout_info, callout_len); rka->callout_len = callout_len; /* allocate the auth key */ sprintf(desc, "%x", target->serial); authkey = key_alloc(&key_type_request_key_auth, desc, cred->fsuid, cred->fsgid, cred, KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA); if (IS_ERR(authkey)) { ret = PTR_ERR(authkey); goto error_alloc; } /* construct the auth key */ ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL); if (ret < 0) goto error_inst; kleave(" = {%d,%d}", authkey->serial, atomic_read(&authkey->usage)); return authkey; auth_key_revoked: up_read(&cred->request_key_auth->sem); kfree(rka->callout_info); kfree(rka); kleave("= -EKEYREVOKED"); return ERR_PTR(-EKEYREVOKED); error_inst: key_revoke(authkey); key_put(authkey); error_alloc: key_put(rka->target_key); key_put(rka->dest_keyring); kfree(rka->callout_info); kfree(rka); kleave("= %d", ret); return ERR_PTR(ret); } /* * Search the current process's keyrings for the authorisation key for * instantiation of a key. */ struct key *key_get_instantiation_authkey(key_serial_t target_id) { char description[16]; struct keyring_search_context ctx = { .index_key.type = &key_type_request_key_auth, .index_key.description = description, .cred = current_cred(), .match_data.cmp = user_match, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, }; struct key *authkey; key_ref_t authkey_ref; sprintf(description, "%x", target_id); authkey_ref = search_process_keyrings(&ctx); if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); if (authkey == ERR_PTR(-EAGAIN)) authkey = ERR_PTR(-ENOKEY); goto error; } authkey = key_ref_to_ptr(authkey_ref); if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) { key_put(authkey); authkey = ERR_PTR(-EKEYREVOKED); } error: return authkey; }
/* Request key authorisation token key definition. * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * See Documentation/security/keys-request-key.txt */ #include <linux/module.h> #include <linux/sched.h> #include <linux/err.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/uaccess.h> #include "internal.h" #include <keys/user-type.h> static int request_key_auth_preparse(struct key_preparsed_payload *); static void request_key_auth_free_preparse(struct key_preparsed_payload *); static int request_key_auth_instantiate(struct key *, struct key_preparsed_payload *); static void request_key_auth_describe(const struct key *, struct seq_file *); static void request_key_auth_revoke(struct key *); static void request_key_auth_destroy(struct key *); static long request_key_auth_read(const struct key *, char __user *, size_t); /* * The request-key authorisation key type definition. */ struct key_type key_type_request_key_auth = { .name = ".request_key_auth", .def_datalen = sizeof(struct request_key_auth), .preparse = request_key_auth_preparse, .free_preparse = request_key_auth_free_preparse, .instantiate = request_key_auth_instantiate, .describe = request_key_auth_describe, .revoke = request_key_auth_revoke, .destroy = request_key_auth_destroy, .read = request_key_auth_read, }; static int request_key_auth_preparse(struct key_preparsed_payload *prep) { return 0; } static void request_key_auth_free_preparse(struct key_preparsed_payload *prep) { } /* * Instantiate a request-key authorisation key. */ static int request_key_auth_instantiate(struct key *key, struct key_preparsed_payload *prep) { key->payload.data = (struct request_key_auth *)prep->data; return 0; } /* * Describe an authorisation token. */ static void request_key_auth_describe(const struct key *key, struct seq_file *m) { struct request_key_auth *rka = key->payload.data; seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); } /* * Read the callout_info data (retrieves the callout information). * - the key's semaphore is read-locked */ static long request_key_auth_read(const struct key *key, char __user *buffer, size_t buflen) { struct request_key_auth *rka = key->payload.data; size_t datalen; long ret; datalen = rka->callout_len; ret = datalen; /* we can return the data as is */ if (buffer && buflen > 0) { if (buflen > datalen) buflen = datalen; if (copy_to_user(buffer, rka->callout_info, buflen) != 0) ret = -EFAULT; } return ret; } /* * Handle revocation of an authorisation token key. * * Called with the key sem write-locked. */ static void request_key_auth_revoke(struct key *key) { struct request_key_auth *rka = key->payload.data; kenter("{%d}", key->serial); if (rka->cred) { put_cred(rka->cred); rka->cred = NULL; } } /* * Destroy an instantiation authorisation token key. 
*/ static void request_key_auth_destroy(struct key *key) { struct request_key_auth *rka = key->payload.data; kenter("{%d}", key->serial); if (rka->cred) { put_cred(rka->cred); rka->cred = NULL; } key_put(rka->target_key); key_put(rka->dest_keyring); kfree(rka->callout_info); kfree(rka); } /* * Create an authorisation token for /sbin/request-key or whoever to gain * access to the caller's security data. */ struct key *request_key_auth_new(struct key *target, const void *callout_info, size_t callout_len, struct key *dest_keyring) { struct request_key_auth *rka, *irka; const struct cred *cred = current->cred; struct key *authkey = NULL; char desc[20]; int ret; kenter("%d,", target->serial); /* allocate a auth record */ rka = kmalloc(sizeof(*rka), GFP_KERNEL); if (!rka) { kleave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } rka->callout_info = kmalloc(callout_len, GFP_KERNEL); if (!rka->callout_info) { kleave(" = -ENOMEM"); kfree(rka); return ERR_PTR(-ENOMEM); } /* see if the calling process is already servicing the key request of * another process */ if (cred->request_key_auth) { /* it is - use that instantiation context here too */ down_read(&cred->request_key_auth->sem); /* if the auth key has been revoked, then the key we're * servicing is already instantiated */ if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags)) goto auth_key_revoked; irka = cred->request_key_auth->payload.data; rka->cred = get_cred(irka->cred); rka->pid = irka->pid; up_read(&cred->request_key_auth->sem); } else { /* it isn't - use this process as the context */ rka->cred = get_cred(cred); rka->pid = current->pid; } rka->target_key = key_get(target); rka->dest_keyring = key_get(dest_keyring); memcpy(rka->callout_info, callout_info, callout_len); rka->callout_len = callout_len; /* allocate the auth key */ sprintf(desc, "%x", target->serial); authkey = key_alloc(&key_type_request_key_auth, desc, cred->fsuid, cred->fsgid, cred, KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA); if (IS_ERR(authkey)) { ret = PTR_ERR(authkey); goto error_alloc; } /* construct the auth key */ ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL); if (ret < 0) goto error_inst; kleave(" = {%d,%d}", authkey->serial, atomic_read(&authkey->usage)); return authkey; auth_key_revoked: up_read(&cred->request_key_auth->sem); kfree(rka->callout_info); kfree(rka); kleave("= -EKEYREVOKED"); return ERR_PTR(-EKEYREVOKED); error_inst: key_revoke(authkey); key_put(authkey); error_alloc: key_put(rka->target_key); key_put(rka->dest_keyring); kfree(rka->callout_info); kfree(rka); kleave("= %d", ret); return ERR_PTR(ret); } /* * Search the current process's keyrings for the authorisation key for * instantiation of a key. */ struct key *key_get_instantiation_authkey(key_serial_t target_id) { char description[16]; struct keyring_search_context ctx = { .index_key.type = &key_type_request_key_auth, .index_key.description = description, .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, }; struct key *authkey; key_ref_t authkey_ref; sprintf(description, "%x", target_id); authkey_ref = search_process_keyrings(&ctx); if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); if (authkey == ERR_PTR(-EAGAIN)) authkey = ERR_PTR(-ENOKEY); goto error; } authkey = key_ref_to_ptr(authkey_ref); if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) { key_put(authkey); authkey = ERR_PTR(-EKEYREVOKED); } error: return authkey; }
struct key *key_get_instantiation_authkey(key_serial_t target_id) { char description[16]; struct keyring_search_context ctx = { .index_key.type = &key_type_request_key_auth, .index_key.description = description, .cred = current_cred(), .match_data.cmp = user_match, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, }; struct key *authkey; key_ref_t authkey_ref; sprintf(description, "%x", target_id); authkey_ref = search_process_keyrings(&ctx); if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); if (authkey == ERR_PTR(-EAGAIN)) authkey = ERR_PTR(-ENOKEY); goto error; } authkey = key_ref_to_ptr(authkey_ref); if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) { key_put(authkey); authkey = ERR_PTR(-EKEYREVOKED); } error: return authkey; }
struct key *key_get_instantiation_authkey(key_serial_t target_id) { char description[16]; struct keyring_search_context ctx = { .index_key.type = &key_type_request_key_auth, .index_key.description = description, .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, }; struct key *authkey; key_ref_t authkey_ref; sprintf(description, "%x", target_id); authkey_ref = search_process_keyrings(&ctx); if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); if (authkey == ERR_PTR(-EAGAIN)) authkey = ERR_PTR(-ENOKEY); goto error; } authkey = key_ref_to_ptr(authkey_ref); if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) { key_put(authkey); authkey = ERR_PTR(-EKEYREVOKED); } error: return authkey; }
{'added': [(249, '\t\t.match_data.cmp\t\t= key_default_cmp,')], 'deleted': [(249, '\t\t.match_data.cmp\t\t= user_match,')]}
1
1
184
1,122
https://github.com/torvalds/linux
CVE-2017-2647
['CWE-476']
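The record above (Linux keys subsystem, CVE-2017-2647, CWE-476) changes only the comparison callback used by key_get_instantiation_authkey, from user_match to key_default_cmp. The CWE tag indicates a NULL pointer dereference reachable through the old comparator. Below is a minimal, hedged sketch of that general class of bug, not kernel code; fake_key, payload_cmp, and desc_cmp are invented names used purely for illustration.

```c
/*
 * Sketch of the CWE-476 class behind the record above: a search callback
 * that assumes a particular payload layout can dereference NULL when
 * applied to an object that does not carry that payload, whereas a
 * default comparison on the description field is safe. Hypothetical names.
 */
#include <stdio.h>
#include <string.h>

struct fake_key {
	const char *description;
	const char *payload;	/* may be NULL for some key types */
};

/* Unsafe: assumes the payload is always present. */
static int payload_cmp(const struct fake_key *k, const char *want)
{
	return strcmp(k->payload, want) == 0;	/* faults if payload == NULL */
}

/* Safe default: match on the description only. */
static int desc_cmp(const struct fake_key *k, const char *want)
{
	return strcmp(k->description, want) == 0;
}

int main(void)
{
	struct fake_key k = { .description = "1a2b3c", .payload = NULL };

	(void)payload_cmp;	/* unused here; would fault on this key */

	/* The description-based comparator never touches the NULL payload. */
	printf("match: %d\n", desc_cmp(&k, "1a2b3c"));
	return 0;
}
```

The shape of the fix, swapping a payload-assuming comparator for one that only inspects fields guaranteed to exist, matches the one-line diff recorded in the entry.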
parse.c
get_over
/* * Parsing functions. * * This file is part of abcm2ps. * * Copyright (C) 1998-2020 Jean-François Moine (http://moinejf.free.fr) * Adapted from abc2ps, Copyright (C) 1996-1998 Michael Methfessel * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <ctype.h> #include <regex.h> #include "abcm2ps.h" /* options = external formatting */ struct symsel_s { /* symbol selection */ short bar; short time; char seq; }; struct brk_s { /* music line break */ struct brk_s *next; struct symsel_s symsel; }; struct voice_opt_s { /* voice options */ struct voice_opt_s *next; struct SYMBOL *s; /* list of options (%%xxx) */ }; struct tune_opt_s { /* tune options */ struct tune_opt_s *next; struct voice_opt_s *voice_opts; struct SYMBOL *s; /* list of options (%%xxx) */ }; int nstaff; /* (0..MAXSTAFF-1) */ struct SYMBOL *tsfirst; /* first symbol in the time sorted list */ struct VOICE_S voice_tb[MAXVOICE]; /* voice table */ struct VOICE_S *first_voice; /* first voice */ struct SYSTEM *cursys; /* current system */ static struct SYSTEM *parsys; /* current system while parsing */ struct FORMAT dfmt; /* current global format */ int nbar; /* current measure number */ struct map *maps; /* note mappings */ static struct voice_opt_s *voice_opts, *tune_voice_opts; static struct tune_opt_s *tune_opts, *cur_tune_opts; static struct brk_s *brks; static struct symsel_s clip_start, clip_end; static INFO info_glob; /* global info definitions */ static char *deco_glob[256]; /* global decoration table */ static struct map *maps_glob; /* save note maps */ static int over_time; /* voice overlay start time */ static int over_mxtime; /* voice overlay max time */ static short over_bar; /* voice overlay in a measure */ static short over_voice; /* main voice in voice overlay */ static int staves_found; /* time of the last %%staves */ static int abc2win; static int capo; // capo indication float multicol_start; /* (for multicol) */ static float multicol_max; static float lmarg, rmarg; static void get_clef(struct SYMBOL *s); static struct SYMBOL *get_info(struct SYMBOL *s); static void get_key(struct SYMBOL *s); static void get_meter(struct SYMBOL *s); static void get_voice(struct SYMBOL *s); static void get_note(struct SYMBOL *s); static struct SYMBOL *process_pscomment(struct SYMBOL *s); static void ps_def(struct SYMBOL *s, char *p, char use); static void set_tblt(struct VOICE_S *p_voice); static void set_tuplet(struct SYMBOL *s); /* -- weight of the symbols -- */ static char w_tb[NSYMTYPES] = { /* !! index = symbol type !! 
*/ 0, 9, /* 1- note / rest */ 3, /* 2- space */ 2, /* 3- bar */ 1, /* 4- clef */ 6, /* 5- timesig */ 5, /* 6- keysig */ 0, /* 7- tempo */ 0, /* 8- staves */ 9, /* 9- mrest */ 0, /* 10- part */ 3, /* 11- grace */ 0, /* 12- fmtchg */ 8, /* 13- tuplet */ 7, /* 14- stbrk */ 7 /* 15- custos */ }; /* key signature transposition tables */ static signed char cde2fcg[7] = {0, 2, 4, -1, 1, 3, 5}; static char cgd2cde[7] = {0, 4, 1, 5, 2, 6, 3}; /* -- link a ABC symbol into the current voice -- */ static void sym_link(struct SYMBOL *s, int type) { struct VOICE_S *p_voice = curvoice; if (!p_voice->ignore) { s->prev = p_voice->last_sym; if (s->prev) p_voice->last_sym->next = s; else p_voice->sym = s; p_voice->last_sym = s; //fixme:test bug // } else { // if (p_voice->sym) // p_voice->last_sym = p_voice->sym = s; } s->type = type; s->voice = p_voice - voice_tb; s->staff = p_voice->cstaff; s->time = p_voice->time; s->posit = p_voice->posit; } /* -- add a new symbol in a voice -- */ struct SYMBOL *sym_add(struct VOICE_S *p_voice, int type) { struct SYMBOL *s; struct VOICE_S *p_voice2; s = (struct SYMBOL *) getarena(sizeof *s); memset(s, 0, sizeof *s); p_voice2 = curvoice; curvoice = p_voice; sym_link(s, type); curvoice = p_voice2; if (p_voice->second) s->sflags |= S_SECOND; if (p_voice->floating) s->sflags |= S_FLOATING; if (s->prev) { s->fn = s->prev->fn; s->linenum = s->prev->linenum; s->colnum = s->prev->colnum; } return s; } /* -- expand a multi-rest into single rests and measure bars -- */ static void mrest_expand(struct SYMBOL *s) { struct VOICE_S *p_voice; struct SYMBOL *s2, *next; struct decos dc; int nb, dt; nb = s->u.bar.len; dt = s->dur / nb; /* change the multi-rest (type bar) to a single rest */ memcpy(&dc, &s->u.bar.dc, sizeof dc); memset(&s->u.note, 0, sizeof s->u.note); s->type = NOTEREST; s->abc_type = ABC_T_REST; // s->nhd = 0; s->dur = s->u.note.notes[0].len = dt; s->head = H_FULL; s->nflags = -2; /* add the bar(s) and rest(s) */ next = s->next; p_voice = &voice_tb[s->voice]; p_voice->last_sym = s; p_voice->time = s->time + dt; p_voice->cstaff = s->staff; s2 = s; while (--nb > 0) { s2 = sym_add(p_voice, BAR); s2->abc_type = ABC_T_BAR; s2->u.bar.type = B_SINGLE; s2 = sym_add(p_voice, NOTEREST); s2->abc_type = ABC_T_REST; s2->flags = s->flags; s2->dur = s2->u.note.notes[0].len = dt; s2->head = H_FULL; s2->nflags = -2; p_voice->time += dt; } s2->next = next; if (next) next->prev = s2; /* copy the mrest decorations to the last rest */ memcpy(&s2->u.note.dc, &dc, sizeof s2->u.note.dc); } /* -- sort all symbols by time and vertical sequence -- */ static void sort_all(void) { struct SYSTEM *sy; struct SYMBOL *s, *prev, *s2; struct VOICE_S *p_voice; int fl, voice, time, w, wmin, multi, mrest_time; int nb, r, set_sy, new_sy; // nv struct SYMBOL *vtb[MAXVOICE]; signed char vn[MAXVOICE]; /* voice indexed by range */ /* memset(vtb, 0, sizeof vtb); */ mrest_time = -1; for (p_voice = first_voice; p_voice; p_voice = p_voice->next) vtb[p_voice - voice_tb] = p_voice->sym; /* initialize the voice order */ sy = cursys; set_sy = 1; new_sy = 0; prev = NULL; fl = 1; /* (have gcc happy) */ multi = -1; /* (have gcc happy) */ for (;;) { if (set_sy) { fl = 1; // start a new sequence // if (!new_sy) { if (1) { set_sy = 0; multi = -1; memset(vn, -1, sizeof vn); for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { voice = p_voice - voice_tb; r = sy->voice[voice].range; if (r < 0) continue; vn[r] = voice; multi++; } } } /* search the min time and symbol weight */ wmin = time = (unsigned) ~0 >> 1; /* max 
int */ // nv = nb = 0; for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (!s || s->time > time) continue; w = w_tb[s->type]; if (s->time < time) { time = s->time; wmin = w; // nb = 0; } else if (w < wmin) { wmin = w; // nb = 0; } #if 0 if (!(s->sflags & S_SECOND)) { nv++; if (s->type == BAR) nb++; } #endif if (s->type == MREST) { if (s->u.bar.len == 1) mrest_expand(s); else if (multi > 0) mrest_time = time; } } if (wmin > 127) break; /* done */ #if 0 /* align the measure bars */ if (nb != 0 && nb != nv) { /* if other symbol than bars */ wmin = (unsigned) ~0 >> 1; for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (!s || s->time > time || s->type == BAR) continue; w = w_tb[s->type]; if (w < wmin) wmin = w; } if (wmin > 127) wmin = w_tb[BAR]; } #endif /* if some multi-rest and many voices, expand */ if (time == mrest_time) { nb = 0; for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (!s || s->time != time) continue; w = w_tb[s->type]; if (w != wmin) continue; if (s->type != MREST) { mrest_time = -1; /* some note or rest */ break; } if (nb == 0) { nb = s->u.bar.len; } else if (nb != s->u.bar.len) { mrest_time = -1; /* different duration */ break; } } if (mrest_time < 0) { for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (s && s->type == MREST) mrest_expand(s); } } } /* link the vertical sequence */ for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (!s || s->time != time || w_tb[s->type] != wmin) continue; if (s->type == STAVES) { // change STAVES to a flag sy = sy->next; set_sy = new_sy = 1; if (s->prev) s->prev->next = s->next; else voice_tb[voice].sym = s->next; if (s->next) s->next->prev = s->prev; } else { if (fl) { fl = 0; s->sflags |= S_SEQST; } if (new_sy) { new_sy = 0; s->sflags |= S_NEW_SY; } s->ts_prev = prev; if (prev) { prev->ts_next = s; //fixme: bad error when the 1st voice is second // if (s->type == BAR // && (s->sflags & S_SECOND) // && prev->type != BAR // && !(s->flags & ABC_F_INVIS)) // error(1, s, "Bad measure bar"); } else { tsfirst = s; } prev = s; } vtb[voice] = s->next; } fl = wmin; /* start a new sequence if some space */ } if (!prev) return; /* if no bar or format_change at end of tune, add a dummy symbol */ if ((prev->type != BAR && prev->type != FMTCHG) || new_sy) { p_voice = &voice_tb[prev->voice]; p_voice->last_sym = prev; s = sym_add(p_voice, FMTCHG); s->aux = -1; s->time = prev->time + prev->dur; s->sflags = S_SEQST; if (new_sy) s->sflags |= S_NEW_SY; prev->ts_next = s; s->ts_prev = prev; for (;;) { prev->sflags &= ~S_EOLN; if (prev->sflags & S_SEQST) break; prev = prev->ts_prev; } } /* if Q: from tune header, put it at start of the music */ s2 = info['Q' - 'A']; if (!s2) return; info['Q' - 'A'] = NULL; s = tsfirst->extra; while (s) { if (s->type == TEMPO) return; /* already a tempo */ s = s->next; } s = tsfirst; s2->type = TEMPO; s2->voice = s->voice; s2->staff = s->staff; s2->time = s->time; if (s->extra) { s2->next = s->extra; s2->next->prev = s2; } s->extra = s2; } /* -- move the symbols with no width to the next symbol -- */ static void voice_compress(void) { struct VOICE_S *p_voice; struct SYMBOL *s, *s2, *s3, *ns; for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { //8.7.0 - for fmt at end of music line // if (p_voice->ignore) // continue; p_voice->ignore = 0; for (s = p_voice->sym; s; s = s->next) { if (s->time >= staves_found) break; } ns = NULL; 
for ( ; s; s = s->next) { switch (s->type) { #if 0 // test case KEYSIG: /* remove the empty key signatures */ if (s->u.key.empty) { if (s->prev) s->prev->next = s->next; else p_voice->sym = s->next; if (s->next) s->next->prev = s->prev; continue; } break; #endif case FMTCHG: s2 = s->extra; if (s2) { /* dummy format */ if (!ns) ns = s2; if (s->prev) { s->prev->next = s2; s2->prev = s->prev; } if (!s->next) { ns = NULL; break; } while (s2->next) s2 = s2->next; s->next->prev = s2; s2->next = s->next; } /* fall thru */ case TEMPO: case PART: case TUPLET: if (!ns) ns = s; continue; case MREST: /* don't shift P: and Q: */ if (!ns) continue; s2 = (struct SYMBOL *) getarena(sizeof *s); memset(s2, 0, sizeof *s2); s2->type = SPACE; s2->u.note.notes[1].len = -1; s2->flags = ABC_F_INVIS; s2->voice = s->voice; s2->staff = s->staff; s2->time = s->time; s2->sflags = s->sflags; s2->next = s; s2->prev = s->prev; s2->prev->next = s2; s->prev = s2; s = s2; break; } if (s->flags & ABC_F_GRACE) { if (!ns) ns = s; while (!(s->flags & ABC_F_GR_END)) s = s->next; s2 = (struct SYMBOL *) getarena(sizeof *s); memcpy(s2, s, sizeof *s2); s2->abc_type = 0; s2->type = GRACE; s2->dur = 0; s2->next = s->next; if (s2->next) { s2->next->prev = s2; if (cfmt.graceword) { for (s3 = s2->next; s3; s3 = s3->next) { switch (s3->type) { case SPACE: continue; case NOTEREST: s2->ly = s3->ly; s3->ly = NULL; default: break; } break; } } } else { p_voice->last_sym = s2; } s2->prev = s; s->next = s2; s = s2; // with w_tb[BAR] = 2, // the grace notes go after the bar // if before a bar, change the grace time if (s->next && s->next->type == BAR) s->time--; } if (!ns) continue; s->extra = ns; s->prev->next = NULL; s->prev = ns->prev; if (s->prev) s->prev->next = s; else p_voice->sym = s; ns->prev = NULL; ns = NULL; } /* when symbols with no space at end of tune, * add a dummy format */ if (ns) { s = sym_add(p_voice, FMTCHG); s->aux = -1; /* nothing */ s->extra = ns; s->prev->next = NULL; /* unlink */ s->prev = ns->prev; if (s->prev) s->prev->next = s; else p_voice->sym = s; ns->prev = NULL; } } } /* -- duplicate the voices as required -- */ static void voice_dup(void) { struct VOICE_S *p_voice, *p_voice2; struct SYMBOL *s, *s2, *g, *g2; int voice; for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { if ((voice = p_voice->clone) < 0) continue; p_voice->clone = -1; p_voice2 = &voice_tb[voice]; for (s = p_voice->sym; s; s = s->next) { //fixme: there may be other symbols before the %%staves at this same time if (s->time >= staves_found) break; } for ( ; s; s = s->next) { if (s->type == STAVES) continue; s2 = (struct SYMBOL *) getarena(sizeof *s2); memcpy(s2, s, sizeof *s2); s2->prev = p_voice2->last_sym; s2->next = NULL; if (p_voice2->sym) p_voice2->last_sym->next = s2; else p_voice2->sym = s2; p_voice2->last_sym = s2; s2->voice = voice; s2->staff = p_voice2->staff; if (p_voice2->second) s2->sflags |= S_SECOND; else s2->sflags &= ~S_SECOND; if (p_voice2->floating) s2->sflags |= S_FLOATING; else s2->sflags &= ~S_FLOATING; s2->ly = NULL; g = s2->extra; if (!g) continue; g2 = (struct SYMBOL *) getarena(sizeof *g2); memcpy(g2, g, sizeof *g2); s2->extra = g2; s2 = g2; s2->voice = voice; s2->staff = p_voice2->staff; for (g = g->next; g; g = g->next) { g2 = (struct SYMBOL *) getarena(sizeof *g2); memcpy(g2, g, sizeof *g2); s2->next = g2; g2->prev = s2; s2 = g2; s2->voice = voice; s2->staff = p_voice2->staff; } } } } /* -- create a new staff system -- */ static void system_new(void) { struct SYSTEM *new_sy; int staff, voice; new_sy = (struct 
SYSTEM *) getarena(sizeof *new_sy); if (!parsys) { memset(new_sy, 0, sizeof *new_sy); for (voice = 0; voice < MAXVOICE; voice++) { new_sy->voice[voice].range = -1; } for (staff = 0; staff < MAXSTAFF; staff++) { new_sy->staff[staff].stafflines = "|||||"; new_sy->staff[staff].staffscale = 1; } cursys = new_sy; } else { for (voice = 0; voice < MAXVOICE; voice++) { // update the previous system // if (parsys->voice[voice].range < 0 // || parsys->voice[voice].second) // continue; staff = parsys->voice[voice].staff; if (voice_tb[voice].stafflines) parsys->staff[staff].stafflines = voice_tb[voice].stafflines; if (voice_tb[voice].staffscale != 0) parsys->staff[staff].staffscale = voice_tb[voice].staffscale; } memcpy(new_sy, parsys, sizeof *new_sy); for (voice = 0; voice < MAXVOICE; voice++) { new_sy->voice[voice].range = -1; new_sy->voice[voice].second = 0; } for (staff = 0; staff < MAXSTAFF; staff++) new_sy->staff[staff].flags = 0; parsys->next = new_sy; } parsys = new_sy; } /* -- initialize the voices and staves -- */ /* this routine is called when starting the generation */ static void system_init(void) { voice_compress(); voice_dup(); sort_all(); /* define the time / vertical sequences */ // if (!tsfirst) // return; // parsys->nstaff = nstaff; /* save the number of staves */ } /* go to a global (measure + time) */ static struct SYMBOL *go_global_time(struct SYMBOL *s, struct symsel_s *symsel) { struct SYMBOL *s2; int bar_time; if (symsel->bar <= 1) { /* special case: there is no measure 0/1 */ // && nbar == -1) { /* see set_bar_num */ if (symsel->bar == 0) goto chk_time; for (s2 = s; s2; s2 = s2->ts_next) { if (s2->type == BAR && s2->time != 0) break; } if (s2->time < voice_tb[cursys->top_voice].meter.wmeasure) s = s2; goto chk_time; } for ( ; s; s = s->ts_next) { if (s->type == BAR && s->aux >= symsel->bar) break; } if (!s) return NULL; if (symsel->seq != 0) { int seq; seq = symsel->seq; for (s = s->ts_next; s; s = s->ts_next) { if (s->type == BAR && s->aux == symsel->bar) { if (--seq == 0) break; } } if (!s) return NULL; } chk_time: if (symsel->time == 0) return s; bar_time = s->time + symsel->time; while (s->time < bar_time) { s = s->ts_next; if (!s) return s; } do { s = s->ts_prev; /* go back to the previous sequence */ } while (!(s->sflags & S_SEQST)); return s; } /* treat %%clip */ static void do_clip(void) { struct SYMBOL *s, *s2; struct SYSTEM *sy; struct VOICE_S *p_voice; int voice; /* remove the beginning of the tune */ s = tsfirst; if (clip_start.bar > 0 || clip_start.time > 0) { s = go_global_time(s, &clip_start); if (!s) { tsfirst = NULL; return; } /* update the start of voices */ sy = cursys; for (s2 = tsfirst; s2 != s; s2 = s2->ts_next) { if (s->sflags & S_NEW_SY) sy = sy->next; switch (s2->type) { case CLEF: voice_tb[s2->voice].s_clef = s2; break; case KEYSIG: memcpy(&voice_tb[s2->voice].key, &s2->u.key, sizeof voice_tb[0].key); break; case TIMESIG: memcpy(&voice_tb[s2->voice].meter, &s2->u.meter, sizeof voice_tb[0].meter); break; } } cursys = sy; for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { voice = p_voice - voice_tb; for (s2 = s; s2; s2 = s2->ts_next) { if (s2->voice == voice) { s2->prev = NULL; break; } } p_voice->sym = s2; } tsfirst = s; s->ts_prev = NULL; } /* remove the end of the tune */ s = go_global_time(s, &clip_end); if (!s) return; /* keep the current sequence */ do { s = s->ts_next; if (!s) return; } while (!(s->sflags & S_SEQST)); /* cut the voices */ for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { voice = p_voice - voice_tb; for 
(s2 = s->ts_prev; s2; s2 = s2->ts_prev) { if (s2->voice == voice) { s2->next = NULL; break; } } if (!s2) p_voice->sym = NULL; } s->ts_prev->ts_next = NULL; } /* -- set the bar numbers and treat %%clip / %%break -- */ static void set_bar_num(void) { struct SYMBOL *s, *s2, *s3; int bar_time, wmeasure, tim; int bar_num, bar_rep; wmeasure = voice_tb[cursys->top_voice].meter.wmeasure; bar_rep = nbar; /* don't count a bar at start of line */ for (s = tsfirst; ; s = s->ts_next) { if (!s) return; switch (s->type) { case TIMESIG: case CLEF: case KEYSIG: case FMTCHG: case STBRK: continue; case BAR: if (s->aux) { nbar = s->aux; /* (%%setbarnb) */ break; } if (s->u.bar.repeat_bar && s->text && !cfmt.contbarnb) { if (s->text[0] == '1') { bar_rep = nbar; } else { nbar = bar_rep; /* restart bar numbering */ s->aux = nbar; } } break; } break; } /* set the measure number on the top bars * and move the clefs before the measure bars */ bar_time = s->time + wmeasure; /* for incomplete measure at start of tune */ bar_num = nbar; for ( ; s; s = s->ts_next) { switch (s->type) { case CLEF: if (s->sflags & S_NEW_SY) break; for (s2 = s->ts_prev; s2; s2 = s2->ts_prev) { if (s2->sflags & S_NEW_SY) { s2 = NULL; break; } switch (s2->type) { case BAR: if (s2->sflags & S_SEQST) break; continue; case MREST: case NOTEREST: case SPACE: case STBRK: case TUPLET: s2 = NULL; break; default: continue; } break; } if (!s2) break; /* move the clef */ s->next->prev = s->prev; s->prev->next = s->next; s->ts_next->ts_prev = s->ts_prev; s->ts_prev->ts_next = s->ts_next; s->next = s2; s->prev = s2->prev; if (s->prev) s->prev->next = s; s2->prev = s; s->ts_next = s2; s->ts_prev = s2->ts_prev; if (s->ts_prev) s->ts_prev->ts_next = s; s2->ts_prev = s; // if (s->sflags & S_NEW_SY) { // s->sflags &= ~S_NEW_SY; // s->ts_next->sflags |= S_NEW_SY; // } s3 = s->extra; if (s3) { if (s->ts_next->extra) { while (s3->next) s3 = s3->next; s3->next = s->ts_next->extra; s->ts_next->extra = s->extra; } else { s->ts_next->extra = s3; } s->extra = NULL; } s = s2; break; case TIMESIG: wmeasure = s->u.meter.wmeasure; if (s->time < bar_time) bar_time = s->time + wmeasure; break; case MREST: bar_num += s->u.bar.len - 1; while (s->ts_next && s->ts_next->type != BAR) s = s->ts_next; break; case BAR: // if (s->flags & ABC_F_INVIS) // break; if (s->aux) { bar_num = s->aux; /* (%%setbarnb) */ // if (s->time < bar_time) { // s->aux = 0; break; // } } else { if (s->time < bar_time) /* incomplete measure */ break; bar_num++; } /* check if any repeat bar at this time */ tim = s->time; s2 = s; do { if (s2->type == BAR && s2->u.bar.repeat_bar && s2->text && !cfmt.contbarnb) { if (s2->text[0] == '1') bar_rep = bar_num; else /* restart bar numbering */ bar_num = bar_rep; break; } s2 = s2->next; } while (s2 && s2->time == tim); s->aux = bar_num; bar_time = s->time + wmeasure; break; } } /* do the %%clip stuff */ if (clip_start.bar >= 0) { if (bar_num <= clip_start.bar || nbar > clip_end.bar) { tsfirst = NULL; return; } do_clip(); } /* do the %%break stuff */ { struct brk_s *brk; int nbar_min; // if (nbar == 1) // nbar = -1; /* see go_global_time */ nbar_min = nbar; if (nbar_min == 1) nbar_min = -1; for (brk = brks; brk; brk = brk->next) { if (brk->symsel.bar <= nbar_min || brk->symsel.bar > bar_num) continue; s = go_global_time(tsfirst, &brk->symsel); if (s) s->sflags |= S_EOLN; } } if (cfmt.measurenb < 0) /* if no display of measure bar */ nbar = bar_num; /* update in case of more music to come */ } /* -- generate a piece of tune -- */ static void generate(void) { int 
old_lvl, voice; struct VOICE_S *p_voice; system_init(); if (!tsfirst) return; /* no symbol */ set_bar_num(); if (!tsfirst) return; /* no more symbol */ old_lvl = lvlarena(2); output_music(); clrarena(2); /* clear generation */ lvlarena(old_lvl); /* reset the parser */ for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { voice = p_voice - voice_tb; p_voice->sym = p_voice->last_sym = NULL; p_voice->time = 0; p_voice->have_ly = 0; p_voice->staff = cursys->voice[voice].staff; p_voice->second = cursys->voice[voice].second; p_voice->s_clef->time = 0; p_voice->lyric_start = NULL; } staves_found = 0; // (for voice compress/dup) } /* -- output the music and lyrics after tune -- */ static void gen_ly(int eob) { generate(); if (info['W' - 'A']) { put_words(info['W' - 'A']); info['W' - 'A'] = NULL; } if (eob) buffer_eob(0); } /* * for transpose purpose, check if a pitch is already in the measure or * if it is tied from a previous note, and return the associated accidental */ static int acc_same_pitch(int pitch) { struct SYMBOL *s = curvoice->last_sym->prev; int i, time; // the overlaid voices may have no measure bars // if (curvoice->id[0] == '&') // s = voice_tb[curvoice->mvoice].last_sym; if (!s) return -1; time = s->time; for (; s; s = s->prev) { switch (s->abc_type) { case ABC_T_BAR: if (s->time < time) return -1; /* no same pitch */ for (;;) { s = s->prev; if (!s) return -1; if (s->abc_type == ABC_T_NOTE) { if (s->time + s->dur == time) break; return -1; } if (s->time < time) return -1; } for (i = 0; i <= s->nhd; i++) { if (s->u.note.notes[i].pit == pitch && s->u.note.notes[i].ti1) return s->u.note.notes[i].acc; } return -1; case ABC_T_NOTE: for (i = 0; i <= s->nhd; i++) { if (s->u.note.notes[i].pit == pitch) return s->u.note.notes[i].acc; } break; } } return -1; } /* transpose a note / chord */ static void note_transpose(struct SYMBOL *s) { int i, j, m, n, d, a, dp, i1, i2, i3, i4, sf_old; static const signed char acc1[6] = {0, 1, 0, -1, 2, -2}; static const char acc2[5] = {A_DF, A_FT, A_NT, A_SH, A_DS}; m = s->nhd; sf_old = curvoice->okey.sf; i2 = curvoice->ckey.sf - sf_old; dp = cgd2cde[(i2 + 4 * 7) % 7]; if (curvoice->transpose < 0 && dp != 0) dp -= 7; dp += curvoice->transpose / 3 / 12 * 7; for (i = 0; i <= m; i++) { /* pitch */ n = s->u.note.notes[i].pit; s->u.note.notes[i].pit += dp; s->pits[i] += dp; /* accidental */ i1 = cde2fcg[(n + 5 + 16 * 7) % 7]; /* fcgdaeb */ a = s->u.note.notes[i].acc & 0x07; if (a == 0) { if (curvoice->okey.nacc == 0) { if (sf_old > 0) { if (i1 < sf_old - 1) a = A_SH; } else if (sf_old < 0) { if (i1 >= sf_old + 6) a = A_FT; } } else { for (j = 0; j < curvoice->okey.nacc; j++) { if ((n + 16 * 7 - curvoice->okey.pits[j]) % 7 == 0) { a = curvoice->okey.accs[j]; break; } } } } i3 = i1 + i2 + acc1[a] * 7; i1 = ((i3 + 1 + 21) / 7 + 2 - 3 + 32 * 5) % 5; a = acc2[(unsigned) i1]; if (s->u.note.notes[i].acc != 0) { ; } else if (curvoice->ckey.empty) { /* key none */ if (a == A_NT || acc_same_pitch(s->u.note.notes[i].pit) >= 0) continue; } else if (curvoice->ckey.nacc > 0) { /* acc list */ i4 = cgd2cde[(unsigned) ((i3 + 16 * 7) % 7)]; for (j = 0; j < curvoice->ckey.nacc; j++) { if ((i4 + 16 * 7 - curvoice->ckey.pits[j]) % 7 == 0) break; } if (j < curvoice->ckey.nacc) continue; } else { continue; } i1 = s->u.note.notes[i].acc & 0x07; i4 = s->u.note.notes[i].acc >> 3; if (i4 != 0 /* microtone */ && i1 != a) { /* different accidental type */ if (s->u.note.microscale) { n = i4; d = s->u.note.microscale; } else { n = parse.micro_tb[i4]; d = ((n & 0xff) + 1) * 2; n = 
(n >> 8) + 1; } //fixme: double sharps/flats ?*/ //fixme: does not work in all cases (tied notes, previous accidental) switch (a) { case A_NT: if (n >= d / 2) { n -= d / 2; a = i1; } else { a = i1 == A_SH ? A_FT : A_SH; } break; case A_DS: if (n >= d / 2) { s->u.note.notes[i].pit += 1; s->pits[i] += 1; n -= d / 2; } else { n += d / 2; } a = i1; break; case A_DF: if (n >= d / 2) { s->u.note.notes[i].pit -= 1; s->pits[i] -= 1; n -= d / 2; } else { n += d / 2; } a = i1; break; } if (s->u.note.microscale) { i4 = n; } else { d = d / 2 - 1 + ((n - 1) << 8); for (i4 = 1; i4 < MAXMICRO; i4++) { if (parse.micro_tb[i4] == d) break; if (parse.micro_tb[i4] == 0) { parse.micro_tb[i4] = d; break; } } if (i4 == MAXMICRO) { error(1, s, "Too many microtone accidentals"); i4 = 0; } } } s->u.note.notes[i].acc = (i4 << 3) | a; } } /* transpose a guitar chord */ static void gch_tr1(struct SYMBOL *s, int i, int i2) { char *p = &s->text[i], *q = p + 1, *new_txt; int l, latin; int n, a, i1, i3, i4; static const char note_names[] = "CDEFGAB"; static const char *latin_names[7] = { "Do", "Ré", "Mi", "Fa", "Sol", "La", "Si" }; static const char *acc_name[5] = {"bb", "b", "", "#", "##"}; /* main chord */ latin = 0; switch (*p) { case 'A': case 'B': n = *p - 'A' + 5; break; case 'C': case 'E': case 'G': n = *p - 'C'; break; case 'D': if (p[1] == 'o') { latin++; n = 0; /* Do */ break; } n = 1; break; case 'F': if (p[1] == 'a') latin++; /* Fa */ n = 3; break; case 'L': latin++; /* La */ n = 5; break; case 'M': latin++; /* Mi */ n = 2; break; case 'R': latin++; if (p[1] != 'e') latin++; /* Ré */ n = 1; /* Re */ break; case 'S': latin++; if (p[1] == 'o') { latin++; n = 4; /* Sol */ } else { n = 6; /* Si */ } break; case '/': // bass only latin--; break; default: return; } q += latin; /* allocate a new string */ new_txt = getarena(strlen(s->text) + 6); l = p - s->text; memcpy(new_txt, s->text, l); s->text = new_txt; new_txt += l; p = q; if (latin >= 0) { // if some chord a = 0; while (*p == '#') { a++; p++; } while (*p == 'b') { a--; p++; } // if (*p == '=') // p++; i3 = cde2fcg[n] + i2 + a * 7; i4 = cgd2cde[(unsigned) ((i3 + 16 * 7) % 7)]; i1 = ((i3 + 1 + 21) / 7 + 2 - 3 + 32 * 5) % 5; /* accidental */ if (latin == 0) *new_txt++ = note_names[i4]; else new_txt += sprintf(new_txt, "%s", latin_names[i4]); new_txt += sprintf(new_txt, "%s", acc_name[i1]); } /* bass */ while (*p != '\0' && *p != '\n' && *p != '/') // skip 'm'/'dim'.. 
*new_txt++ = *p++; if (*p == '/') { *new_txt++ = *p++; //fixme: latin names not treated q = strchr(note_names, *p); if (q) { p++; n = q - note_names; if (*p == '#') { a = 1; p++; } else if (*p == 'b') { a = -1; p++; } else { a = 0; } i3 = cde2fcg[n] + i2 + a * 7; i4 = cgd2cde[(unsigned) ((i3 + 16 * 7) % 7)]; i1 = ((i3 + 1 + 21) / 7 + 2 - 3 + 32 * 5) % 5; *new_txt++ = note_names[i4]; new_txt += sprintf(new_txt, "%s", acc_name[i1]); } } strcpy(new_txt, p); } static void gch_capo(struct SYMBOL *s) { char *p = s->text, *q, *r; int i, l, li = 0; static const char *capo_txt = " (capo: %d)"; static signed char cap_trans[] = {0, 5, -2, 3, -4, 1, -6, -1, 4, -3, 2, -5}; // search the chord symbols for (;;) { if (!strchr("^_<>@", *p)) break; p = strchr(p, '\n'); if (!p) return; p++; } // add a capo chord symbol i = p - s->text; q = strchr(p + 1, '\n'); if (q) l = q - p; else l = strlen(p); if (!capo) { capo = 1; li = strlen(capo_txt); } r = (char *) getarena(strlen(s->text) + l + li + 1); i += l; strncpy(r, s->text, i); // annotations + chord symbol r[i++] = '\n'; strncpy(r + i, p, l); // capo if (li) { sprintf(r + i + l, capo_txt, cfmt.capo); l += li; } if (q) strcpy(r + i + l, q); // ending annotations s->text = r; gch_tr1(s, i, cap_trans[cfmt.capo % 12]); } static void gch_transpose(struct SYMBOL *s) { int in_ch = 0; int i2 = curvoice->ckey.sf - curvoice->okey.sf; char *o = s->text, *p = o; // search the chord symbols for (;;) { if (in_ch || !strchr("^_<>@", *p)) { gch_tr1(s, p - s->text, i2); p = s->text + (p - o); o = s->text; for (p++; *p; p++) { if (strchr("\t;\n", *p)) break; } if (!*p) break; switch (*p) { case '\t': in_ch = 1; break; case ';': in_ch = !strchr("^_<>@", p[1]); break; default: in_ch = 0; break; } } else { p = strchr(p, '\n'); if (!p) break; } p++; } } /* -- build the guitar chords / annotations -- */ static void gch_build(struct SYMBOL *s) { struct gch *gch; char *p, *q, antype, sep; float w, h_ann, h_gch, y_above, y_below, y_left, y_right; float xspc; int l, ix, box, gch_place; if (s->posit.gch == SL_HIDDEN) return; s->gch = getarena(sizeof *s->gch * MAXGCH); memset(s->gch, 0, sizeof *s->gch * MAXGCH); if (curvoice->transpose != 0) gch_transpose(s); if (cfmt.capo) gch_capo(s); /* split the guitar chords / annotations * and initialize their vertical offsets */ gch_place = s->posit.gch == SL_BELOW ? 
-1 : 1; h_gch = cfmt.font_tb[cfmt.gcf].size; h_ann = cfmt.font_tb[cfmt.anf].size; y_above = y_below = y_left = y_right = 0; box = cfmt.gchordbox; p = s->text; gch = s->gch; sep = '\n'; antype = 'g'; /* (compiler warning) */ for (;;) { if (sep != 'n' && strchr("^_<>@", *p)) { gch->font = cfmt.anf; antype = *p++; if (antype == '@') { int n; float xo, yo; if (sscanf(p, "%f,%f%n", &xo, &yo, &n) != 2) { error(1, s, "Error in annotation \"@\""); } else { p += n; if (*p == ' ') p++; gch->x = xo; gch->y = yo; } } } else if (sep == '\n') { gch->font = cfmt.gcf; gch->box = box; antype = 'g'; } else { gch->font = (gch - 1)->font; gch->box = (gch - 1)->box; } gch->type = antype; switch (antype) { default: /* guitar chord */ if (gch_place < 0) break; /* below */ y_above += h_gch; if (box) y_above += 2; break; case '^': /* above */ y_above += h_ann; break; case '_': /* below */ break; case '<': /* left */ y_left += h_ann * 0.5; break; case '>': /* right */ y_right += h_ann * 0.5; break; case '@': /* absolute */ if (gch->x == 0 && gch->y == 0 && gch != s->gch && s->gch->type == '@') { /* if not 1st line */ gch->x = (gch - 1)->x; gch->y = (gch - 1)->y - h_ann; } break; } gch->idx = p - s->text; for (;;) { switch (*p) { default: p++; continue; case '\\': p++; if (*p == 'n') { p[-1] = '\0'; break; /* sep = 'n' */ } p++; continue; case '&': /* skip "&xxx;" */ for (;;) { switch (*p) { default: p++; continue; case ';': p++; case '\0': case '\n': case '\\': break; } break; } continue; case '\0': case ';': case '\n': break; } break; } sep = *p; if (sep == '\0') break; *p++ = '\0'; gch++; if (gch - s->gch >= MAXGCH) { error(1, s, "Too many guitar chords / annotations"); break; } } /* change the accidentals in the guitar chords */ for (ix = 0, gch = s->gch; ix < MAXGCH; ix++, gch++) { if (gch->type == '\0') break; if (gch->type != 'g') continue; p = s->text + gch->idx; q = p; for (; *p != '\0'; p++) { switch (*p) { case '#': case 'b': case '=': if (p == q /* 1st char or after a slash */ || (p != q + 1 /* or invert '\' behaviour */ && p[-1] == '\\')) break; /* set the accidentals as unused utf-8 values * (see subs.c) */ switch (*p) { case '#': *p = 0x01; break; case 'b': *p = 0x02; break; default: /* case '=': */ *p = 0x03; break; } if (p[-1] == '\\') { p--; l = strlen(p); memmove(p, p + 1, l); } break; case ' ': case '/': q = p + 1; break; } } } /* set the offsets and widths */ /*fixme:utf8*/ for (ix = 0, gch = s->gch; ix < MAXGCH; ix++, gch++) { if (gch->type == '\0') break; if (gch->type == '@') continue; /* no width */ p = s->text + gch->idx; str_font(gch->font); w = tex_str(p); gch->w = w; // + 4; switch (gch->type) { case '_': /* below */ xspc = w * GCHPRE; if (xspc > 8) xspc = 8; gch->x = -xspc; y_below -= h_ann; gch->y = y_below; break; case '^': /* above */ xspc = w * GCHPRE; if (xspc > 8) xspc = 8; gch->x = -xspc; y_above -= h_ann; gch->y = y_above; break; default: /* guitar chord */ xspc = w * GCHPRE; if (xspc > 8) xspc = 8; gch->x = -xspc; if (gch_place < 0) { /* below */ y_below -= h_gch; gch->y = y_below; if (box) { y_below -= 2; gch->y -= 1; } } else { y_above -= h_gch; gch->y = y_above; if (box) { y_above -= 2; gch->y -= 1; } } break; case '<': /* left */ gch->x = -(w + 6); y_left -= h_ann; gch->y = y_left; break; case '>': /* right */ gch->x = 6; y_right -= h_ann; gch->y = y_right; break; } } } /* get the note which will receive a lyric word */ static struct SYMBOL *next_lyric_note(struct SYMBOL *s) { while (s && (s->abc_type != ABC_T_NOTE || (s->flags & ABC_F_GRACE))) s = s->next; return s; } /* 
-- parse lyric (vocal) lines (w:) -- */ static struct SYMBOL *get_lyric(struct SYMBOL *s) { struct SYMBOL *s1, *s2; char word[128], *p, *q; int ln, cont; struct FONTSPEC *f; curvoice->have_ly = curvoice->posit.voc != SL_HIDDEN; if (curvoice->ignore) { for (;;) { if (!s->abc_next) return s; switch (s->abc_next->abc_type) { case ABC_T_PSCOM: s = process_pscomment(s->abc_next); continue; case ABC_T_INFO: if (s->abc_next->text[0] == 'w' || s->abc_next->text[0] == '+') { s = s->abc_next; continue; } break; } return s; } } f = &cfmt.font_tb[cfmt.vof]; str_font(cfmt.vof); /* (for tex_str) */ /* treat all w: lines */ cont = 0; ln = -1; s2 = s1 = NULL; // have gcc happy for (;;) { if (!cont) { if (ln >= MAXLY- 1) { error(1, s, "Too many lyric lines"); ln--; } ln++; s2 = s1; s1 = curvoice->lyric_start; if (!s1) s1 = curvoice->sym; else s1 = s1->next; if (!s1) { error(1, s, "w: without music"); return s; } } else { cont = 0; } /* scan the lyric line */ p = &s->text[2]; while (*p != '\0') { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; if (*p == '\\' && p[1] == '\0') { cont = 1; break; } switch (*p) { case '|': while (s1 && s1->type != BAR) { s2 = s1; s1 = s1->next; } if (!s1) { error(1, s2, "Not enough bar lines for lyric line"); goto ly_next; } s2 = s1; s1 = s1->next; p++; continue; case '-': word[0] = LY_HYPH; word[1] = '\0'; p++; break; case '_': word[0] = LY_UNDER; word[1] = '\0'; p++; break; case '*': word[0] = '\0'; p++; break; default: q = word; for (;;) { unsigned char c; c = *p; switch (c) { case '\0': case ' ': case '\t': case '_': case '*': case '|': break; case '~': c = ' '; goto addch; case '-': c = LY_HYPH; goto addch; case '\\': if (p[1] == '\0') break; switch (p[1]) { case '~': case '_': case '*': case '|': case '-': case ' ': case '\\': c = *++p; break; } /* fall thru */ default: addch: if (q < &word[sizeof word - 1]) *q++ = c; p++; if (c == LY_HYPH) break; continue; } break; } *q = '\0'; break; } /* store the word in the next note */ if (s1) { /* for error */ s2 = s1; s1 = next_lyric_note(s1); } if (!s1) { if (!s2) s2 = s; error(1, s2, "Too many words in lyric line"); goto ly_next; } if (word[0] != '\0' && s1->posit.voc != SL_HIDDEN) { struct lyl *lyl; float w; if (!s1->ly) { s1->ly = (struct lyrics *) getarena(sizeof (struct lyrics)); memset(s1->ly, 0, sizeof (struct lyrics)); } /* handle the font change at start of text */ q = word; if (*q == '$' && isdigit((unsigned char) q[1]) && (unsigned) (q[1] - '0') < FONT_UMAX) { int ft; ft = q[1] - '0'; if (ft == 0) ft = cfmt.vof; f = &cfmt.font_tb[ft]; str_font(ft); q += 2; } w = tex_str(q); q = tex_buf; lyl = (struct lyl *) getarena(sizeof *s1->ly->lyl[0] - sizeof s1->ly->lyl[0]->t + strlen(q) + 1); s1->ly->lyl[ln] = lyl; lyl->f = f; lyl->w = w; strcpy(lyl->t, q); /* handle the font changes inside the text */ while (*q != '\0') { if (*q == '$' && isdigit((unsigned char) q[1]) && (unsigned) (q[1] - '0') < FONT_UMAX) { int ft; q++; ft = *q - '0'; if (ft == 0) ft = cfmt.vof; f = &cfmt.font_tb[ft]; str_font(ft); } q++; } } s2 = s1; s1 = s1->next; } /* loop if more lyrics */ ly_next: for (;;) { if (!s->abc_next) goto ly_upd; switch (s->abc_next->abc_type) { case ABC_T_PSCOM: s = process_pscomment(s->abc_next); f = &cfmt.font_tb[cfmt.vof]; /* may have changed */ str_font(cfmt.vof); continue; case ABC_T_INFO: if (s->abc_next->text[0] != 'w' && s->abc_next->text[0] != '+') goto ly_upd; s = s->abc_next; if (s->text[0] == '+') cont = 1; if (!cont) { s1 = next_lyric_note(s1); if (s1) { error(1, s1, "Not enough words for lyric 
line"); } } break; /* more lyric */ default: goto ly_upd; } break; } } /* the next lyrics will go into the next notes */ ly_upd: //fixme: no error with abc-2.1 if (next_lyric_note(s1)) error(0, s1, "Not enough words for lyric line"); // fill the w: with 'blank syllabes' curvoice->lyric_start = curvoice->last_sym; return s; } /* -- add a voice in the linked list -- */ static void voice_link(struct VOICE_S *p_voice) { struct VOICE_S *p_voice2; p_voice2 = first_voice; for (;;) { if (p_voice2 == p_voice) return; if (!p_voice2->next) break; p_voice2 = p_voice2->next; } p_voice2->next = p_voice; } /* -- get a voice overlay -- */ static void get_over(struct SYMBOL *s) { struct VOICE_S *p_voice, *p_voice2, *p_voice3; int range, voice, voice2, voice3; static char tx_wrong_dur[] = "Wrong duration in voice overlay"; static char txt_no_note[] = "No note in voice overlay"; /* treat the end of overlay */ p_voice = curvoice; if (p_voice->ignore) return; if (s->abc_type == ABC_T_BAR || s->u.v_over.type == V_OVER_E) { if (!p_voice->last_sym) { error(1, s, txt_no_note); return; } p_voice->last_sym->sflags |= S_BEAM_END; over_bar = 0; if (over_time < 0) { error(1, s, "Erroneous end of voice overlap"); return; } if (p_voice->time != over_mxtime) error(1, s, tx_wrong_dur); curvoice = &voice_tb[over_voice]; over_mxtime = 0; over_voice = -1; over_time = -1; return; } /* treat the full overlay start */ if (s->u.v_over.type == V_OVER_S) { over_voice = p_voice - voice_tb; over_time = p_voice->time; return; } /* (here is treated a new overlay - '&') */ /* create the extra voice if not done yet */ if (!p_voice->last_sym) { error(1, s, txt_no_note); return; } p_voice->last_sym->sflags |= S_BEAM_END; voice2 = s->u.v_over.voice; p_voice2 = &voice_tb[voice2]; if (parsys->voice[voice2].range < 0) { int clone; if (cfmt.abc2pscompat) { error(1, s, "Cannot have %%%%abc2pscompat"); cfmt.abc2pscompat = 0; } clone = p_voice->clone >= 0; p_voice2->id[0] = '&'; p_voice2->id[1] = '\0'; p_voice2->second = 1; parsys->voice[voice2].second = 1; p_voice2->scale = p_voice->scale; p_voice2->octave = p_voice->octave; p_voice2->transpose = p_voice->transpose; memcpy(&p_voice2->key, &p_voice->key, sizeof p_voice2->key); memcpy(&p_voice2->ckey, &p_voice->ckey, sizeof p_voice2->ckey); memcpy(&p_voice2->okey, &p_voice->okey, sizeof p_voice2->okey); p_voice2->posit = p_voice->posit; p_voice2->staff = p_voice->staff; p_voice2->cstaff = p_voice->cstaff; p_voice2->color = p_voice->color; p_voice2->map_name = p_voice->map_name; range = parsys->voice[p_voice - voice_tb].range; for (voice = 0; voice < MAXVOICE; voice++) { if (parsys->voice[voice].range > range) parsys->voice[voice].range += clone + 1; } parsys->voice[voice2].range = range + 1; voice_link(p_voice2); if (clone) { for (voice3 = MAXVOICE; --voice3 >= 0; ) { if (parsys->voice[voice3].range < 0) break; } if (voice3 > 0) { p_voice3 = &voice_tb[voice3]; strcpy(p_voice3->id, p_voice2->id); p_voice3->second = 1; parsys->voice[voice3].second = 1; p_voice3->scale = voice_tb[p_voice->clone].scale; parsys->voice[voice3].range = range + 2; voice_link(p_voice3); p_voice2->clone = voice3; } else { error(1, s, "Too many voices for overlay cloning"); } } } voice = p_voice - voice_tb; // p_voice2->cstaff = p_voice2->staff = parsys->voice[voice2].staff // = parsys->voice[voice].staff; // if ((voice3 = p_voice2->clone) >= 0) { // p_voice3 = &voice_tb[voice3]; // p_voice3->cstaff = p_voice3->staff // = parsys->voice[voice3].staff // = parsys->voice[p_voice->clone].staff; // } if (over_time < 0) { /* first 
'&' in a measure */ int time; over_bar = 1; over_mxtime = p_voice->time; over_voice = voice; time = p_voice2->time; for (s = p_voice->last_sym; /*s*/; s = s->prev) { if (s->type == BAR || s->time <= time) /* (if start of tune) */ break; } over_time = s->time; } else { if (over_mxtime == 0) over_mxtime = p_voice->time; else if (p_voice->time != over_mxtime) error(1, s, tx_wrong_dur); } p_voice2->time = over_time; curvoice = p_voice2; } struct staff_s { short voice; short flags; }; /* -- parse %%staves / %%score -- */ static void parse_staves(struct SYMBOL *s, struct staff_s *staves) { char *p; int voice, flags_st, brace, bracket, parenth, err; short flags; struct staff_s *p_staff; /* define the voices */ err = 0; flags = 0; brace = bracket = parenth = 0; flags_st = 0; voice = 0; p = s->text + 7; while (*p != '\0' && !isspace((unsigned char) *p)) p++; while (*p != '\0') { switch (*p) { case ' ': case '\t': break; case '[': if (parenth || brace + bracket >= 2) { error(1, s, "Misplaced '[' in %%%%staves"); err = 1; break; } if (brace + bracket == 0) flags |= OPEN_BRACKET; else flags |= OPEN_BRACKET2; bracket++; flags_st <<= 8; flags_st |= OPEN_BRACKET; break; case '{': if (parenth || brace || bracket >= 2) { error(1, s, "Misplaced '{' in %%%%staves"); err = 1; break; } if (bracket == 0) flags |= OPEN_BRACE; else flags |= OPEN_BRACE2; brace++; flags_st <<= 8; flags_st |= OPEN_BRACE; break; case '(': if (parenth) { error(1, s, "Misplaced '(' in %%%%staves"); err = 1; break; } flags |= OPEN_PARENTH; parenth++; flags_st <<= 8; flags_st |= OPEN_PARENTH; break; case '*': if (brace && !parenth && !(flags & (OPEN_BRACE | OPEN_BRACE2))) flags |= FL_VOICE; break; case '+': flags |= MASTER_VOICE; break; default: if (!isalnum((unsigned char) *p) && *p != '_') { error(1, s, "Bad voice ID in %%%%staves"); err = 1; break; } if (voice >= MAXVOICE) { error(1, s, "Too many voices in %%%%staves"); err = 1; break; } { int i, v; char sep, *q; q = p; while (isalnum((unsigned char) *p) || *p == '_') p++; sep = *p; *p = '\0'; /* search the voice in the voice table */ v = -1; for (i = 0; i < MAXVOICE; i++) { if (strcmp(q, voice_tb[i].id) == 0) { v = i; break; } } if (v < 0) { error(1, s, "Voice '%s' of %%%%staves has no symbol", q); err = 1; // break; p_staff = staves; } else { p_staff = staves + voice++; p_staff->voice = v; } *p = sep; } for ( ; *p != '\0'; p++) { switch (*p) { case ' ': case '\t': continue; case ']': if (!(flags_st & OPEN_BRACKET)) { error(1, s, "Misplaced ']' in %%%%staves"); err = 1; break; } bracket--; if (brace + bracket == 0) flags |= CLOSE_BRACKET; else flags |= CLOSE_BRACKET2; flags_st >>= 8; continue; case '}': if (!(flags_st & OPEN_BRACE)) { error(1, s, "Misplaced '}' in %%%%staves"); err = 1; break; } brace--; if (bracket == 0) flags |= CLOSE_BRACE; else flags |= CLOSE_BRACE2; flags &= ~FL_VOICE; flags_st >>= 8; continue; case ')': if (!(flags_st & OPEN_PARENTH)) { error(1, s, "Misplaced ')' in %%%%staves"); err = 1; break; } parenth--; flags |= CLOSE_PARENTH; flags_st >>= 8; continue; case '|': flags |= STOP_BAR; continue; } break; } p_staff->flags = flags; flags = 0; if (*p == '\0') break; continue; } if (*p == '\0') break; p++; } if (flags_st != 0) { error(1, s, "'}', ')' or ']' missing in %%%%staves"); err = 1; } if (err) { int i; for (i = 0; i < voice; i++) staves[i].flags = 0; } if (voice < MAXVOICE) staves[voice].voice = -1; } /* -- get staves definition (%%staves / %%score) -- */ static void get_staves(struct SYMBOL *s) { // struct SYMBOL *s2; struct VOICE_S *p_voice, *p_voice2; 
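	/* staves[] is filled by parse_staves() with the voice indexes and the
	 * bracket / brace / parenthesis flags of the %%staves or %%score line;
	 * the list is terminated by a negative voice index */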
struct staff_s *p_staff, staves[MAXVOICE]; int i, flags, voice, staff, range, dup_voice, maxtime; memset(staves, 0, sizeof staves); parse_staves(s, staves); if (staves[0].voice < 0) // if error return; voice_compress(); voice_dup(); /* create a new staff system */ curvoice = p_voice = first_voice; maxtime = p_voice->time; flags = p_voice->sym != NULL; for (p_voice = p_voice->next; p_voice; p_voice = p_voice->next) { if (p_voice->time > maxtime) maxtime = p_voice->time; if (p_voice->sym) flags = 1; } if (flags == 0 /* if first %%staves */ || (maxtime == 0 && staves_found < 0)) { for (voice = 0; voice < MAXVOICE; voice++) parsys->voice[voice].range = -1; } else { /* * create a new staff system and * link the staves in a voice which is seen from * the previous system - see sort_all */ // p_voice = curvoice; if (parsys->voice[curvoice - voice_tb].range < 0) { for (voice = 0; voice < MAXVOICE; voice++) { if (parsys->voice[voice].range >= 0) { curvoice = &voice_tb[voice]; break; } } /*fixme: should check if voice < MAXVOICE*/ } curvoice->time = maxtime; // put the staves before a measure bar (see draw_bar()) // s2 = curvoice->last_sym; // if (s2 && s2->type == BAR && s2->time == maxtime) { // curvoice->last_sym = s2->prev; // if (!curvoice->last_sym) // curvoice->sym = NULL; // sym_link(s, STAVES); // s->next = s2; // s2->prev = s; // curvoice->last_sym = s2; // } else { sym_link(s, STAVES); // link the staves in the current voice // } s->state = ABC_S_HEAD; /* (output PS sequences immediately) */ parsys->nstaff = nstaff; system_new(); } staves_found = maxtime; /* initialize the voices */ for (voice = 0, p_voice = voice_tb; voice < MAXVOICE; voice++, p_voice++) { p_voice->second = 0; p_voice->floating = 0; p_voice->ignore = 0; p_voice->time = maxtime; } /* create the 'clone' voices */ dup_voice = MAXVOICE; range = 0; p_staff = staves; parsys->top_voice = p_staff->voice; for (i = 0; i < MAXVOICE && p_staff->voice >= 0; i++, p_staff++) { voice = p_staff->voice; p_voice = &voice_tb[voice]; if (parsys->voice[voice].range >= 0) { if (parsys->voice[dup_voice - 1].range >= 0) { error(1, s, "Too many voices for cloning"); continue; } voice = --dup_voice; /* duplicate the voice */ p_voice2 = &voice_tb[voice]; memcpy(p_voice2, p_voice, sizeof *p_voice2); p_voice2->next = NULL; p_voice2->sym = p_voice2->last_sym = NULL; p_voice2->tblts[0] = p_voice2->tblts[1] = NULL; p_voice2->clone = -1; while (p_voice->clone > 0) p_voice = &voice_tb[p_voice->clone]; p_voice->clone = voice; p_voice = p_voice2; p_staff->voice = voice; } parsys->voice[voice].range = range++; voice_link(p_voice); } /* change the behavior from %%staves to %%score */ if (s->text[3] == 't') { /* if %%staves */ for (i = 0, p_staff = staves; i < MAXVOICE - 2 && p_staff->voice >= 0; i++, p_staff++) { flags = p_staff->flags; if (!(flags & (OPEN_BRACE | OPEN_BRACE2))) continue; if ((flags & (OPEN_BRACE | CLOSE_BRACE)) == (OPEN_BRACE | CLOSE_BRACE) || (flags & (OPEN_BRACE2 | CLOSE_BRACE2)) == (OPEN_BRACE2 | CLOSE_BRACE2)) continue; if (p_staff[1].flags != 0) continue; if ((flags & OPEN_PARENTH) || (p_staff[2].flags & OPEN_PARENTH)) continue; /* {a b c} --> {a *b c} */ if (p_staff[2].flags & (CLOSE_BRACE | CLOSE_BRACE2)) { p_staff[1].flags |= FL_VOICE; /* {a b c d} --> {(a b) (c d)} */ } else if (p_staff[2].flags == 0 && (p_staff[3].flags & (CLOSE_BRACE | CLOSE_BRACE2))) { p_staff->flags |= OPEN_PARENTH; p_staff[1].flags |= CLOSE_PARENTH; p_staff[2].flags |= OPEN_PARENTH; p_staff[3].flags |= CLOSE_PARENTH; } } } /* set the staff system */ staff = -1; 
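	/* assign a staff to each voice: a voice flagged FL_VOICE floats between
	 * the neighbouring staves, and voices grouped by parentheses share the
	 * same staff, all but the main one (the first, or the one flagged '+')
	 * being marked as second voices */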
for (i = 0, p_staff = staves; i < MAXVOICE && p_staff->voice >= 0; i++, p_staff++) { flags = p_staff->flags; if ((flags & (OPEN_PARENTH | CLOSE_PARENTH)) == (OPEN_PARENTH | CLOSE_PARENTH)) { flags &= ~(OPEN_PARENTH | CLOSE_PARENTH); p_staff->flags = flags; } voice = p_staff->voice; p_voice = &voice_tb[voice]; if (flags & FL_VOICE) { p_voice->floating = 1; p_voice->second = 1; } else { #if MAXSTAFF < MAXVOICE if (staff >= MAXSTAFF - 1) { error(1, s, "Too many staves"); } else #endif staff++; parsys->staff[staff].flags = 0; } p_voice->staff = p_voice->cstaff = parsys->voice[voice].staff = staff; parsys->staff[staff].flags |= flags; if (flags & OPEN_PARENTH) { p_voice2 = p_voice; while (i < MAXVOICE) { i++; p_staff++; voice = p_staff->voice; p_voice = &voice_tb[voice]; if (p_staff->flags & MASTER_VOICE) { p_voice2->second = 1; p_voice2 = p_voice; } else { p_voice->second = 1; } p_voice->staff = p_voice->cstaff = parsys->voice[voice].staff = staff; if (p_staff->flags & CLOSE_PARENTH) break; } parsys->staff[staff].flags |= p_staff->flags; } } if (staff < 0) staff = 0; parsys->nstaff = nstaff = staff; /* change the behaviour of '|' in %%score */ if (s->text[3] == 'c') { /* if %%score */ for (staff = 0; staff < nstaff; staff++) parsys->staff[staff].flags ^= STOP_BAR; } for (voice = 0; voice < MAXVOICE; voice++) { p_voice = &voice_tb[voice]; parsys->voice[voice].second = p_voice->second; staff = p_voice->staff; if (staff > 0) p_voice->norepbra = !(parsys->staff[staff - 1].flags & STOP_BAR); if (p_voice->floating && staff == nstaff) p_voice->floating = 0; } curvoice = &voice_tb[parsys->top_voice]; } /* -- re-initialize all potential voices -- */ static void voice_init(void) { struct VOICE_S *p_voice; int i; for (i = 0, p_voice = voice_tb; i < MAXVOICE; i++, p_voice++) { p_voice->sym = p_voice->last_sym = NULL; p_voice->lyric_start = NULL; p_voice->bar_start = 0; p_voice->time = 0; p_voice->slur_st = 0; p_voice->hy_st = 0; p_voice->tie = 0; p_voice->rtie = 0; } } /* output a pdf mark */ static void put_pdfmark(char *p) { unsigned char c, *q; int u; p = trim_title(p, NULL); /* check if pure ASCII without '\', '(' nor ')'*/ for (q = (unsigned char *) p; *q != '\0'; q++) { switch (*q) { case '\\': case '(': case ')': break; default: if (*q >= 0x80) break; continue; } break; } if (*q == '\0') { a2b("[/Title(%s)/OUT pdfmark\n", p); return; } /* build utf-8 mark */ a2b("[/Title<FEFF"); q = (unsigned char *) p; u = -1; while (*q != '\0') { c = *q++; if (c < 0x80) { if (u >= 0) { a2b("%04X", u); u = -1; } a2b("%04X", (int) c); continue; } if (c < 0xc0) { u = (u << 6) | (c & 0x3f); continue; } if (u >= 0) { a2b("%04X", u); u = -1; } if (c < 0xe0) u = c & 0x1f; else if (c < 0xf0) u = c & 0x0f; else u = c & 0x07; } if (u >= 0) { a2b("%04X", u); u = -1; } a2b(">/OUT pdfmark\n"); } /* rebuild a tune header for %%tune filter */ static char *tune_header_rebuild(struct SYMBOL *s) { struct SYMBOL *s2; char *header, *p; int len; len = 0; s2 = s; for (;;) { if (s2->abc_type == ABC_T_INFO) { len += strlen(s2->text) + 1; if (s2->text[0] == 'K') break; } s2 = s2->abc_next; } header = malloc(len + 1); p = header; for (;;) { if (s->abc_type == ABC_T_INFO) { strcpy(p, s->text); p += strlen(p); *p++ = '\n'; if (s->text[0] == 'K') break; } s = s->abc_next; } *p++ = '\0'; return header; } /* apply the options to the current tune */ static void tune_filter(struct SYMBOL *s) { struct tune_opt_s *opt; struct SYMBOL *s1, *s2; regex_t r; char *header, *p; int ret; header = tune_header_rebuild(s); for (opt = tune_opts; opt; opt = 
opt->next) { struct SYMBOL *last_staves; p = &opt->s->text[2 + 5]; /* "%%tune RE" */ while (isspace((unsigned char) *p)) p++; ret = regcomp(&r, p, REG_EXTENDED | REG_NEWLINE | REG_NOSUB); if (ret) continue; ret = regexec(&r, header, 0, NULL, 0); regfree(&r); if (ret) continue; /* apply the options */ cur_tune_opts = opt; last_staves = s->abc_next; for (s1 = opt->s->next; s1; s1 = s1->next) { /* replace the next %%staves/%%score */ if (s1->abc_type == ABC_T_PSCOM && (strncmp(&s1->text[2], "staves", 6) == 0 || strncmp(&s1->text[2], "score", 5) == 0)) { while (last_staves) { if (last_staves->abc_type == ABC_T_PSCOM && (strncmp(&last_staves->text[2], "staves", 6) == 0 || strncmp(&last_staves->text[2], "score", 5) == 0)) { last_staves->text = s1->text; last_staves = last_staves->abc_next; break; } last_staves = last_staves->abc_next; } continue; } s2 = (struct SYMBOL *) getarena(sizeof *s2); memcpy(s2, s1, sizeof *s2); process_pscomment(s2); } cur_tune_opts = NULL; tune_voice_opts = opt->voice_opts; // for %%voice //fixme: what if many %%tune's with %%voice inside? } free(header); } /* apply the options of the current voice */ static void voice_filter(void) { struct voice_opt_s *opt; struct SYMBOL *s; regex_t r; int pass, ret; char *p; /* scan the global, then the tune options */ pass = 0; opt = voice_opts; for (;;) { if (!opt) { if (pass != 0) break; opt = tune_voice_opts; if (!opt) break; pass++; } p = &opt->s->text[2 + 6]; /* "%%voice RE" */ while (isspace((unsigned char) *p)) p++; ret = regcomp(&r, p, REG_EXTENDED | REG_NOSUB); if (ret) goto next_voice; ret = regexec(&r, curvoice->id, 0, NULL, 0); if (ret && curvoice->nm) ret = regexec(&r, curvoice->nm, 0, NULL, 0); regfree(&r); if (ret) goto next_voice; /* apply the options */ for (s = opt->s->next; s; s = s->next) { struct SYMBOL *s2; s2 = (struct SYMBOL *) getarena(sizeof *s2); memcpy(s2, s, sizeof *s2); process_pscomment(s2); } next_voice: opt = opt->next; } } /* -- check if a pseudo-comment may be in the tune header -- */ static int check_header(struct SYMBOL *s) { switch (s->text[2]) { case 'E': if (strncmp(s->text + 2, "EPS", 3) == 0) return 0; break; case 'm': if (strncmp(s->text + 2, "multicol", 8) == 0) return 0; break; } return 1; } /* -- set the global definitions after the first K: or middle-tune T:'s -- */ static void set_global_def(void) { struct VOICE_S *p_voice; int i; for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) { switch (p_voice->key.instr) { case 0: if (!pipeformat) { // p_voice->transpose = cfmt.transpose; break; } //fall thru case K_HP: case K_Hp: if (p_voice->posit.std == 0) p_voice->posit.std = SL_BELOW; break; } // if (p_voice->key.empty) // p_voice->key.sf = 0; if (!cfmt.autoclef && p_voice->s_clef && (p_voice->s_clef->sflags & S_CLEF_AUTO)) { p_voice->s_clef->u.clef.type = TREBLE; p_voice->s_clef->sflags &= ~S_CLEF_AUTO; } } /* switch to the 1st voice */ curvoice = &voice_tb[parsys->top_voice]; } /* -- get the global definitions after the first K: or middle-tune T:'s -- */ static struct SYMBOL *get_global_def(struct SYMBOL *s) { struct SYMBOL *s2; for (;;) { s2 = s->abc_next; if (!s2) break; switch (s2->abc_type) { case ABC_T_INFO: switch (s2->text[0]) { case 'K': s = s2; s->state = ABC_S_HEAD; get_key(s); continue; case 'I': case 'M': case 'Q': s = s2; s->state = ABC_S_HEAD; s = get_info(s); continue; } break; case ABC_T_PSCOM: if (!check_header(s2)) break; s = s2; s->state = ABC_S_HEAD; s = process_pscomment(s); continue; } break; } set_global_def(); return s; } /* save the global note maps */ 
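/* (deep copy of the global 'maps' list into 'maps_glob', so that map
 * definitions changed inside a tune can be restored for the next tune
 * - see the end of do_tune) */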
static void save_maps(void) { struct map *omap, *map; struct note_map *onotes, *notes; omap = maps; if (!omap) { maps_glob = NULL; return; } maps_glob = map = getarena(sizeof *maps_glob); for (;;) { memcpy(map, omap, sizeof *map); onotes = omap->notes; if (onotes) { map->notes = notes = getarena(sizeof *notes); for (;;) { memcpy(notes, onotes, sizeof *notes); onotes = onotes->next; if (!onotes) break; notes->next = getarena(sizeof *notes); notes = notes->next; } } omap = omap->next; if (!omap) break; map->next = getarena(sizeof *map); map = map->next; } } /* -- identify info line, store in proper place -- */ static struct SYMBOL *get_info(struct SYMBOL *s) { struct SYMBOL *s2; struct VOICE_S *p_voice; char *p; char info_type; int old_lvl; static char *state_txt[] = {"global", "header", "tune"}; /* change arena to global or tune */ old_lvl = lvlarena(s->state != ABC_S_GLOBAL); info_type = s->text[0]; switch (info_type) { case 'd': break; case 'I': s = process_pscomment(s); /* same as pseudo-comment */ break; case 'K': get_key(s); if (s->state != ABC_S_HEAD) break; info['K' - 'A'] = s; /* first K:, end of tune header */ tunenum++; if (!epsf) { // if (!cfmt.oneperpage) // use_buffer = cfmt.splittune != 1; bskip(cfmt.topspace); } a2b("%% --- xref %s\n", &info['X' - 'A']->text[2]); // (for index) write_heading(); block_put(); /* information for index * (pdfmark must be after title show for Adobe Distiller) */ s2 = info['T' - 'A']; p = &s2->text[2]; if (*p != '\0') { a2b("%% --- font "); outft = -1; set_font(TITLEFONT); /* font in comment */ a2b("\n"); outft = -1; } if (cfmt.pdfmark) { if (*p != '\0') put_pdfmark(p); if (cfmt.pdfmark > 1) { for (s2 = s2->next; s2; s2 = s2->next) { p = &s2->text[2]; if (*p != '\0') put_pdfmark(p); } } } nbar = cfmt.measurefirst; /* measure numbering */ over_voice = -1; over_time = -1; over_bar = 0; capo = 0; reset_gen(); s = get_global_def(s); if (!(cfmt.fields[0] & (1 << ('Q' - 'A')))) info['Q' - 'A'] = NULL; /* apply the filter for the voice '1' */ voice_filter(); /* activate the default tablature if not yet done */ if (!first_voice->tblts[0]) set_tblt(first_voice); break; case 'L': switch (s->state) { case ABC_S_HEAD: { int i, auto_len; auto_len = s->u.length.base_length < 0; for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) p_voice->auto_len = auto_len; break; } case ABC_S_TUNE: curvoice->auto_len = s->u.length.base_length < 0; break; } break; case 'M': get_meter(s); break; case 'P': { struct VOICE_S *curvoice_sav; if (s->state != ABC_S_TUNE) { info['P' - 'A'] = s; break; } if (!(cfmt.fields[0] & (1 << ('P' - 'A')))) break; /* * If not in the main voice, then, * if the voices are synchronized and no P: yet in the main voice, * the misplaced P: goes into the main voice. 
*/ p_voice = &voice_tb[parsys->top_voice]; if (curvoice != p_voice) { if (curvoice->time != p_voice->time) break; if (p_voice->last_sym && p_voice->last_sym->type == PART) break; // already a P: curvoice_sav = curvoice; curvoice = p_voice; sym_link(s, PART); curvoice = curvoice_sav; break; } sym_link(s, PART); break; } case 'Q': if (!(cfmt.fields[0] & (1 << ('Q' - 'A')))) break; if (s->state != ABC_S_TUNE) { info['Q' - 'A'] = s; break; } if (curvoice != &voice_tb[parsys->top_voice]) break; /* tempo only for first voice */ s2 = curvoice->last_sym; if (s2) { /* keep last Q: */ int tim; tim = s2->time; do { if (s2->type == TEMPO) { if (!s2->next) curvoice->last_sym = s2->prev; else s2->next->prev = s2->prev; if (!s2->prev) curvoice->sym = s2->next; else s2->prev->next = s2->next; break; } s2 = s2->prev; } while (s2 && s2->time == tim); } sym_link(s, TEMPO); break; case 'r': case 's': break; case 'T': if (s->state == ABC_S_GLOBAL) break; if (s->state == ABC_S_HEAD) /* in tune header */ goto addinfo; gen_ly(1); /* in tune */ p = &s->text[2]; if (*p != '\0') { write_title(s); a2b("%% --- + (%s) ---\n", p); if (cfmt.pdfmark) put_pdfmark(p); } voice_init(); reset_gen(); /* (display the time signature) */ s = get_global_def(s); break; case 'U': deco[s->u.user.symbol] = parse.deco_tb[s->u.user.value - 128]; break; case 'u': break; case 'V': get_voice(s); /* handle here the possible clef which could be replaced * in case of filter */ if (s->abc_next && s->abc_next->abc_type == ABC_T_CLEF) { s = s->abc_next; get_clef(s); } if (s->state == ABC_S_TUNE && !curvoice->last_sym && curvoice->time == 0) voice_filter(); break; case 'w': if (s->state != ABC_S_TUNE) break; if (!(cfmt.fields[1] & (1 << ('w' - 'a')))) { while (s->abc_next) { if (s->abc_next->abc_type != ABC_T_INFO || s->abc_next->text[0] != '+') break; s = s->abc_next; } break; } s = get_lyric(s); break; case 'W': if (s->state == ABC_S_GLOBAL || !(cfmt.fields[0] & (1 << ('W' - 'A')))) break; goto addinfo; case 'X': if (!epsf) { buffer_eob(0); /* flush stuff left from %% lines */ write_buffer(); //fixme: 8.6.2 if (cfmt.oneperpage) close_page(); // else if (in_page) else use_buffer = cfmt.splittune != 1; } memcpy(&dfmt, &cfmt, sizeof dfmt); /* save global values */ memcpy(&info_glob, &info, sizeof info_glob); memcpy(deco_glob, deco, sizeof deco_glob); save_maps(); info['X' - 'A'] = s; if (tune_opts) tune_filter(s); break; default: if (info_type >= 'A' && info_type <= 'Z') { struct SYMBOL *prev; if (s->state == ABC_S_TUNE) break; addinfo: prev = info[info_type - 'A']; if (!prev || (prev->state == ABC_S_GLOBAL && s->state != ABC_S_GLOBAL)) { info[info_type - 'A'] = s; } else { while (prev->next) prev = prev->next; prev->next = s; } while (s->abc_next && s->abc_next->abc_type == ABC_T_INFO && s->abc_next->text[0] == '+') { prev = s; s = s->abc_next; prev->next = s; } s->prev = prev; break; } if (s->state != ABC_S_GLOBAL) error(1, s, "%s info '%c:' not treated", state_txt[(int) s->state], info_type); break; } lvlarena(old_lvl); return s; } /* -- set head type, dots, flags for note -- */ void identify_note(struct SYMBOL *s, int dur, int *p_head, int *p_dots, int *p_flags) { int head, dots, flags; if (dur % 12 != 0) error(1, s, "Invalid note duration"); dur /= 12; /* see BASE_LEN for values */ if (dur == 0) error(1, s, "Note too short"); for (flags = 5; dur != 0; dur >>= 1, flags--) { if (dur & 1) break; } dur >>= 1; switch (dur) { case 0: dots = 0; break; case 1: dots = 1; break; case 3: dots = 2; break; case 7: dots = 3; break; default: error(1, s, 
"Note too much dotted"); dots = 3; break; } flags -= dots; if (flags >= 0) { head = H_FULL; } else switch (flags) { default: error(1, s, "Note too long"); flags = -4; /* fall thru */ case -4: head = H_SQUARE; break; case -3: head = cfmt.squarebreve ? H_SQUARE : H_OVAL; break; case -2: head = H_OVAL; break; case -1: head = H_EMPTY; break; } *p_head = head; *p_flags = flags; *p_dots = dots; } /* -- adjust the duration and time of symbols in a measure when L:auto -- */ static void adjust_dur(struct SYMBOL *s) { struct SYMBOL *s2; int time, auto_time; /* search the start of the measure */ s2 = curvoice->last_sym; if (!s2) return; /* the bar time is correct if there is multi-rests */ if (s2->type == MREST || s2->type == BAR) /* in second voice */ return; while (s2->type != BAR && s2->prev) s2 = s2->prev; time = s2->time; auto_time = curvoice->time - time; /* remove the invisible rest at start of tune */ if (time == 0) { while (s2 && s2->dur == 0) s2 = s2->next; if (s2 && s2->abc_type == ABC_T_REST && (s2->flags & ABC_F_INVIS)) { time += s2->dur * curvoice->wmeasure / auto_time; if (s2->prev) s2->prev->next = s2->next; else curvoice->sym = s2->next; if (s2->next) s2->next->prev = s2->prev; s2 = s2->next; } } if (curvoice->wmeasure == auto_time) return; /* already good duration */ for (; s2; s2 = s2->next) { int i, head, dots, nflags; s2->time = time; if (s2->dur == 0 || (s2->flags & ABC_F_GRACE)) continue; s2->dur = s2->dur * curvoice->wmeasure / auto_time; time += s2->dur; if (s2->type != NOTEREST) continue; for (i = 0; i <= s2->nhd; i++) s2->u.note.notes[i].len = s2->u.note.notes[i].len * curvoice->wmeasure / auto_time; identify_note(s2, s2->u.note.notes[0].len, &head, &dots, &nflags); s2->head = head; s2->dots = dots; s2->nflags = nflags; if (s2->nflags <= -2) s2->flags |= ABC_F_STEMLESS; else s2->flags &= ~ABC_F_STEMLESS; } curvoice->time = s->time = time; } /* -- measure bar -- */ static void get_bar(struct SYMBOL *s) { int bar_type; struct SYMBOL *s2; if (s->u.bar.repeat_bar && curvoice->norepbra && !curvoice->second) s->sflags |= S_NOREPBRA; if (curvoice->auto_len) adjust_dur(s); bar_type = s->u.bar.type; s2 = curvoice->last_sym; if (s2 && s2->type == SPACE) { s2->time--; // keep the space at the right place } else if (s2 && s2->type == BAR) { /* remove the invisible repeat bars when no shift is needed */ if (bar_type == B_OBRA && !s2->text && (curvoice == &voice_tb[parsys->top_voice] || (parsys->staff[curvoice->staff - 1].flags & STOP_BAR) || (s->sflags & S_NOREPBRA))) { s2->text = s->text; s2->u.bar.repeat_bar = s->u.bar.repeat_bar; s2->flags |= s->flags & (ABC_F_RBSTART | ABC_F_RBSTOP); s2->sflags |= s->sflags & (S_NOREPBRA | S_RBSTART | S_RBSTOP); s = s2; goto gch_build; } /* merge back-to-back repeat bars */ if (bar_type == B_LREP && !s->text) { if (s2->u.bar.type == B_RREP) { s2->u.bar.type = B_DREP; s2->flags |= ABC_F_RBSTOP; s2->sflags |= S_RBSTOP; return; } if (s2->u.bar.type == B_DOUBLE) { s2->u.bar.type = (B_SINGLE << 8) | B_LREP; s2->flags |= ABC_F_RBSTOP; s2->sflags |= S_RBSTOP; return; } } } /* link the bar in the voice */ /* the bar must appear before a key signature */ if (s2 && s2->type == KEYSIG && (!s2->prev || s2->prev->type != BAR)) { curvoice->last_sym = s2->prev; if (!curvoice->last_sym) curvoice->sym = NULL; sym_link(s, BAR); s->next = s2; s2->prev = s; curvoice->last_sym = s2; } else { sym_link(s, BAR); } s->staff = curvoice->staff; /* original staff */ /* set some flags */ switch (bar_type) { case B_OBRA: case (B_OBRA << 4) + B_CBRA: s->flags |= ABC_F_INVIS; 
break; case (B_COL << 8) + (B_BAR << 4) + B_COL: case (B_COL << 12) + (B_BAR << 8) + (B_BAR << 4) + B_COL: bar_type = (B_COL << 4) + B_COL; /* :|: and :||: -> :: */ s->u.bar.type = bar_type; break; case (B_BAR << 4) + B_BAR: if (!cfmt.rbdbstop) break; case (B_OBRA << 4) + B_BAR: case (B_BAR << 4) + B_CBRA: s->flags |= ABC_F_RBSTOP; s->sflags |= S_RBSTOP; break; } if (s->u.bar.dc.n > 0) deco_cnv(&s->u.bar.dc, s, NULL); /* convert the decorations */ /* build the gch */ gch_build: if (s->text) { if (!s->u.bar.repeat_bar) { gch_build(s); /* build the guitar chords */ } else { s->gch = getarena(sizeof *s->gch * 2); memset(s->gch, 0, sizeof *s->gch * 2); s->gch->type = 'r'; s->gch->font = REPEATFONT; str_font(REPEATFONT); s->gch->w = tex_str(s->text); s->gch->x = 4 + 4; } } } /* -- activate the tablature from the command line '-T' -- */ static void set_tblt(struct VOICE_S *p_voice) { struct tblt_s *tblt; int i; for (i = 0; i < ncmdtblt; i++) { if (!cmdtblts[i].active) continue; if (cmdtblts[i].vn[0] != '\0') { if (strcmp(cmdtblts[i].vn, p_voice->id) != 0 && (p_voice->nm == 0 || strcmp(cmdtblts[i].vn, p_voice->nm) != 0) && (p_voice->snm == 0 || strcmp(cmdtblts[i].vn, p_voice->snm) != 0)) continue; } tblt = tblts[cmdtblts[i].index]; if (p_voice->tblts[0] == tblt || p_voice->tblts[1] == tblt) continue; if (p_voice->tblts[0] == 0) p_voice->tblts[0] = tblt; else p_voice->tblts[1] = tblt; } } /* -- do a tune -- */ void do_tune(void) { struct VOICE_S *p_voice; struct SYMBOL *s, *s1, *s2; int i; /* initialize */ lvlarena(0); nstaff = 0; staves_found = -1; for (i = 0; i < MAXVOICE; i++) { p_voice = &voice_tb[i]; s1 = (struct SYMBOL *) getarena(sizeof *s1); memset(s1, 0, sizeof *s1); s1->type = CLEF; s1->voice = i; if (cfmt.autoclef) { s1->u.clef.type = AUTOCLEF; s1->sflags = S_CLEF_AUTO; } else { s1->u.clef.type = TREBLE; } s1->u.clef.line = 2; /* treble clef on 2nd line */ p_voice->s_clef = s1; p_voice->meter.wmeasure = 1; // M:none p_voice->wmeasure = 1; p_voice->scale = 1; p_voice->clone = -1; p_voice->over = -1; p_voice->posit = cfmt.posit; p_voice->stafflines = NULL; // p_voice->staffscale = 0; } curvoice = first_voice = voice_tb; reset_deco(); abc2win = 0; clip_start.bar = -1; clip_end.bar = (short unsigned) ~0 >> 1; parsys = NULL; system_new(); /* create the 1st staff system */ parsys->top_voice = parsys->voice[0].range = 0; /* implicit voice */ if (!epsf) { //fixme: 8.6.2 #if 1 // fixme: should already be 0 use_buffer = 0; #else if (cfmt.oneperpage) { use_buffer = 0; close_page(); } else { if (in_page) // ?? 
use_buffer = cfmt.splittune != 1; } #endif } else { use_buffer = 1; marg_init(); } /* set the duration of all notes/rests * (this is needed for tuplets and the feathered beams) */ for (s = parse.first_sym; s; s = s->abc_next) { switch (s->abc_type) { case ABC_T_EOLN: if (s->u.eoln.type == 2) abc2win = 1; break; case ABC_T_NOTE: case ABC_T_REST: s->dur = s->u.note.notes[0].len; break; } } if (voice_tb[0].id[0] == '\0') { /* single voice */ voice_tb[0].id[0] = '1'; /* implicit V:1 */ voice_tb[0].id[1] = '\0'; } /* scan the tune */ for (s = parse.first_sym; s; s = s->abc_next) { if (s->flags & ABC_F_LYRIC_START) curvoice->lyric_start = curvoice->last_sym; switch (s->abc_type) { case ABC_T_INFO: s = get_info(s); break; case ABC_T_PSCOM: s = process_pscomment(s); break; case ABC_T_NOTE: case ABC_T_REST: if (curvoice->space && !(s->flags & ABC_F_GRACE)) { curvoice->space = 0; s->flags |= ABC_F_SPACE; } get_note(s); break; case ABC_T_BAR: if (over_bar) get_over(s); get_bar(s); break; case ABC_T_CLEF: get_clef(s); break; case ABC_T_EOLN: if (cfmt.breakoneoln || (s->flags & ABC_F_SPACE)) curvoice->space = 1; if (cfmt.continueall || cfmt.barsperstaff || s->u.eoln.type == 1) /* if '\' */ continue; if (s->u.eoln.type == 0 /* if normal eoln */ && abc2win && parse.abc_vers != (2 << 16)) continue; if (parsys->voice[curvoice - voice_tb].range == 0 && curvoice->last_sym) curvoice->last_sym->sflags |= S_EOLN; if (!cfmt.alignbars) continue; /* normal */ /* align bars */ while (s->abc_next) { /* treat the lyrics */ if (s->abc_next->abc_type != ABC_T_INFO) break; switch (s->abc_next->text[0]) { case 'w': s = get_info(s->abc_next); continue; case 'd': case 's': s = s->abc_next; continue; } break; } i = (curvoice - voice_tb) + 1; if (i < cfmt.alignbars) { curvoice = &voice_tb[i]; continue; } generate(); buffer_eob(0); curvoice = &voice_tb[0]; continue; case ABC_T_MREST: { int dur; dur = curvoice->wmeasure * s->u.bar.len; if (curvoice->second) { curvoice->time += dur; break; } sym_link(s, MREST); s->dur = dur; curvoice->time += dur; if (s->text) gch_build(s); /* build the guitar chords */ if (s->u.bar.dc.n > 0) deco_cnv(&s->u.bar.dc, s, NULL); break; } case ABC_T_MREP: { int n; s2 = curvoice->last_sym; if (!s2 || s2->type != BAR) { error(1, s, "No bar before measure repeat"); break; } if (curvoice->ignore) break; n = s->u.bar.len; if (curvoice->second) { curvoice->time += curvoice->wmeasure * n; break; } s2 = sym_add(curvoice, NOTEREST); s2->abc_type = ABC_T_REST; s2->flags |= ABC_F_INVIS; s2->dur = curvoice->wmeasure; curvoice->time += s2->dur; if (n == 1) { s->abc_next->u.bar.len = n; /* <n> in the next bar */ break; } while (--n > 0) { s2 = sym_add(curvoice, BAR); s2->u.bar.type = B_SINGLE; if (n == s->u.bar.len - 1) s2->u.bar.len = s->u.bar.len; s2 = sym_add(curvoice, NOTEREST); s2->abc_type = ABC_T_REST; s2->flags |= ABC_F_INVIS; s2->dur = curvoice->wmeasure; curvoice->time += s2->dur; } break; } case ABC_T_V_OVER: get_over(s); continue; case ABC_T_TUPLET: set_tuplet(s); break; default: continue; } if (s->type == 0) continue; if (curvoice->second) s->sflags |= S_SECOND; if (curvoice->floating) s->sflags |= S_FLOATING; } gen_ly(0); put_history(); buffer_eob(1); if (epsf) { write_eps(); } else { write_buffer(); // if (!cfmt.oneperpage && in_page) // use_buffer = cfmt.splittune != 1; } if (info['X' - 'A']) { memcpy(&cfmt, &dfmt, sizeof cfmt); /* restore global values */ memcpy(&info, &info_glob, sizeof info); memcpy(deco, deco_glob, sizeof deco); maps = maps_glob; info['X' - 'A'] = NULL; } /* free the parsing 
resources */ { struct brk_s *brk, *brk2; brk = brks; while (brk) { brk2 = brk->next; free(brk); brk = brk2; } brks = brk; /* (NULL) */ } } /* check if a K: or M: may go to the tune key and time signatures */ static int is_tune_sig(void) { struct SYMBOL *s; if (!curvoice->sym) return 1; if (curvoice->time != 0) return 0; /* not at start of tune */ for (s = curvoice->sym; s; s = s->next) { switch (s->type) { case TEMPO: case PART: case FMTCHG: break; default: return 0; } } return 1; } /* -- get a clef definition (in K: or V:) -- */ static void get_clef(struct SYMBOL *s) { struct SYMBOL *s2; struct VOICE_S *p_voice; int voice; p_voice = curvoice; s->type = CLEF; if (s->abc_prev->abc_type == ABC_T_INFO) { switch (s->abc_prev->text[0]) { case 'K': if (s->abc_prev->state != ABC_S_HEAD) break; for (voice = 0; voice < MAXVOICE; voice++) { voice_tb[voice].s_clef = s; if (s->u.clef.type == PERC) voice_tb[voice].perc = 1; } return; case 'V': /* clef relative to a voice definition in the header */ p_voice = &voice_tb[(int) s->abc_prev->u.voice.voice]; curvoice = p_voice; break; } } if (is_tune_sig()) { p_voice->s_clef = s; } else { /* clef change */ #if 0 sym_link(s, CLEF); #else /* the clef must appear before a key signature or a bar */ s2 = p_voice->last_sym; if (s2 && s2->prev && s2->time == curvoice->time // if no time skip && (s2->type == KEYSIG || s2->type == BAR)) { struct SYMBOL *s3; for (s3 = s2; s3->prev; s3 = s3->prev) { switch (s3->prev->type) { case KEYSIG: case BAR: continue; } break; } p_voice->last_sym = s3->prev; sym_link(s, CLEF); s->next = s3; s3->prev = s; p_voice->last_sym = s2; } else { sym_link(s, CLEF); } #endif s->aux = 1; /* small clef */ } p_voice->perc = s->u.clef.type == PERC; if (s->u.clef.type == AUTOCLEF) s->sflags |= S_CLEF_AUTO; } /* -- treat %%clef -- */ static void clef_def(struct SYMBOL *s) { char *p; int clef, clef_line; char str[80]; clef = -1; clef_line = 2; p = &s->text[2 + 5]; /* skip %%clef */ while (isspace((unsigned char) *p)) p++; /* clef name */ switch (*p) { case '\"': /* user clef name */ p = get_str(str, p, sizeof str); s->u.clef.name = (char *) getarena(strlen(str) + 1); strcpy(s->u.clef.name, str); clef = TREBLE; break; case 'G': clef = TREBLE; p++; break; case 'F': clef = BASS; clef_line = 4; p++; break; case 'C': clef = ALTO; clef_line = 3; p++; break; case 'P': clef = PERC; p++; break; case 't': if (strncmp(p, "treble", 6) == 0) { clef = TREBLE; p += 6; } if (strncmp(p, "tenor", 5) == 0) { clef = ALTO; clef_line = 4; p += 5; } break; case 'a': if (strncmp(p, "alto", 4) == 0) { clef = ALTO; clef_line = 3; p += 4; } else if (strncmp(p, "auto", 4) == 0) { clef = AUTOCLEF; s->sflags |= S_CLEF_AUTO; p += 4; } break; case 'b': if (strncmp(p, "bass", 4) == 0) { clef = BASS; clef_line = 4; p += 4; } break; case 'p': if (strncmp(p, "perc", 4) == 0) { clef = PERC; p += 4; } break; case 'n': if (strncmp(p, "none", 4) == 0) { clef = TREBLE; s->u.clef.invis = 1; s->flags |= ABC_F_INVIS; p += 4; } break; } if (clef < 0) { error(1, s, "Unknown clef '%s'", p); return; } /* clef line */ switch (*p) { case '1': case '2': case '3': case '4': case '5': clef_line = *p++ - '0'; break; } /* +/-/^/_8 */ if (p[1] == '8') { switch (*p) { case '^': s->u.clef.transpose = -7; case '+': s->u.clef.octave = 1; break; case '_': s->u.clef.transpose = 7; case '-': s->u.clef.octave = -1; break; } } /* handle the clef */ s->abc_type = ABC_T_CLEF; s->u.clef.type = clef; s->u.clef.line = clef_line; get_clef(s); } /* transpose a key */ static void key_transpose(struct key_s *key) { int 
t, sf; t = curvoice->transpose / 3; sf = (t & ~1) + (t & 1) * 7 + key->sf; switch ((curvoice->transpose + 210) % 3) { case 1: sf = (sf + 4 + 12 * 4) % 12 - 4; /* more sharps */ break; case 2: sf = (sf + 7 + 12 * 4) % 12 - 7; /* more flats */ break; default: sf = (sf + 5 + 12 * 4) % 12 - 5; /* Db, F# or B */ break; } key->sf = sf; } /* -- set the accidentals when K: with modified accidentals -- */ static void set_k_acc(struct SYMBOL *s) { int i, j, nacc; char accs[8], pits[8]; static char sharp_tb[8] = {26, 23, 27, 24, 21, 25, 22}; static char flat_tb[8] = {22, 25, 21, 24, 20, 23, 26}; if (s->u.key.sf > 0) { for (nacc = 0; nacc < s->u.key.sf; nacc++) { accs[nacc] = A_SH; pits[nacc] = sharp_tb[nacc]; } } else { for (nacc = 0; nacc < -s->u.key.sf; nacc++) { accs[nacc] = A_FT; pits[nacc] = flat_tb[nacc]; } } for (i = 0; i < s->u.key.nacc; i++) { for (j = 0; j < nacc; j++) { // if ((pits[j] - s->u.key.pits[i]) % 7 == 0) { if (pits[j] == s->u.key.pits[i]) { accs[j] = s->u.key.accs[i]; break; } } if (j == nacc) { if (nacc >= sizeof accs) { error(1, s, "Too many accidentals"); } else { accs[j] = s->u.key.accs[i]; pits[j] = s->u.key.pits[i]; nacc++; } } } for (i = 0; i < nacc; i++) { s->u.key.accs[i] = accs[i]; s->u.key.pits[i] = pits[i]; } s->u.key.nacc = nacc; } /* -- get a key signature definition (K:) -- */ static void get_key(struct SYMBOL *s) { struct VOICE_S *p_voice; struct SYMBOL *s2; struct key_s okey; /* original key */ int i; // int delta; if (s->u.key.octave != NO_OCTAVE) curvoice->octave = s->u.key.octave; if (s->u.key.cue > 0) curvoice->scale = 0.7; else if (s->u.key.cue < 0) curvoice->scale = 1; if (s->u.key.stafflines) curvoice->stafflines = s->u.key.stafflines; if (s->u.key.staffscale != 0) curvoice->staffscale = s->u.key.staffscale; if (s->u.key.empty == 1) /* clef only */ return; if (s->u.key.sf != 0 && !s->u.key.exp && s->u.key.nacc != 0) set_k_acc(s); memcpy(&okey, &s->u.key, sizeof okey); if (s->state == ABC_S_HEAD) { /* if first K: (start of tune) */ for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) p_voice->transpose = cfmt.transpose; // curvoice->transpose = cfmt.transpose; } if (curvoice->transpose != 0) { key_transpose(&s->u.key); #if 0 /* transpose explicit accidentals */ //fixme: not correct - transpose adds or removes accidentals... 
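/* Note (inferred from key_transpose() above and get_transpose() further down,
 * not stated explicitly in the sources): a transpose value appears to be kept
 * as roughly 3 * <semitones> plus a 0/1/2 spelling preference in the remainder
 * - 1 = prefer sharps, 2 = prefer flats, 0 = default (Db, F# or B).
 * For example "%%transpose 2#" would be stored as 2 * 3 + 1 = 7; key_transpose()
 * then recovers the interval with transpose / 3 and the preference with
 * (transpose + 210) % 3 (210 is a multiple of 3, present only to keep the
 * modulo of negative values non-negative). Negative values carrying a '#' or
 * 'b' suffix get an extra -3 in get_transpose() so that C's truncating
 * division still yields the intended semitone count. */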
if (s->u.key.nacc > 0) { struct VOICE_S voice, *voice_sav; struct SYMBOL note; memset(&voice, 0, sizeof voice); voice.transpose = curvoice->transpose; memcpy(&voice.ckey, &s->u.key, sizeof voice.ckey); voice.ckey.empty = 2; voice.ckey.nacc = 0; memset(&note, 0, sizeof note); --fixme memcpy(note.u.note.pits, voice.ckey.pits, sizeof note.u.note.pits); memcpy(note.u.note.accs, voice.ckey.accs, sizeof note.u.note.accs); note.nhd = s->u.key.nacc; voice_sav = curvoice; curvoice = &voice; note_transpose(&note); memcpy(s->u.key.pits, note.u.note.pits, sizeof s->u.key.pits); memcpy(s->u.key.accs, note.u.note.accs, sizeof s->u.key.accs); curvoice = voice_sav; } #endif } // calculate the tonic delta // s->u.key.key_delta = (cgd2cde[(s->u.key.sf + 7) % 7] + 14 + s->u.key.mode) % 7; s->u.key.key_delta = (cgd2cde[(s->u.key.sf + 7) % 7] + 14) % 7; if (s->state == ABC_S_HEAD) { /* start of tune */ for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) { memcpy(&p_voice->key, &s->u.key, sizeof p_voice->key); memcpy(&p_voice->ckey, &s->u.key, sizeof p_voice->ckey); memcpy(&p_voice->okey, &okey, sizeof p_voice->okey); if (p_voice->key.empty) p_voice->key.sf = 0; if (s->u.key.octave != NO_OCTAVE) p_voice->octave = s->u.key.octave; if (s->u.key.stafflines) p_voice->stafflines = s->u.key.stafflines; if (s->u.key.staffscale != 0) p_voice->staffscale = s->u.key.staffscale; //fixme: update parsys->voice[voice].stafflines = stafflines; ? } return; } /* ABC_S_TUNE (K: cannot be ABC_S_GLOBAL) */ if (is_tune_sig()) { /* define the starting key signature */ memcpy(&curvoice->key, &s->u.key, sizeof curvoice->key); memcpy(&curvoice->ckey, &s->u.key, sizeof curvoice->ckey); memcpy(&curvoice->okey, &okey, sizeof curvoice->okey); switch (curvoice->key.instr) { case 0: if (!pipeformat) { // curvoice->transpose = cfmt.transpose; break; } //fall thru case K_HP: case K_Hp: if (curvoice->posit.std == 0) curvoice->posit.std = SL_BELOW; break; } if (curvoice->key.empty) curvoice->key.sf = 0; return; } /* key signature change */ if ((!s->abc_next || s->abc_next->abc_type != ABC_T_CLEF) /* if not explicit clef */ && curvoice->ckey.sf == s->u.key.sf /* and same key */ && curvoice->ckey.nacc == 0 && s->u.key.nacc == 0 && curvoice->ckey.empty == s->u.key.empty && cfmt.keywarn) /* (if not key warning, * keep all key signatures) */ return; /* ignore */ if (!curvoice->ckey.empty) s->aux = curvoice->ckey.sf; /* previous key signature */ memcpy(&curvoice->ckey, &s->u.key, sizeof curvoice->ckey); memcpy(&curvoice->okey, &okey, sizeof curvoice->okey); if (s->u.key.empty) s->u.key.sf = 0; /* the key signature must appear before a time signature */ s2 = curvoice->last_sym; if (s2 && s2->type == TIMESIG) { curvoice->last_sym = s2->prev; if (!curvoice->last_sym) curvoice->sym = NULL; sym_link(s, KEYSIG); s->next = s2; s2->prev = s; curvoice->last_sym = s2; } else { sym_link(s, KEYSIG); } } /* -- set meter from M: -- */ static void get_meter(struct SYMBOL *s) { struct VOICE_S *p_voice; int i; switch (s->state) { case ABC_S_GLOBAL: /*fixme: keep the values and apply to all tunes?? 
*/ break; case ABC_S_HEAD: for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) { memcpy(&p_voice->meter, &s->u.meter, sizeof p_voice->meter); p_voice->wmeasure = s->u.meter.wmeasure; } break; case ABC_S_TUNE: curvoice->wmeasure = s->u.meter.wmeasure; if (is_tune_sig()) { memcpy(&curvoice->meter, &s->u.meter, sizeof curvoice->meter); reset_gen(); /* (display the time signature) */ break; } if (s->u.meter.nmeter == 0) break; /* M:none */ sym_link(s, TIMESIG); break; } } /* -- treat a 'V:' -- */ static void get_voice(struct SYMBOL *s) { struct VOICE_S *p_voice; int voice; voice = s->u.voice.voice; p_voice = &voice_tb[voice]; if (parsys->voice[voice].range < 0) { if (cfmt.alignbars) { error(1, s, "V: does not work with %%%%alignbars"); } if (staves_found < 0) { if (!s->u.voice.merge) { #if MAXSTAFF < MAXVOICE if (nstaff >= MAXSTAFF - 1) { error(1, s, "Too many staves"); return; } #endif nstaff++; } else { p_voice->second = 1; parsys->voice[voice].second = 1; } p_voice->staff = p_voice->cstaff = nstaff; parsys->voice[voice].staff = nstaff; parsys->nstaff = nstaff; { int range, i; range = 0; for (i = 0; i < MAXVOICE; i++) { if (parsys->voice[i].range > range) range = parsys->voice[i].range; } parsys->voice[voice].range = range + 1; voice_link(p_voice); } } else { p_voice->ignore = 1; p_voice->staff = p_voice->cstaff = nstaff + 1; } } /* if something has changed, update */ if (s->u.voice.fname != 0) { p_voice->nm = s->u.voice.fname; p_voice->new_name = 1; } if (s->u.voice.nname != 0) p_voice->snm = s->u.voice.nname; if (s->u.voice.octave != NO_OCTAVE) p_voice->octave = s->u.voice.octave; switch (s->u.voice.dyn) { case 1: p_voice->posit.dyn = SL_ABOVE; p_voice->posit.vol = SL_ABOVE; break; case -1: p_voice->posit.dyn = SL_BELOW; p_voice->posit.vol = SL_BELOW; break; } switch (s->u.voice.lyrics) { case 1: p_voice->posit.voc = SL_ABOVE; break; case -1: p_voice->posit.voc = SL_BELOW; break; } switch (s->u.voice.gchord) { case 1: p_voice->posit.gch = SL_ABOVE; break; case -1: p_voice->posit.gch = SL_BELOW; break; } switch (s->u.voice.stem) { case 1: p_voice->posit.std = SL_ABOVE; break; case -1: p_voice->posit.std = SL_BELOW; break; case 2: p_voice->posit.std = 0; /* auto */ break; } switch (s->u.voice.gstem) { case 1: p_voice->posit.gsd = SL_ABOVE; break; case -1: p_voice->posit.gsd = SL_BELOW; break; case 2: p_voice->posit.gsd = 0; /* auto */ break; } if (s->u.voice.scale != 0) p_voice->scale = s->u.voice.scale; else if (s->u.voice.cue > 0) p_voice->scale = 0.7; else if (s->u.voice.cue < 0) p_voice->scale = 1; if (s->u.voice.stafflines) p_voice->stafflines = s->u.voice.stafflines; if (s->u.voice.staffscale != 0) p_voice->staffscale = s->u.voice.staffscale; if (!p_voice->combine) p_voice->combine = cfmt.combinevoices; set_tblt(p_voice); /* if in tune, switch to this voice */ if (s->state == ABC_S_TUNE) curvoice = p_voice; } /* sort the notes of the chord by pitch (lowest first) */ void sort_pitch(struct SYMBOL *s) { int i, nx, k; struct note v_note; unsigned char new_order[MAXHD], inv_order[MAXHD]; for (i = 0; i <= s->nhd; i++) new_order[i] = i; for (;;) { nx = 0; for (i = 1; i <= s->nhd; i++) { if (s->u.note.notes[i].pit >= s->u.note.notes[i - 1].pit) continue; memcpy(&v_note, &s->u.note.notes[i], sizeof v_note); memcpy(&s->u.note.notes[i], &s->u.note.notes[i - 1], sizeof v_note); memcpy(&s->u.note.notes[i - 1], &v_note, sizeof v_note); k = s->pits[i]; s->pits[i] = s->pits[i - 1]; s->pits[i - 1] = k; k = new_order[i]; new_order[i] = new_order[i - 1]; new_order[i - 1] = k; nx++; } if (nx == 
0) break; } /* change the indexes of the note head decorations */ if (s->nhd > 0) { for (i = 0; i <= s->nhd; i++) inv_order[new_order[i]] = i; for (i = 0; i <= s->u.note.dc.n; i++) { k = s->u.note.dc.tm[i].m; if (k >= 0) s->u.note.dc.tm[i].m = inv_order[k]; } } } // set the map of the notes static void set_map(struct SYMBOL *s) { struct map *map; struct note_map *note_map; struct note *note; int m, delta; for (map = maps; map; map = map->next) { if (strcmp(map->name, curvoice->map_name) == 0) break; } if (!map) return; // !? // loop on the note maps, then on the notes of the chord delta = curvoice->ckey.key_delta; for (m = 0; m <= s->nhd; m++) { note = &s->u.note.notes[m]; for (note_map = map->notes; note_map; note_map = note_map->next) { switch (note_map->type) { case MAP_ONE: if (note->pit == note_map->pit && note->acc == note_map->acc) break; continue; case MAP_OCT: if ((note->pit - note_map->pit + 28 ) % 7 == 0 && note->acc == note_map->acc) break; continue; case MAP_KEY: if ((note->pit + 28 - delta - note_map->pit) % 7 == 0) break; continue; default: // MAP_ALL break; } note->head = note_map->heads; note->color = note_map->color; if (note_map->print_pit != -128) { note->pit = note_map->print_pit; s->pits[m] = note->pit; note->acc = note_map->print_acc; } break; } } } /* -- note or rest -- */ static void get_note(struct SYMBOL *s) { struct SYMBOL *prev; int i, m, delta; prev = curvoice->last_sym; m = s->nhd; /* insert the note/rest in the voice */ sym_link(s, s->u.note.notes[0].len != 0 ? NOTEREST : SPACE); if (!(s->flags & ABC_F_GRACE)) curvoice->time += s->dur; if (curvoice->octave) { delta = curvoice->octave * 7; for (i = 0; i <= m; i++) { s->u.note.notes[i].pit += delta; s->pits[i] += delta; } } /* convert the decorations * (!beam-accel! and !beam-rall! may change the note duration) * (!8va(! 
may change ottava) */ if (s->u.note.dc.n > 0) deco_cnv(&s->u.note.dc, s, prev); if (curvoice->ottava) { delta = curvoice->ottava; for (i = 0; i <= m; i++) s->pits[i] += delta; } s->combine = curvoice->combine; s->color = curvoice->color; if (curvoice->perc) s->sflags |= S_PERC; else if (s->abc_type == ABC_T_NOTE && curvoice->transpose != 0) note_transpose(s); if (!(s->flags & ABC_F_GRACE)) { switch (curvoice->posit.std) { case SL_ABOVE: s->stem = 1; break; case SL_BELOW: s->stem = -1; break; case SL_HIDDEN: s->flags |= ABC_F_STEMLESS;; break; } } else { /* grace note - adjust its duration */ int div; if (curvoice->key.instr != K_HP && curvoice->key.instr != K_Hp && !pipeformat) { div = 2; if (!prev || !(prev->flags & ABC_F_GRACE)) { if (s->flags & ABC_F_GR_END) div = 1; /* one grace note */ } } else { div = 4; /* bagpipe */ } for (i = 0; i <= m; i++) s->u.note.notes[i].len /= div; s->dur /= div; switch (curvoice->posit.gsd) { case SL_ABOVE: s->stem = 1; break; case SL_BELOW: s->stem = -1; break; case SL_HIDDEN: s->stem = 2; break; /* opposite */ } } s->nohdi1 = s->nohdi2 = -1; /* change the figure of whole measure rests */ if (s->abc_type == ABC_T_REST) { if (s->dur == curvoice->wmeasure) { if (s->dur < BASE_LEN * 2) s->u.note.notes[0].len = BASE_LEN; else if (s->dur < BASE_LEN * 4) s->u.note.notes[0].len = BASE_LEN * 2; else s->u.note.notes[0].len = BASE_LEN * 4; } } else { /* sort the notes of the chord by pitch (lowest first) */ if (!(s->flags & ABC_F_GRACE) && curvoice->map_name) set_map(s); sort_pitch(s); } /* get the max head type, number of dots and number of flags */ if (!curvoice->auto_len || (s->flags & ABC_F_GRACE)) { int head, dots, nflags, l; if ((l = s->u.note.notes[0].len) != 0) { identify_note(s, l, &head, &dots, &nflags); s->head = head; s->dots = dots; s->nflags = nflags; for (i = 1; i <= m; i++) { if (s->u.note.notes[i].len == l) continue; identify_note(s, s->u.note.notes[i].len, &head, &dots, &nflags); if (head > s->head) s->head = head; if (dots > s->dots) s->dots = dots; if (nflags > s->nflags) s->nflags = nflags; } if (s->sflags & S_XSTEM) s->nflags = 0; /* word start+end */ } } if (s->nflags <= -2) s->flags |= ABC_F_STEMLESS; if (s->sflags & (S_TREM1 | S_TREM2)) { if (s->nflags > 0) s->nflags += s->aux; else s->nflags = s->aux; if ((s->sflags & S_TREM2) && (s->sflags & S_BEAM_END)) { /* if 2nd note - see deco.c */ prev->head = s->head; prev->aux = s->aux; prev->nflags = s->nflags; prev->flags |= (s->flags & ABC_F_STEMLESS); } } for (i = 0; i <= m; i++) { if (s->u.note.notes[i].sl1 != 0) s->sflags |= S_SL1; if (s->u.note.notes[i].sl2 != 0) s->sflags |= S_SL2; if (s->u.note.notes[i].ti1 != 0) s->sflags |= S_TI1; } switch (cfmt.shiftunison) { case 0: break; case 1: s->sflags |= S_SHIFTUNISON_1; break; case 2: s->sflags |= S_SHIFTUNISON_2; break; default: s->sflags |= S_SHIFTUNISON_1 | S_SHIFTUNISON_2; break; } /* build the guitar chords */ if (s->text) gch_build(s); } static char *get_val(char *p, float *v) { char tmp[32], *r = tmp; while (isspace((unsigned char) *p)) p++; while ((isdigit((unsigned char) *p) && r < &tmp[32 - 1]) || *p == '-' || *p == '.') *r++ = *p++; *r = '\0'; sscanf(tmp, "%f", v); return p; } // parse <path .../> from %%beginsvg and convert to Postscript static void parse_path(char *p, char *q, char *id, int idsz) { struct SYMBOL *s; char *buf, *r, *t, *op = NULL, *width, *scale, *trans; int i, fill, npar = 0; float x1, y1, x, y; char *rmax; r = strstr(p, "class=\""); if (!r || r > q) return; r += 7; fill = strncmp(r, "fill", 4) == 0; width = 
strstr(p, "stroke-width:"); scale = strstr(p, "scale("); if (scale && scale > q) scale = NULL; trans = strstr(p, "translate("); if (trans && trans > q) trans = NULL; for (;;) { p = strstr(p, "d=\""); if (!p) return; if (isspace((unsigned char) p[-1])) // (check not 'id=..") break; p += 3; } i = (int) (q - p) * 4 + 200; // estimated PS buffer size if (i > TEX_BUF_SZ) buf = malloc(i); else buf = tex_buf; rmax=buf + i; r = buf; *r++ = '/'; idsz -= 5; strncpy(r, id + 4, idsz); r += idsz; strcpy(r, "{gsave T "); r += strlen(r); if (scale || trans) { if (scale) { scale += 6; // "scale(" t = get_val(scale, &x1); if (*t == ',') t = get_val(t + 1, &y1); else y1 = x1; } if (trans) { trans += 10; // "translate(" t = get_val(trans, &x) + 1; //"," t = get_val(t, &y); } if (!scale) r += sprintf(r, "%.2f %.2f T ", x, -y); else if (!trans) r += sprintf(r, "%.2f %.2f scale ", x1, y1); else if (scale > trans) r += sprintf(r, "%.2f %.2f T %.2f %.2f scale ", x, -y, x1, y1); else r += sprintf(r, "%.2f %.2f scale %.2f %.2f T ", x1, y1, x, -y); } strcpy(r, "0 0 M\n"); r += strlen(r); if (width && width < q) { *r++ = ' '; width += 13; while (isdigit(*width) || *width == '.') *r++ = *width++; *r++ = ' '; *r++ = 'S'; *r++ = 'L'; *r++ = 'W'; } p += 3; for (;;) { if (*p == '\0' || *p == '"') break; i = 0; switch (*p++) { default: if ((isdigit((unsigned char) p[-1])) || p[-1] == '-' || p[-1] == '.') { if (!npar) continue; p--; // same op break; } continue; case 'M': op = "M"; npar = 2; break; case 'm': op = "RM"; npar = 2; break; case 'L': op = "L"; npar = 2; break; case 'l': op = "RL"; npar = 2; break; case 'H': op = "H"; npar = 1; break; case 'h': op = "h"; npar = 1; break; case 'V': op = "V"; npar = 1; break; case 'v': *r++ = ' '; *r++ = '0'; op = "RL"; i = 1; npar = 2; break; case 'z': op = "closepath"; npar = 0; break; case 'C': op = "C"; npar = 6; break; case 'c': op = "RC"; npar = 6; break; // case 'A': // op = "arc"; // break; // case 'a': // op = "arc"; // break; case 'q': op = "RC"; npar = 2; p = get_val(p, &x1); p = get_val(p, &y1); t = get_val(p, &x); t = get_val(t, &y); r += sprintf(r, " %.2f %.2f %.2f %.2f", x1*2/3, -y1*2/3, x1+(x-x1)*2/3, -y1-(y-y1)*2/3); break; case 't': op = "RC"; npar = 2; x1 = x - x1; y1 = y - y1; t = get_val(p, &x); t = get_val(t, &y); r += sprintf(r, " %.2f %.2f %.2f %.2f", x1*2/3, -y1*2/3, x1+(x-x1)*2/3, -y1-(y-y1)*2/3); break; } *r++ = ' '; for ( ; i < npar; i++) { while (isspace((unsigned char) *p)) p++; if (i & 1) { // y is inverted if (*p == '-') p++; else if (*p != '0' || p[1] != ' ') *r++ = '-'; } while ((isdigit((unsigned char) *p)) || *p == '-' || *p == '.') *r++ = *p++; *r++ = ' '; } if (*op == 'h') { *r++ = '0'; *r++ = ' '; op = "RL"; } strcpy(r, op); r += strlen(r); if (r + 30 > rmax) bug("Buffer overflow in SVG to PS", 1); } strcpy(r, fill ? " fill" : " stroke"); r += strlen(r); strcpy(r, "\ngrestore}!"); r += strlen(r); s = getarena(sizeof(struct SYMBOL)); memset(s, 0, sizeof(struct SYMBOL)); s->text = getarena(strlen(buf) + 1); strcpy(s->text, buf); ps_def(s, s->text, 'p'); if (buf != tex_buf) free(buf); } // parse <defs> .. 
</defs> from %%beginsvg static void parse_defs(char *p, char *q) { char *id, *r; int idsz; for (;;) { id = strstr(p, "id=\""); if (!id || id > q) return; r = strchr(id + 4, '"'); if (!r) return; idsz = r + 1 - id; // if SVG output, mark the id as defined if (svg || epsf > 1) { svg_def_id(id, idsz); p = r; continue; } // convert SVG to PS p = id; while (*p != '<') p--; if (strncmp(p, "<path ", 6) == 0) { r = strstr(p, "/>"); parse_path(p + 6, r, id, idsz); if (!r) break; p = r + 2; continue; } break; } } // extract the SVG defs from %%beginsvg and // convert to PostScript when PS output // move to the SVG glyphs when SVG output static void svg_ps(char *p) { char *q; for (;;) { q = strstr(p, "<defs>"); if (!q) break; p = strstr(q, "</defs>"); if (!p) { error(1, NULL, "No </defs> in %%beginsvg"); break; } parse_defs(q + 6, p); } } /* -- treat a postscript or SVG definition -- */ static void ps_def(struct SYMBOL *s, char *p, char use) /* cf user_ps_add() */ { if (!svg && epsf <= 1) { /* if PS output */ if (secure // || use == 'g' // SVG || use == 's') // PS for SVG return; } else { /* if SVG output */ if (use == 'p' // PS for PS || (use == 'g' // SVG && file_initialized > 0)) return; } if (s->abc_prev) s->state = s->abc_prev->state; if (s->state == ABC_S_TUNE) { if (use == 'g') // SVG return; sym_link(s, FMTCHG); s->aux = PSSEQ; s->text = p; // s->flags |= ABC_F_INVIS; return; } if (use == 'g') { // SVG svg_ps(p); if (!svg && epsf <= 1) return; } if (file_initialized > 0 || mbf != outbuf) a2b("%s\n", p); else user_ps_add(p, use); } /* get a symbol selection */ /* measure_number [ ":" time_numerator "/" time_denominator ] */ static char *get_symsel(struct symsel_s *symsel, char *p) { char *q; int tn, td, n; symsel->bar = strtod(p, &q); if (*q >= 'a' && *q <= 'z') symsel->seq = *q++ - 'a'; else symsel->seq = 0; if (*q == ':') { if (sscanf(q + 1, "%d/%d%n", &tn, &td, &n) != 2 || td <= 0) return 0; symsel->time = BASE_LEN * tn / td; q += 1 + n; } else { symsel->time = 0; } return q; } /* free the voice options */ static void free_voice_opt(struct voice_opt_s *opt) { struct voice_opt_s *opt2; while (opt) { opt2 = opt->next; free(opt); opt = opt2; } } // get a color static int get_color(char *p) { int i, color; static const struct { char *name; int color; } col_tb[] = { { "aqua", 0x00ffff }, { "black", 0x000000 }, { "blue", 0x0000ff }, { "fuchsia", 0xff00ff }, { "gray", 0x808080 }, { "green", 0x008000 }, { "lime", 0x00ff00 }, { "maroon", 0x800000 }, { "navy", 0x000080 }, { "olive", 0x808000 }, { "purple", 0x800080 }, { "red", 0xff0000 }, { "silver", 0xc0c0c0 }, { "teal", 0x008080 }, { "white", 0xffffff }, { "yellow", 0xffff00 }, }; if (*p == '#') { if (sscanf(p, "#%06x", &color) != 1 || (unsigned) color > 0x00ffffff) return -1; return color; } for (i = sizeof col_tb / sizeof col_tb[0]; --i >= 0; ) { if (strncasecmp(p, col_tb[i].name, strlen(col_tb[i].name)) == 0) break; } if (i < 0) return -1; return col_tb[i].color; } /* get a transposition */ static int get_transpose(char *p) { int val, pit1, pit2, acc; static int pit_st[7] = {0, 2, 4, 5, 7, 9, 11}; if (isdigit(*p) || *p == '-' || *p == '+') { sscanf(p, "%d", &val); val *= 3; switch (p[strlen(p) - 1]) { default: return val; case '#': val++; break; case 'b': val += 2; break; } if (val > 0) return val; return val - 3; } // by music interval p = parse_acc_pit(p, &pit1, &acc); if (acc < 0) { error(1, NULL, " in %%%%transpose"); return 0; } pit1 += 126 - 2; // for value > 0 and 'C' % 7 == 0 pit1 = (pit1 / 7) * 12 + pit_st[pit1 % 7]; switch (acc) { case 
A_DS: pit1 += 2; break; case A_SH: pit1++; break; case A_FT: pit1--; break; case A_DF: pit1 -= 2; break; } p = parse_acc_pit(p, &pit2, &acc); if (acc < 0) { error(1, NULL, " in %%%%transpose"); return 0; } pit2 += 126 - 2; pit2 = (pit2 / 7) * 12 + pit_st[pit2 % 7]; switch (acc) { case A_DS: pit2 += 2; break; case A_SH: pit2++; break; case A_FT: pit2--; break; case A_DF: pit2 -= 2; break; } val = (pit2 - pit1) * 3; switch (acc) { default: return val; case A_DS: case A_SH: val++; break; case A_FT: case A_DF: val += 2; break; } if (val > 0) return val; return val - 3; } // create a note mapping // %%map map_name note [print [heads]] [param]* static void get_map(char *p) { struct map *map; struct note_map *note_map; char *name, *q; int l, type, pit, acc; if (*p == '\0') return; /* map name */ name = p; while (!isspace((unsigned char) *p) && *p != '\0') p++; l = p - name; /* base note */ while (isspace((unsigned char) *p)) p++; if (*p == '*') { type = MAP_ALL; p++; } else if (strncmp(p, "octave,", 7) == 0) { type = MAP_OCT; p += 7; } else if (strncmp(p, "key,", 4) == 0) { type = MAP_KEY; p += 4; } else if (strncmp(p, "all", 3) == 0) { type = MAP_ALL; while (!isspace((unsigned char) *p) && *p != '\0') p++; } else { type = MAP_ONE; } if (type != MAP_ALL) { p = parse_acc_pit(p, &pit, &acc); if (acc < 0) // if error pit = acc = 0; if (type == MAP_OCT || type == MAP_KEY) { pit %= 7; if (type == MAP_KEY) acc = A_NULL; } } else { pit = acc = 0; } // get/create the map for (map = maps; map; map = map->next) { if (strncmp(name, map->name, l) == 0) break; } if (!map) { map = getarena(sizeof *map); map->next = maps; maps = map; map->name = getarena(l + 1); strncpy(map->name, name, l); map->name[l] = '\0'; map->notes = NULL; } for (note_map = map->notes; note_map; note_map = note_map->next) { if (note_map->type == type && note_map->pit == pit && note_map->acc == acc) break; } if (!note_map) { note_map = getarena(sizeof *note_map); memset(note_map, 0, sizeof *note_map); note_map->next = map->notes; map->notes = note_map; note_map->type = type; note_map->pit = pit; note_map->acc = acc; note_map->print_pit = -128; note_map->color = -1; } /* try the optional 'print' and 'heads' parameters */ while (isspace((unsigned char) *p)) p++; if (*p == '\0') return; q = p; while (!isspace((unsigned char) *q) && *q != '\0') { if (*q == '=') break; q++; } if (isspace((unsigned char) *q) || *q == '\0') { if (*p != '*') { p = parse_acc_pit(p, &pit, &acc); if (acc >= 0) { note_map->print_pit = pit; note_map->print_acc = acc; } if (*p == '\0') return; } p = q; while (isspace((unsigned char) *p)) p++; if (*p == '\0') return; q = p; while (!isspace((unsigned char) *q) && *q != '\0') { if (*q == '=') break; q++; } if (isspace((unsigned char) *q) || *q == '\0') { name = p; p = q; l = p - name; note_map->heads = getarena(l + 1); strncpy(note_map->heads, name, l); note_map->heads[l] = '\0'; } } /* loop on the parameters */ for (;;) { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; if (strncmp(p, "heads=", 6) == 0) { p += 6; name = p; while (!isspace((unsigned char) *p) && *p != '\0') p++; l = p - name; note_map->heads = getarena(l + 1); strncpy(note_map->heads, name, l); note_map->heads[l] = '\0'; } else if (strncmp(p, "print=", 6) == 0) { p += 6; p = parse_acc_pit(p, &pit, &acc); if (acc >= 0) { note_map->print_pit = pit; note_map->print_acc = acc; } } else if (strncmp(p, "color=", 6) == 0) { int color; color = get_color(p + 6); if (color < 0) { error(1, NULL, "Bad color in %%%%map"); return; } note_map->color = 
color; } while (!isspace((unsigned char) *p) && *p != '\0') p++; } } /* -- process a pseudo-comment (%% or I:) -- */ static struct SYMBOL *process_pscomment(struct SYMBOL *s) { char w[32], *p, *q; int voice; float h1; int lock = 0; p = s->text + 2; /* skip '%%' */ q = p + strlen(p) - 5; if (q > p && strncmp(q, " lock", 5) == 0) { lock = 1; *q = '\0'; } p = get_str(w, p, sizeof w); if (s->state == ABC_S_HEAD && !check_header(s)) { error(1, s, "Cannot have %%%%%s in tune header", w); return s; } switch (w[0]) { case 'b': if (strcmp(w, "beginps") == 0 || strcmp(w, "beginsvg") == 0) { char use; if (w[5] == 'p') { if (strncmp(p, "svg", 3) == 0) use = 's'; else if (strncmp(p, "nosvg", 5) == 0) use = 'p'; else use = 'b'; } else { use = 'g'; } p = s->text + 2 + 7; while (*p != '\0' && *p != '\n') p++; if (*p == '\0') return s; /* empty */ ps_def(s, p + 1, use); return s; } if (strcmp(w, "begintext") == 0) { int job; if (s->state == ABC_S_TUNE) { if (!multicol_start) gen_ly(1); } else if (s->state == ABC_S_GLOBAL) { if (epsf || !in_fname) return s; } p = s->text + 2 + 9; while (*p == ' ' || *p == '\t') p++; if (*p != '\n') { job = get_textopt(p); while (*p != '\0' && *p != '\n') p++; if (*p == '\0') return s; /* empty */ } else { job = cfmt.textoption; } if (job != T_SKIP) { p++; write_text(w, p, job); } return s; } if (strcmp(w, "break") == 0) { struct brk_s *brk; if (s->state != ABC_S_HEAD) { error(1, s, "%%%%%s ignored", w); return s; } if (*p == '\0') return s; for (;;) { brk = malloc(sizeof *brk); p = get_symsel(&brk->symsel, p); if (!p) { error(1, s, "Bad selection in %%%%%s", w); return s; } brk->next = brks; brks = brk; if (*p != ',' && *p != ' ') break; p++; } return s; } break; case 'c': if (strcmp(w, "center") == 0) goto center; if (strcmp(w, "clef") == 0) { if (s->state != ABC_S_GLOBAL) clef_def(s); return s; } if (strcmp(w, "clip") == 0) { if (!cur_tune_opts) { error(1, s, "%%%%%s not in %%%%tune sequence", w); return s; } /* %%clip <symbol selection> "-" <symbol selection> */ if (*p != '-') { p = get_symsel(&clip_start, p); if (!p) { error(1, s, "Bad start in %%%%%s", w); return s; } if (*p != '-') { error(1, s, "Lack of '-' in %%%%%s", w); return s; } } p++; p = get_symsel(&clip_end, p); if (!p) { error(1, s, "Bad end in %%%%%s", w); return s; } if (clip_start.bar < 0) clip_start.bar = 0; if (clip_end.bar < clip_start.bar || (clip_end.bar == clip_start.bar && clip_end.time <= clip_start.time)) { clip_end.bar = (short unsigned) ~0 >> 1; } return s; } break; case 'd': if (strcmp(w, "deco") == 0) { deco_add(p); return s; } if (strcmp(w, "dynamic") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } break; case 'E': if (strcmp(w, "EPS") == 0) { float x1, y1, x2, y2; FILE *fp; char fn[STRL1], line[STRL1]; gen_ly(1); if (secure || cfmt.textoption == T_SKIP) return s; get_str(line, p, sizeof line); if ((fp = open_file(line, "eps", fn)) == NULL) { error(1, s, "No such file: %s", line); return s; } /* get the bounding box */ x1 = x2 = 0; while (fgets(line, sizeof line, fp)) { if (strncmp(line, "%%BoundingBox:", 14) == 0) { if (sscanf(&line[14], "%f %f %f %f", &x1, &y1, &x2, &y2) == 4) break; } } fclose(fp); if (x1 == x2) { error(1, s, "No bounding box in '%s'", fn); return s; } if (cfmt.textoption == T_CENTER || cfmt.textoption == T_RIGHT) { float lw; lw = ((cfmt.landscape ? 
cfmt.pageheight : cfmt.pagewidth) - cfmt.leftmargin - cfmt.rightmargin) / cfmt.scale; if (cfmt.textoption == T_CENTER) x1 += (lw - (x2 - x1)) * 0.5; else x1 += lw - (x2 - x1); } a2b("\001"); /* include file (must be the first after eob) */ bskip(y2 - y1); a2b("%.2f %.2f%%%s\n", x1, -y1, fn); buffer_eob(0); return s; } break; case 'g': if (strcmp(w, "gchord") == 0 || strcmp(w, "gstemdir") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "glyph") == 0) { if (!svg && epsf <= 1) glyph_add(p); return s; } break; case 'm': if (strcmp(w, "map") == 0) { get_map(p); return s; } if (strcmp(w, "maxsysstaffsep") == 0) { if (s->state != ABC_S_TUNE) break; parsys->voice[curvoice - voice_tb].maxsep = scan_u(p, 0); return s; } if (strcmp(w, "multicol") == 0) { float bposy; generate(); if (strncmp(p, "start", 5) == 0) { if (!in_page) a2b("%%\n"); /* initialize the output */ buffer_eob(0); bposy = get_bposy(); multicol_max = multicol_start = bposy; lmarg = cfmt.leftmargin; rmarg = cfmt.rightmargin; } else if (strncmp(p, "new", 3) == 0) { if (multicol_start == 0) { error(1, s, "%%%%%s new without start", w); } else { buffer_eob(0); bposy = get_bposy(); if (bposy < multicol_start) bskip((bposy - multicol_start) / cfmt.scale); if (bposy < multicol_max) multicol_max = bposy; cfmt.leftmargin = lmarg; cfmt.rightmargin = rmarg; } } else if (strncmp(p, "end", 3) == 0) { if (multicol_start == 0) { error(1, s, "%%%%%s end without start", w); } else { buffer_eob(0); bposy = get_bposy(); if (bposy > multicol_max) bskip((bposy - multicol_max) / cfmt.scale); else a2b("%%\n"); /* force write_buffer */ cfmt.leftmargin = lmarg; cfmt.rightmargin = rmarg; multicol_start = 0; buffer_eob(0); if (!info['X' - 'A'] && !epsf) write_buffer(); } } else { error(1, s, "Unknown keyword '%s' in %%%%%s", p, w); } return s; } break; case 'M': if (strcmp(w, "MIDI") == 0 && strncmp(p, "temperamentequal", 16) == 0) { int n; if (cfmt.nedo) { error(1, s, "%%%%MIDI temperamentequal redefined"); return s; } p += 16; while (isspace((unsigned char) *p)) p++; n = atoi(p); if (n < 7 || n > 53) { error(1, s, "Bad value in %%%%MIDI temperamentequal"); return s; } cfmt.nedo = n; } break; case 'n': if (strcmp(w, "newpage") == 0) { if (epsf || !in_fname) return s; if (s->state == ABC_S_TUNE) generate(); buffer_eob(0); write_buffer(); // use_buffer = 0; if (isdigit((unsigned char) *p)) pagenum = atoi(p); close_page(); if (s->state == ABC_S_TUNE) bskip(cfmt.topspace); return s; } break; case 'p': if (strcmp(w, "pos") == 0) { // %%pos <type> <position> p = get_str(w, p, sizeof w); set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "ps") == 0 || strcmp(w, "postscript") == 0) { ps_def(s, p, 'b'); return s; } break; case 'o': if (strcmp(w, "ornament") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } break; case 'r': if (strcmp(w, "repbra") == 0) { if (s->state != ABC_S_TUNE) return s; curvoice->norepbra = strchr("0FfNn", *p) || *p == '\0'; return s; } if (strcmp(w, "repeat") == 0) { int n, k; if (s->state != ABC_S_TUNE) return s; if (!curvoice->last_sym) { error(1, s, "%%%s cannot start a tune", w); return s; } if (*p == '\0') { n = 1; k = 1; } else { n = atoi(p); if (n < 1 || (curvoice->last_sym->type == BAR && n > 2)) { error(1, s, "Incorrect 1st value in %%%%%s", w); return s; } while (*p != '\0' && !isspace((unsigned char) *p)) p++; while (isspace((unsigned char) *p)) p++; if (*p == '\0') { k = 1; } else { k = atoi(p); if (k < 1) { // || (curvoice->last_sym->type == BAR // && n == 2 // && k > 1)) { 
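/* (hedged reading of the two %%repeat values: 'n' above seems to be how much
 * material is concerned - 1 or 2 measures when %%repeat follows a bar,
 * otherwise n notes/rests - and 'k' how many times the repeat sign is used,
 * e.g. "%%repeat 1 2" right after a bar line would ask for the one-measure
 * repeat sign to be used twice; this is inferred from the range checks here
 * and from the use of s->doty / s->nohdi1 below, not from the documentation) */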
error(1, s, "Incorrect 2nd value in %%%%%s", w); return s; } } } s->aux = REPEAT; if (curvoice->last_sym->type == BAR) s->doty = n; else s->doty = -n; sym_link(s, FMTCHG); s->nohdi1 = k; s->text = NULL; return s; } break; case 's': if (strcmp(w, "setbarnb") == 0) { if (s->state == ABC_S_TUNE) { struct SYMBOL *s2; int n; n = atoi(p); for (s2 = s->abc_next; s2; s2 = s2->abc_next) { if (s2->abc_type == ABC_T_BAR) { s2->aux = n; break; } } return s; } strcpy(w, "measurefirst"); break; } if (strcmp(w, "sep") == 0) { float h2, len, lwidth; if (s->state == ABC_S_TUNE) { gen_ly(0); } else if (s->state == ABC_S_GLOBAL) { if (epsf || !in_fname) return s; } lwidth = (cfmt.landscape ? cfmt.pageheight : cfmt.pagewidth) - cfmt.leftmargin - cfmt.rightmargin; h1 = h2 = len = 0; if (*p != '\0') { h1 = scan_u(p, 0); while (*p != '\0' && !isspace((unsigned char) *p)) p++; while (isspace((unsigned char) *p)) p++; } if (*p != '\0') { h2 = scan_u(p, 0); while (*p != '\0' && !isspace((unsigned char) *p)) p++; while (isspace((unsigned char) *p)) p++; } if (*p != '\0') len = scan_u(p, 0); if (h1 < 1) h1 = 0.5 CM; if (h2 < 1) h2 = h1; if (len < 1) len = 3.0 CM; bskip(h1); a2b("%.1f %.1f sep0\n", len / cfmt.scale, (lwidth - len) * 0.5 / cfmt.scale); bskip(h2); buffer_eob(0); return s; } if (strcmp(w, "staff") == 0) { int staff; if (s->state != ABC_S_TUNE) return s; if (*p == '+') staff = curvoice->cstaff + atoi(p + 1); else if (*p == '-') staff = curvoice->cstaff - atoi(p + 1); else staff = atoi(p) - 1; if ((unsigned) staff > (unsigned) nstaff) { error(1, s, "Bad staff in %%%%%s", w); return s; } curvoice->floating = 0; curvoice->cstaff = staff; return s; } if (strcmp(w, "staffbreak") == 0) { if (s->state != ABC_S_TUNE) return s; if (isdigit(*p)) { s->xmx = scan_u(p, 0); if (s->xmx < 0) { error(1, s, "Bad value in %%%%%s", w); return s; } if (p[strlen(p) - 1] == 'f') s->doty = 1; } else { s->xmx = 0.5 CM; if (*p == 'f') s->doty = 1; } sym_link(s, STBRK); return s; } if (strcmp(w, "stafflines") == 0) { if (isdigit((unsigned char) *p)) { switch (atoi(p)) { case 0: p = "..."; break; case 1: p = "..|"; break; case 2: p = ".||"; break; case 3: p = ".|||"; break; case 4: p = "||||"; break; case 5: p = "|||||"; break; case 6: p = "||||||"; break; case 7: p = "|||||||"; break; case 8: p = "||||||||"; break; default: error(1, s, "Bad number of lines"); break; } } else { int l; l = strlen(p); q = p; p = getarena(l + 1); strcpy(p, q); } if (s->state != ABC_S_TUNE) { for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].stafflines = p; } else { curvoice->stafflines = p; } return s; } if (strcmp(w, "staffscale") == 0) { char *q; float scale; scale = strtod(p, &q); if (scale < 0.3 || scale > 2 || (*q != '\0' && *q != ' ')) { error(1, s, "Bad value in %%%%%s", w); return s; } if (s->state != ABC_S_TUNE) { for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].staffscale = scale; } else { curvoice->staffscale = scale; } return s; } if (strcmp(w, "staves") == 0 || strcmp(w, "score") == 0) { if (s->state == ABC_S_GLOBAL) return s; get_staves(s); return s; } if (strcmp(w, "stemdir") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "sysstaffsep") == 0) { if (s->state != ABC_S_TUNE) break; parsys->voice[curvoice - voice_tb].sep = scan_u(p, 0); return s; } break; case 't': if (strcmp(w, "text") == 0) { int job; center: if (s->state == ABC_S_TUNE) { gen_ly(1); } else if (s->state == ABC_S_GLOBAL) { if (epsf || !in_fname) return s; } if (w[0] == 'c') { job = T_CENTER; } else { job = cfmt.textoption; 
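/* (when %%text carries no explicit option, the global %%textoption setting is
 * used, and the switch below apparently clamps anything other than
 * left/right/center/skip back to left-justified output - so, for instance, a
 * global fill or justify text option, if configured, would still produce
 * left-aligned %%text lines) */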
switch(job) { case T_SKIP: return s; case T_LEFT: case T_RIGHT: case T_CENTER: break; default: job = T_LEFT; break; } } write_text(w, p, job); return s; } if (strcmp(w, "tablature") == 0) { struct tblt_s *tblt; int i, j; tblt = tblt_parse(p); if (tblt == 0) return s; switch (s->state) { case ABC_S_TUNE: case ABC_S_HEAD: for (i = 0; i < ncmdtblt; i++) { if (cmdtblts[i].active) continue; j = cmdtblts[i].index; if (j < 0 || tblts[j] == tblt) return s; } /* !! 2 tblts per voice !! */ if (curvoice->tblts[0] == tblt || curvoice->tblts[1] == tblt) break; if (curvoice->tblts[1]) { error(1, s, "Too many tablatures for voice %s", curvoice->id); break; } if (!curvoice->tblts[0]) curvoice->tblts[0] = tblt; else curvoice->tblts[1] = tblt; break; } return s; } if (strcmp(w, "transpose") == 0) { struct VOICE_S *p_voice; struct SYMBOL *s2; int i, val; val = get_transpose(p); switch (s->state) { case ABC_S_GLOBAL: cfmt.transpose = val; return s; case ABC_S_HEAD: { cfmt.transpose += val; for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) { p_voice->transpose = cfmt.transpose; memcpy(&p_voice->key, &p_voice->okey, sizeof p_voice->key); key_transpose(&p_voice->key); memcpy(&p_voice->ckey, &p_voice->key, sizeof p_voice->ckey); if (p_voice->key.empty) p_voice->key.sf = 0; } return s; } } curvoice->transpose = cfmt.transpose + val; s2 = curvoice->sym; if (!s2) { memcpy(&curvoice->key, &curvoice->okey, sizeof curvoice->key); key_transpose(&curvoice->key); memcpy(&curvoice->ckey, &curvoice->key, sizeof curvoice->ckey); if (curvoice->key.empty) curvoice->key.sf = 0; return s; } for (;;) { if (s2->type == KEYSIG) break; if (s2->time == curvoice->time) { s2 = s2->prev; if (s2) continue; } s2 = s; s2->abc_type = ABC_T_INFO; s2->text = (char *) getarena(2); s2->text[0] = 'K'; s2->text[1] = '\0'; sym_link(s2, KEYSIG); // if (!curvoice->ckey.empty) // s2->aux = curvoice->ckey.sf; s2->aux = curvoice->key.sf; break; } memcpy(&s2->u.key, &curvoice->okey, sizeof s2->u.key); key_transpose(&s2->u.key); memcpy(&curvoice->ckey, &s2->u.key, sizeof curvoice->ckey); if (curvoice->key.empty) s2->u.key.sf = 0; return s; } if (strcmp(w, "tune") == 0) { struct SYMBOL *s2, *s3; struct tune_opt_s *opt, *opt2; if (s->state != ABC_S_GLOBAL) { error(1, s, "%%%%%s ignored", w); return s; } /* if void %%tune, remove all tune options */ if (*p == '\0') { opt = tune_opts; while (opt) { free_voice_opt(opt->voice_opts); opt2 = opt->next; free(opt); opt = opt2; } tune_opts = NULL; return s; } if (strcmp(p, "end") == 0) return s; /* end of previous %%tune */ /* search the end of the tune options */ s2 = s; for (;;) { s3 = s2->abc_next; if (!s3) break; if (s3->abc_type != ABC_T_NULL && (s3->abc_type != ABC_T_PSCOM || strncmp(&s3->text[2], "tune ", 5) == 0)) break; s2 = s3; } /* search if already a same %%tune */ opt2 = NULL; for (opt = tune_opts; opt; opt = opt->next) { if (strcmp(opt->s->text, s->text) == 0) break; opt2 = opt; } if (opt) { free_voice_opt(opt->voice_opts); if (s2 == s) { /* no option */ if (!opt2) tune_opts = opt->next; else opt2->next = opt->next; free(opt); return s; } opt->voice_opts = NULL; } else { if (s2 == s) /* no option */ return s; opt = malloc(sizeof *opt); memset(opt, 0, sizeof *opt); opt->next = tune_opts; tune_opts = opt; } /* link the options */ opt->s = s3 = s; cur_tune_opts = opt; s = s->abc_next; for (;;) { if (s->abc_type != ABC_T_PSCOM) continue; if (strncmp(&s->text[2], "voice ", 6) == 0) { s = process_pscomment(s); } else { s->state = ABC_S_HEAD; /* !! no reverse link !! 
*/ s3->next = s; s3 = s; } if (s == s2) break; s = s->abc_next; } cur_tune_opts = NULL; return s; } break; case 'u': if (strcmp(w, "user") == 0) { deco[s->u.user.symbol] = parse.deco_tb[s->u.user.value - 128]; return s; } break; case 'v': if (strcmp(w, "vocal") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "voice") == 0) { struct SYMBOL *s2, *s3; struct voice_opt_s *opt, *opt2; if (s->state != ABC_S_GLOBAL) { error(1, s, "%%%%voice ignored"); return s; } /* if void %%voice, free all voice options */ if (*p == '\0') { if (cur_tune_opts) { free_voice_opt(cur_tune_opts->voice_opts); cur_tune_opts->voice_opts = NULL; } else { free_voice_opt(voice_opts); voice_opts = NULL; } return s; } if (strcmp(p, "end") == 0) return s; /* end of previous %%voice */ if (cur_tune_opts) opt = cur_tune_opts->voice_opts; else opt = voice_opts; /* search the end of the voice options */ s2 = s; for (;;) { s3 = s2->abc_next; if (!s3) break; if (s3->abc_type != ABC_T_NULL && (s3->abc_type != ABC_T_PSCOM || strncmp(&s3->text[2], "score ", 6) == 0 || strncmp(&s3->text[2], "staves ", 7) == 0 || strncmp(&s3->text[2], "tune ", 5) == 0 || strncmp(&s3->text[2], "voice ", 6) == 0)) break; s2 = s3; } /* if already the same %%voice * remove the options */ opt2 = NULL; for ( ; opt; opt = opt->next) { if (strcmp(opt->s->text, s->text) == 0) { if (!opt2) { if (cur_tune_opts) cur_tune_opts->voice_opts = NULL; else voice_opts = NULL; } else { opt2->next = opt->next; } free(opt); break; } opt2 = opt; } if (s2 == s) /* no option */ return s; opt = malloc(sizeof *opt + strlen(p)); memset(opt, 0, sizeof *opt); if (cur_tune_opts) { opt->next = cur_tune_opts->voice_opts; cur_tune_opts->voice_opts = opt; } else { opt->next = voice_opts; voice_opts = opt; } /* link the options */ opt->s = s3 = s; for ( ; s != s2; s = s->abc_next) { if (s->abc_next->abc_type != ABC_T_PSCOM) continue; s->abc_next->state = ABC_S_TUNE; s3->next = s->abc_next; s3 = s3->next; } return s; } if (strcmp(w, "voicecolor") == 0) { int color; if (!curvoice) return s; color = get_color(p); if (color < 0) error(1, s, "Bad color in %%%%voicecolor"); else curvoice->color = color; return s; } if (strcmp(w, "voicecombine") == 0) { int combine; if (sscanf(p, "%d", &combine) != 1) { error(1, s, "Bad value in %%%%voicecombine"); return s; } switch (s->state) { case ABC_S_GLOBAL: cfmt.combinevoices = combine; break; case ABC_S_HEAD: for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].combine = combine; break; default: curvoice->combine = combine; break; } return s; } if (strcmp(w, "voicemap") == 0) { if (s->state != ABC_S_TUNE) { for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].map_name = p; } else { curvoice->map_name = p; } return s; } if (strcmp(w, "voicescale") == 0) { char *q; float scale; scale = strtod(p, &q); if (scale < 0.6 || scale > 1.5 || (*q != '\0' && *q != ' ')) { error(1, s, "Bad %%%%voicescale value"); return s; } if (s->state != ABC_S_TUNE) { for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].scale = scale; } else { curvoice->scale = scale; } return s; } if (strcmp(w, "volume") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "vskip") == 0) { if (s->state == ABC_S_TUNE) { gen_ly(0); } else if (s->state == ABC_S_GLOBAL) { if (epsf || !in_fname) return s; } bskip(scan_u(p, 0)); buffer_eob(0); return s; } break; } if (s->state == ABC_S_TUNE) { if (strcmp(w, "leftmargin") == 0 || strcmp(w, "rightmargin") == 0 || strcmp(w, "scale") == 0) { generate(); block_put(); } } 
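/* Any %%command not recognized by the switch above falls through to here and
 * is handed to interpret_fmt_line() as an ordinary format parameter; the
 * optional trailing " lock" stripped near the top of this function is passed
 * along, apparently so that later, unlocked redefinitions of the same
 * parameter can be ignored. */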
interpret_fmt_line(w, p, lock); if (cfmt.alignbars && strcmp(w, "alignbars") == 0) { int i; generate(); if ((unsigned) cfmt.alignbars > MAXSTAFF) { error(1, s, "Too big value in %%%%alignbars"); cfmt.alignbars = MAXSTAFF; } if (staves_found >= 0) /* (compatibility) */ cfmt.alignbars = nstaff + 1; first_voice = curvoice = voice_tb; for (i = 0; i < cfmt.alignbars; i++) { voice_tb[i].staff = voice_tb[i].cstaff = i; voice_tb[i].next = &voice_tb[i + 1]; parsys->staff[i].flags |= STOP_BAR; parsys->voice[i].staff = i; parsys->voice[i].range = i; } i--; voice_tb[i].next = NULL; parsys->nstaff = nstaff = i; } return s; } /* -- set the duration of notes/rests in a tuplet -- */ /*fixme: KO if voice change*/ /*fixme: KO if in a grace sequence*/ static void set_tuplet(struct SYMBOL *t) { struct SYMBOL *s, *s1; int l, r, lplet, grace; r = t->u.tuplet.r_plet; grace = t->flags & ABC_F_GRACE; l = 0; for (s = t->abc_next; s; s = s->abc_next) { if (s->abc_type == ABC_T_TUPLET) { struct SYMBOL *s2; int l2, r2; r2 = s->u.tuplet.r_plet; l2 = 0; for (s2 = s->abc_next; s2; s2 = s2->abc_next) { switch (s2->abc_type) { case ABC_T_NOTE: case ABC_T_REST: break; case ABC_T_EOLN: if (s2->u.eoln.type != 1) { error(1, t, "End of line found inside a nested tuplet"); return; } continue; default: continue; } if (s2->u.note.notes[0].len == 0) continue; if (grace ^ (s2->flags & ABC_F_GRACE)) continue; s1 = s2; l2 += s1->dur; if (--r2 <= 0) break; } l2 = l2 * s->u.tuplet.q_plet / s->u.tuplet.p_plet; s->aux = l2; l += l2; r -= s->u.tuplet.r_plet; if (r == 0) break; if (r < 0) { error(1, t, "Bad nested tuplet"); break; } s = s2; continue; } switch (s->abc_type) { case ABC_T_NOTE: case ABC_T_REST: break; case ABC_T_EOLN: if (s->u.eoln.type != 1) { error(1, t, "End of line found inside a tuplet"); return; } continue; default: continue; } if (s->u.note.notes[0].len == 0) /* space ('y') */ continue; if (grace ^ (s->flags & ABC_F_GRACE)) continue; s1 = s; l += s->dur; if (--r <= 0) break; } if (!s) { error(1, t, "End of tune found inside a tuplet"); return; } if (t->aux != 0) /* if nested tuplet */ lplet = t->aux; else lplet = (l * t->u.tuplet.q_plet) / t->u.tuplet.p_plet; r = t->u.tuplet.r_plet; for (s = t->abc_next; s; s = s->abc_next) { int olddur; if (s->abc_type == ABC_T_TUPLET) { int r2; r2 = s->u.tuplet.r_plet; s1 = s; olddur = s->aux; s1->aux = (olddur * lplet) / l; l -= olddur; lplet -= s1->aux; r -= r2; for (;;) { s = s->abc_next; if (s->abc_type != ABC_T_NOTE && s->abc_type != ABC_T_REST) continue; if (s->u.note.notes[0].len == 0) continue; if (grace ^ (s->flags & ABC_F_GRACE)) continue; if (--r2 <= 0) break; } if (r <= 0) goto done; continue; } if (s->abc_type != ABC_T_NOTE && s->abc_type != ABC_T_REST) continue; if (s->u.note.notes[0].len == 0) continue; s->sflags |= S_IN_TUPLET; if (grace ^ (s->flags & ABC_F_GRACE)) continue; s1 = s; olddur = s->dur; s1->dur = (olddur * lplet) / l; if (--r <= 0) break; l -= olddur; lplet -= s1->dur; } done: if (grace) { error(1, t, "Tuplets in grace note sequence not yet treated"); } else { sym_link(t, TUPLET); t->aux = cfmt.tuplets; } }
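/* A worked example of the duration arithmetic in set_tuplet() above
 * (figures assume the usual BASE_LEN of 1536 units per whole note):
 * for a plain (3:2:3 tuplet of three eighth notes, l sums to 3 * 192 = 576,
 * lplet = l * q_plet / p_plet = 576 * 2 / 3 = 384, and each note is rescaled
 * to olddur * lplet / l = 192 * 384 / 576 = 128 units, i.e. the three notes
 * together last exactly as long as two ordinary eighth notes. */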
/* * Parsing functions. * * This file is part of abcm2ps. * * Copyright (C) 1998-2020 Jean-François Moine (http://moinejf.free.fr) * Adapted from abc2ps, Copyright (C) 1996-1998 Michael Methfessel * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <ctype.h> #include <regex.h> #include "abcm2ps.h" /* options = external formatting */ struct symsel_s { /* symbol selection */ short bar; short time; char seq; }; struct brk_s { /* music line break */ struct brk_s *next; struct symsel_s symsel; }; struct voice_opt_s { /* voice options */ struct voice_opt_s *next; struct SYMBOL *s; /* list of options (%%xxx) */ }; struct tune_opt_s { /* tune options */ struct tune_opt_s *next; struct voice_opt_s *voice_opts; struct SYMBOL *s; /* list of options (%%xxx) */ }; int nstaff; /* (0..MAXSTAFF-1) */ struct SYMBOL *tsfirst; /* first symbol in the time sorted list */ struct VOICE_S voice_tb[MAXVOICE]; /* voice table */ struct VOICE_S *first_voice; /* first voice */ struct SYSTEM *cursys; /* current system */ static struct SYSTEM *parsys; /* current system while parsing */ struct FORMAT dfmt; /* current global format */ int nbar; /* current measure number */ struct map *maps; /* note mappings */ static struct voice_opt_s *voice_opts, *tune_voice_opts; static struct tune_opt_s *tune_opts, *cur_tune_opts; static struct brk_s *brks; static struct symsel_s clip_start, clip_end; static INFO info_glob; /* global info definitions */ static char *deco_glob[256]; /* global decoration table */ static struct map *maps_glob; /* save note maps */ static int over_time; /* voice overlay start time */ static int over_mxtime; /* voice overlay max time */ static short over_bar; /* voice overlay in a measure */ static short over_voice; /* main voice in voice overlay */ static int staves_found; /* time of the last %%staves */ static int abc2win; static int capo; // capo indication float multicol_start; /* (for multicol) */ static float multicol_max; static float lmarg, rmarg; static void get_clef(struct SYMBOL *s); static struct SYMBOL *get_info(struct SYMBOL *s); static void get_key(struct SYMBOL *s); static void get_meter(struct SYMBOL *s); static void get_voice(struct SYMBOL *s); static void get_note(struct SYMBOL *s); static struct SYMBOL *process_pscomment(struct SYMBOL *s); static void ps_def(struct SYMBOL *s, char *p, char use); static void set_tblt(struct VOICE_S *p_voice); static void set_tuplet(struct SYMBOL *s); /* -- weight of the symbols -- */ static char w_tb[NSYMTYPES] = { /* !! index = symbol type !! 
*/ 0, 9, /* 1- note / rest */ 3, /* 2- space */ 2, /* 3- bar */ 1, /* 4- clef */ 6, /* 5- timesig */ 5, /* 6- keysig */ 0, /* 7- tempo */ 0, /* 8- staves */ 9, /* 9- mrest */ 0, /* 10- part */ 3, /* 11- grace */ 0, /* 12- fmtchg */ 8, /* 13- tuplet */ 7, /* 14- stbrk */ 7 /* 15- custos */ }; /* key signature transposition tables */ static signed char cde2fcg[7] = {0, 2, 4, -1, 1, 3, 5}; static char cgd2cde[7] = {0, 4, 1, 5, 2, 6, 3}; /* -- link a ABC symbol into the current voice -- */ static void sym_link(struct SYMBOL *s, int type) { struct VOICE_S *p_voice = curvoice; if (!p_voice->ignore) { s->prev = p_voice->last_sym; if (s->prev) p_voice->last_sym->next = s; else p_voice->sym = s; p_voice->last_sym = s; //fixme:test bug // } else { // if (p_voice->sym) // p_voice->last_sym = p_voice->sym = s; } s->type = type; s->voice = p_voice - voice_tb; s->staff = p_voice->cstaff; s->time = p_voice->time; s->posit = p_voice->posit; } /* -- add a new symbol in a voice -- */ struct SYMBOL *sym_add(struct VOICE_S *p_voice, int type) { struct SYMBOL *s; struct VOICE_S *p_voice2; s = (struct SYMBOL *) getarena(sizeof *s); memset(s, 0, sizeof *s); p_voice2 = curvoice; curvoice = p_voice; sym_link(s, type); curvoice = p_voice2; if (p_voice->second) s->sflags |= S_SECOND; if (p_voice->floating) s->sflags |= S_FLOATING; if (s->prev) { s->fn = s->prev->fn; s->linenum = s->prev->linenum; s->colnum = s->prev->colnum; } return s; } /* -- expand a multi-rest into single rests and measure bars -- */ static void mrest_expand(struct SYMBOL *s) { struct VOICE_S *p_voice; struct SYMBOL *s2, *next; struct decos dc; int nb, dt; nb = s->u.bar.len; dt = s->dur / nb; /* change the multi-rest (type bar) to a single rest */ memcpy(&dc, &s->u.bar.dc, sizeof dc); memset(&s->u.note, 0, sizeof s->u.note); s->type = NOTEREST; s->abc_type = ABC_T_REST; // s->nhd = 0; s->dur = s->u.note.notes[0].len = dt; s->head = H_FULL; s->nflags = -2; /* add the bar(s) and rest(s) */ next = s->next; p_voice = &voice_tb[s->voice]; p_voice->last_sym = s; p_voice->time = s->time + dt; p_voice->cstaff = s->staff; s2 = s; while (--nb > 0) { s2 = sym_add(p_voice, BAR); s2->abc_type = ABC_T_BAR; s2->u.bar.type = B_SINGLE; s2 = sym_add(p_voice, NOTEREST); s2->abc_type = ABC_T_REST; s2->flags = s->flags; s2->dur = s2->u.note.notes[0].len = dt; s2->head = H_FULL; s2->nflags = -2; p_voice->time += dt; } s2->next = next; if (next) next->prev = s2; /* copy the mrest decorations to the last rest */ memcpy(&s2->u.note.dc, &dc, sizeof s2->u.note.dc); } /* -- sort all symbols by time and vertical sequence -- */ static void sort_all(void) { struct SYSTEM *sy; struct SYMBOL *s, *prev, *s2; struct VOICE_S *p_voice; int fl, voice, time, w, wmin, multi, mrest_time; int nb, r, set_sy, new_sy; // nv struct SYMBOL *vtb[MAXVOICE]; signed char vn[MAXVOICE]; /* voice indexed by range */ /* memset(vtb, 0, sizeof vtb); */ mrest_time = -1; for (p_voice = first_voice; p_voice; p_voice = p_voice->next) vtb[p_voice - voice_tb] = p_voice->sym; /* initialize the voice order */ sy = cursys; set_sy = 1; new_sy = 0; prev = NULL; fl = 1; /* (have gcc happy) */ multi = -1; /* (have gcc happy) */ for (;;) { if (set_sy) { fl = 1; // start a new sequence // if (!new_sy) { if (1) { set_sy = 0; multi = -1; memset(vn, -1, sizeof vn); for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { voice = p_voice - voice_tb; r = sy->voice[voice].range; if (r < 0) continue; vn[r] = voice; multi++; } } } /* search the min time and symbol weight */ wmin = time = (unsigned) ~0 >> 1; /* max 
int */ // nv = nb = 0; for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (!s || s->time > time) continue; w = w_tb[s->type]; if (s->time < time) { time = s->time; wmin = w; // nb = 0; } else if (w < wmin) { wmin = w; // nb = 0; } #if 0 if (!(s->sflags & S_SECOND)) { nv++; if (s->type == BAR) nb++; } #endif if (s->type == MREST) { if (s->u.bar.len == 1) mrest_expand(s); else if (multi > 0) mrest_time = time; } } if (wmin > 127) break; /* done */ #if 0 /* align the measure bars */ if (nb != 0 && nb != nv) { /* if other symbol than bars */ wmin = (unsigned) ~0 >> 1; for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (!s || s->time > time || s->type == BAR) continue; w = w_tb[s->type]; if (w < wmin) wmin = w; } if (wmin > 127) wmin = w_tb[BAR]; } #endif /* if some multi-rest and many voices, expand */ if (time == mrest_time) { nb = 0; for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (!s || s->time != time) continue; w = w_tb[s->type]; if (w != wmin) continue; if (s->type != MREST) { mrest_time = -1; /* some note or rest */ break; } if (nb == 0) { nb = s->u.bar.len; } else if (nb != s->u.bar.len) { mrest_time = -1; /* different duration */ break; } } if (mrest_time < 0) { for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (s && s->type == MREST) mrest_expand(s); } } } /* link the vertical sequence */ for (r = 0; r < MAXVOICE; r++) { voice = vn[r]; if (voice < 0) break; s = vtb[voice]; if (!s || s->time != time || w_tb[s->type] != wmin) continue; if (s->type == STAVES) { // change STAVES to a flag sy = sy->next; set_sy = new_sy = 1; if (s->prev) s->prev->next = s->next; else voice_tb[voice].sym = s->next; if (s->next) s->next->prev = s->prev; } else { if (fl) { fl = 0; s->sflags |= S_SEQST; } if (new_sy) { new_sy = 0; s->sflags |= S_NEW_SY; } s->ts_prev = prev; if (prev) { prev->ts_next = s; //fixme: bad error when the 1st voice is second // if (s->type == BAR // && (s->sflags & S_SECOND) // && prev->type != BAR // && !(s->flags & ABC_F_INVIS)) // error(1, s, "Bad measure bar"); } else { tsfirst = s; } prev = s; } vtb[voice] = s->next; } fl = wmin; /* start a new sequence if some space */ } if (!prev) return; /* if no bar or format_change at end of tune, add a dummy symbol */ if ((prev->type != BAR && prev->type != FMTCHG) || new_sy) { p_voice = &voice_tb[prev->voice]; p_voice->last_sym = prev; s = sym_add(p_voice, FMTCHG); s->aux = -1; s->time = prev->time + prev->dur; s->sflags = S_SEQST; if (new_sy) s->sflags |= S_NEW_SY; prev->ts_next = s; s->ts_prev = prev; for (;;) { prev->sflags &= ~S_EOLN; if (prev->sflags & S_SEQST) break; prev = prev->ts_prev; } } /* if Q: from tune header, put it at start of the music */ s2 = info['Q' - 'A']; if (!s2) return; info['Q' - 'A'] = NULL; s = tsfirst->extra; while (s) { if (s->type == TEMPO) return; /* already a tempo */ s = s->next; } s = tsfirst; s2->type = TEMPO; s2->voice = s->voice; s2->staff = s->staff; s2->time = s->time; if (s->extra) { s2->next = s->extra; s2->next->prev = s2; } s->extra = s2; } /* -- move the symbols with no width to the next symbol -- */ static void voice_compress(void) { struct VOICE_S *p_voice; struct SYMBOL *s, *s2, *s3, *ns; for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { //8.7.0 - for fmt at end of music line // if (p_voice->ignore) // continue; p_voice->ignore = 0; for (s = p_voice->sym; s; s = s->next) { if (s->time >= staves_found) break; } ns = NULL; 
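/* (descriptive note: 'ns' remembers the first of a run of zero-width symbols
 * - tempo, part, tuplet, dummy format changes, grace runs and the like - so
 * that the whole run can later be hung off the 'extra' pointer of the next
 * symbol that does take horizontal space, which is what "moving the symbols
 * with no width" means here) */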
for ( ; s; s = s->next) { switch (s->type) { #if 0 // test case KEYSIG: /* remove the empty key signatures */ if (s->u.key.empty) { if (s->prev) s->prev->next = s->next; else p_voice->sym = s->next; if (s->next) s->next->prev = s->prev; continue; } break; #endif case FMTCHG: s2 = s->extra; if (s2) { /* dummy format */ if (!ns) ns = s2; if (s->prev) { s->prev->next = s2; s2->prev = s->prev; } if (!s->next) { ns = NULL; break; } while (s2->next) s2 = s2->next; s->next->prev = s2; s2->next = s->next; } /* fall thru */ case TEMPO: case PART: case TUPLET: if (!ns) ns = s; continue; case MREST: /* don't shift P: and Q: */ if (!ns) continue; s2 = (struct SYMBOL *) getarena(sizeof *s); memset(s2, 0, sizeof *s2); s2->type = SPACE; s2->u.note.notes[1].len = -1; s2->flags = ABC_F_INVIS; s2->voice = s->voice; s2->staff = s->staff; s2->time = s->time; s2->sflags = s->sflags; s2->next = s; s2->prev = s->prev; s2->prev->next = s2; s->prev = s2; s = s2; break; } if (s->flags & ABC_F_GRACE) { if (!ns) ns = s; while (!(s->flags & ABC_F_GR_END)) s = s->next; s2 = (struct SYMBOL *) getarena(sizeof *s); memcpy(s2, s, sizeof *s2); s2->abc_type = 0; s2->type = GRACE; s2->dur = 0; s2->next = s->next; if (s2->next) { s2->next->prev = s2; if (cfmt.graceword) { for (s3 = s2->next; s3; s3 = s3->next) { switch (s3->type) { case SPACE: continue; case NOTEREST: s2->ly = s3->ly; s3->ly = NULL; default: break; } break; } } } else { p_voice->last_sym = s2; } s2->prev = s; s->next = s2; s = s2; // with w_tb[BAR] = 2, // the grace notes go after the bar // if before a bar, change the grace time if (s->next && s->next->type == BAR) s->time--; } if (!ns) continue; s->extra = ns; s->prev->next = NULL; s->prev = ns->prev; if (s->prev) s->prev->next = s; else p_voice->sym = s; ns->prev = NULL; ns = NULL; } /* when symbols with no space at end of tune, * add a dummy format */ if (ns) { s = sym_add(p_voice, FMTCHG); s->aux = -1; /* nothing */ s->extra = ns; s->prev->next = NULL; /* unlink */ s->prev = ns->prev; if (s->prev) s->prev->next = s; else p_voice->sym = s; ns->prev = NULL; } } } /* -- duplicate the voices as required -- */ static void voice_dup(void) { struct VOICE_S *p_voice, *p_voice2; struct SYMBOL *s, *s2, *g, *g2; int voice; for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { if ((voice = p_voice->clone) < 0) continue; p_voice->clone = -1; p_voice2 = &voice_tb[voice]; for (s = p_voice->sym; s; s = s->next) { //fixme: there may be other symbols before the %%staves at this same time if (s->time >= staves_found) break; } for ( ; s; s = s->next) { if (s->type == STAVES) continue; s2 = (struct SYMBOL *) getarena(sizeof *s2); memcpy(s2, s, sizeof *s2); s2->prev = p_voice2->last_sym; s2->next = NULL; if (p_voice2->sym) p_voice2->last_sym->next = s2; else p_voice2->sym = s2; p_voice2->last_sym = s2; s2->voice = voice; s2->staff = p_voice2->staff; if (p_voice2->second) s2->sflags |= S_SECOND; else s2->sflags &= ~S_SECOND; if (p_voice2->floating) s2->sflags |= S_FLOATING; else s2->sflags &= ~S_FLOATING; s2->ly = NULL; g = s2->extra; if (!g) continue; g2 = (struct SYMBOL *) getarena(sizeof *g2); memcpy(g2, g, sizeof *g2); s2->extra = g2; s2 = g2; s2->voice = voice; s2->staff = p_voice2->staff; for (g = g->next; g; g = g->next) { g2 = (struct SYMBOL *) getarena(sizeof *g2); memcpy(g2, g, sizeof *g2); s2->next = g2; g2->prev = s2; s2 = g2; s2->voice = voice; s2->staff = p_voice2->staff; } } } } /* -- create a new staff system -- */ static void system_new(void) { struct SYSTEM *new_sy; int staff, voice; new_sy = (struct 
SYSTEM *) getarena(sizeof *new_sy); if (!parsys) { memset(new_sy, 0, sizeof *new_sy); for (voice = 0; voice < MAXVOICE; voice++) { new_sy->voice[voice].range = -1; } for (staff = 0; staff < MAXSTAFF; staff++) { new_sy->staff[staff].stafflines = "|||||"; new_sy->staff[staff].staffscale = 1; } cursys = new_sy; } else { for (voice = 0; voice < MAXVOICE; voice++) { // update the previous system // if (parsys->voice[voice].range < 0 // || parsys->voice[voice].second) // continue; staff = parsys->voice[voice].staff; if (voice_tb[voice].stafflines) parsys->staff[staff].stafflines = voice_tb[voice].stafflines; if (voice_tb[voice].staffscale != 0) parsys->staff[staff].staffscale = voice_tb[voice].staffscale; } memcpy(new_sy, parsys, sizeof *new_sy); for (voice = 0; voice < MAXVOICE; voice++) { new_sy->voice[voice].range = -1; new_sy->voice[voice].second = 0; } for (staff = 0; staff < MAXSTAFF; staff++) new_sy->staff[staff].flags = 0; parsys->next = new_sy; } parsys = new_sy; } /* -- initialize the voices and staves -- */ /* this routine is called when starting the generation */ static void system_init(void) { voice_compress(); voice_dup(); sort_all(); /* define the time / vertical sequences */ // if (!tsfirst) // return; // parsys->nstaff = nstaff; /* save the number of staves */ } /* go to a global (measure + time) */ static struct SYMBOL *go_global_time(struct SYMBOL *s, struct symsel_s *symsel) { struct SYMBOL *s2; int bar_time; if (symsel->bar <= 1) { /* special case: there is no measure 0/1 */ // && nbar == -1) { /* see set_bar_num */ if (symsel->bar == 0) goto chk_time; for (s2 = s; s2; s2 = s2->ts_next) { if (s2->type == BAR && s2->time != 0) break; } if (s2->time < voice_tb[cursys->top_voice].meter.wmeasure) s = s2; goto chk_time; } for ( ; s; s = s->ts_next) { if (s->type == BAR && s->aux >= symsel->bar) break; } if (!s) return NULL; if (symsel->seq != 0) { int seq; seq = symsel->seq; for (s = s->ts_next; s; s = s->ts_next) { if (s->type == BAR && s->aux == symsel->bar) { if (--seq == 0) break; } } if (!s) return NULL; } chk_time: if (symsel->time == 0) return s; bar_time = s->time + symsel->time; while (s->time < bar_time) { s = s->ts_next; if (!s) return s; } do { s = s->ts_prev; /* go back to the previous sequence */ } while (!(s->sflags & S_SEQST)); return s; } /* treat %%clip */ static void do_clip(void) { struct SYMBOL *s, *s2; struct SYSTEM *sy; struct VOICE_S *p_voice; int voice; /* remove the beginning of the tune */ s = tsfirst; if (clip_start.bar > 0 || clip_start.time > 0) { s = go_global_time(s, &clip_start); if (!s) { tsfirst = NULL; return; } /* update the start of voices */ sy = cursys; for (s2 = tsfirst; s2 != s; s2 = s2->ts_next) { if (s->sflags & S_NEW_SY) sy = sy->next; switch (s2->type) { case CLEF: voice_tb[s2->voice].s_clef = s2; break; case KEYSIG: memcpy(&voice_tb[s2->voice].key, &s2->u.key, sizeof voice_tb[0].key); break; case TIMESIG: memcpy(&voice_tb[s2->voice].meter, &s2->u.meter, sizeof voice_tb[0].meter); break; } } cursys = sy; for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { voice = p_voice - voice_tb; for (s2 = s; s2; s2 = s2->ts_next) { if (s2->voice == voice) { s2->prev = NULL; break; } } p_voice->sym = s2; } tsfirst = s; s->ts_prev = NULL; } /* remove the end of the tune */ s = go_global_time(s, &clip_end); if (!s) return; /* keep the current sequence */ do { s = s->ts_next; if (!s) return; } while (!(s->sflags & S_SEQST)); /* cut the voices */ for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { voice = p_voice - voice_tb; for 
(s2 = s->ts_prev; s2; s2 = s2->ts_prev) { if (s2->voice == voice) { s2->next = NULL; break; } } if (!s2) p_voice->sym = NULL; } s->ts_prev->ts_next = NULL; } /* -- set the bar numbers and treat %%clip / %%break -- */ static void set_bar_num(void) { struct SYMBOL *s, *s2, *s3; int bar_time, wmeasure, tim; int bar_num, bar_rep; wmeasure = voice_tb[cursys->top_voice].meter.wmeasure; bar_rep = nbar; /* don't count a bar at start of line */ for (s = tsfirst; ; s = s->ts_next) { if (!s) return; switch (s->type) { case TIMESIG: case CLEF: case KEYSIG: case FMTCHG: case STBRK: continue; case BAR: if (s->aux) { nbar = s->aux; /* (%%setbarnb) */ break; } if (s->u.bar.repeat_bar && s->text && !cfmt.contbarnb) { if (s->text[0] == '1') { bar_rep = nbar; } else { nbar = bar_rep; /* restart bar numbering */ s->aux = nbar; } } break; } break; } /* set the measure number on the top bars * and move the clefs before the measure bars */ bar_time = s->time + wmeasure; /* for incomplete measure at start of tune */ bar_num = nbar; for ( ; s; s = s->ts_next) { switch (s->type) { case CLEF: if (s->sflags & S_NEW_SY) break; for (s2 = s->ts_prev; s2; s2 = s2->ts_prev) { if (s2->sflags & S_NEW_SY) { s2 = NULL; break; } switch (s2->type) { case BAR: if (s2->sflags & S_SEQST) break; continue; case MREST: case NOTEREST: case SPACE: case STBRK: case TUPLET: s2 = NULL; break; default: continue; } break; } if (!s2) break; /* move the clef */ s->next->prev = s->prev; s->prev->next = s->next; s->ts_next->ts_prev = s->ts_prev; s->ts_prev->ts_next = s->ts_next; s->next = s2; s->prev = s2->prev; if (s->prev) s->prev->next = s; s2->prev = s; s->ts_next = s2; s->ts_prev = s2->ts_prev; if (s->ts_prev) s->ts_prev->ts_next = s; s2->ts_prev = s; // if (s->sflags & S_NEW_SY) { // s->sflags &= ~S_NEW_SY; // s->ts_next->sflags |= S_NEW_SY; // } s3 = s->extra; if (s3) { if (s->ts_next->extra) { while (s3->next) s3 = s3->next; s3->next = s->ts_next->extra; s->ts_next->extra = s->extra; } else { s->ts_next->extra = s3; } s->extra = NULL; } s = s2; break; case TIMESIG: wmeasure = s->u.meter.wmeasure; if (s->time < bar_time) bar_time = s->time + wmeasure; break; case MREST: bar_num += s->u.bar.len - 1; while (s->ts_next && s->ts_next->type != BAR) s = s->ts_next; break; case BAR: // if (s->flags & ABC_F_INVIS) // break; if (s->aux) { bar_num = s->aux; /* (%%setbarnb) */ // if (s->time < bar_time) { // s->aux = 0; break; // } } else { if (s->time < bar_time) /* incomplete measure */ break; bar_num++; } /* check if any repeat bar at this time */ tim = s->time; s2 = s; do { if (s2->type == BAR && s2->u.bar.repeat_bar && s2->text && !cfmt.contbarnb) { if (s2->text[0] == '1') bar_rep = bar_num; else /* restart bar numbering */ bar_num = bar_rep; break; } s2 = s2->next; } while (s2 && s2->time == tim); s->aux = bar_num; bar_time = s->time + wmeasure; break; } } /* do the %%clip stuff */ if (clip_start.bar >= 0) { if (bar_num <= clip_start.bar || nbar > clip_end.bar) { tsfirst = NULL; return; } do_clip(); } /* do the %%break stuff */ { struct brk_s *brk; int nbar_min; // if (nbar == 1) // nbar = -1; /* see go_global_time */ nbar_min = nbar; if (nbar_min == 1) nbar_min = -1; for (brk = brks; brk; brk = brk->next) { if (brk->symsel.bar <= nbar_min || brk->symsel.bar > bar_num) continue; s = go_global_time(tsfirst, &brk->symsel); if (s) s->sflags |= S_EOLN; } } if (cfmt.measurenb < 0) /* if no display of measure bar */ nbar = bar_num; /* update in case of more music to come */ } /* -- generate a piece of tune -- */ static void generate(void) { int 
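	/* one generation pass (summary of the calls below): system_init()
	 * compresses / duplicates the voices and time-sorts the symbols,
	 * set_bar_num() numbers the measures and handles %%clip / %%break,
	 * output_music() renders into arena level 2, then the per-voice
	 * parser state is reset for any music that may follow */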
old_lvl, voice; struct VOICE_S *p_voice; system_init(); if (!tsfirst) return; /* no symbol */ set_bar_num(); if (!tsfirst) return; /* no more symbol */ old_lvl = lvlarena(2); output_music(); clrarena(2); /* clear generation */ lvlarena(old_lvl); /* reset the parser */ for (p_voice = first_voice; p_voice; p_voice = p_voice->next) { voice = p_voice - voice_tb; p_voice->sym = p_voice->last_sym = NULL; p_voice->time = 0; p_voice->have_ly = 0; p_voice->staff = cursys->voice[voice].staff; p_voice->second = cursys->voice[voice].second; p_voice->s_clef->time = 0; p_voice->lyric_start = NULL; } staves_found = 0; // (for voice compress/dup) } /* -- output the music and lyrics after tune -- */ static void gen_ly(int eob) { generate(); if (info['W' - 'A']) { put_words(info['W' - 'A']); info['W' - 'A'] = NULL; } if (eob) buffer_eob(0); } /* * for transpose purpose, check if a pitch is already in the measure or * if it is tied from a previous note, and return the associated accidental */ static int acc_same_pitch(int pitch) { struct SYMBOL *s = curvoice->last_sym->prev; int i, time; // the overlaid voices may have no measure bars // if (curvoice->id[0] == '&') // s = voice_tb[curvoice->mvoice].last_sym; if (!s) return -1; time = s->time; for (; s; s = s->prev) { switch (s->abc_type) { case ABC_T_BAR: if (s->time < time) return -1; /* no same pitch */ for (;;) { s = s->prev; if (!s) return -1; if (s->abc_type == ABC_T_NOTE) { if (s->time + s->dur == time) break; return -1; } if (s->time < time) return -1; } for (i = 0; i <= s->nhd; i++) { if (s->u.note.notes[i].pit == pitch && s->u.note.notes[i].ti1) return s->u.note.notes[i].acc; } return -1; case ABC_T_NOTE: for (i = 0; i <= s->nhd; i++) { if (s->u.note.notes[i].pit == pitch) return s->u.note.notes[i].acc; } break; } } return -1; } /* transpose a note / chord */ static void note_transpose(struct SYMBOL *s) { int i, j, m, n, d, a, dp, i1, i2, i3, i4, sf_old; static const signed char acc1[6] = {0, 1, 0, -1, 2, -2}; static const char acc2[5] = {A_DF, A_FT, A_NT, A_SH, A_DS}; m = s->nhd; sf_old = curvoice->okey.sf; i2 = curvoice->ckey.sf - sf_old; dp = cgd2cde[(i2 + 4 * 7) % 7]; if (curvoice->transpose < 0 && dp != 0) dp -= 7; dp += curvoice->transpose / 3 / 12 * 7; for (i = 0; i <= m; i++) { /* pitch */ n = s->u.note.notes[i].pit; s->u.note.notes[i].pit += dp; s->pits[i] += dp; /* accidental */ i1 = cde2fcg[(n + 5 + 16 * 7) % 7]; /* fcgdaeb */ a = s->u.note.notes[i].acc & 0x07; if (a == 0) { if (curvoice->okey.nacc == 0) { if (sf_old > 0) { if (i1 < sf_old - 1) a = A_SH; } else if (sf_old < 0) { if (i1 >= sf_old + 6) a = A_FT; } } else { for (j = 0; j < curvoice->okey.nacc; j++) { if ((n + 16 * 7 - curvoice->okey.pits[j]) % 7 == 0) { a = curvoice->okey.accs[j]; break; } } } } i3 = i1 + i2 + acc1[a] * 7; i1 = ((i3 + 1 + 21) / 7 + 2 - 3 + 32 * 5) % 5; a = acc2[(unsigned) i1]; if (s->u.note.notes[i].acc != 0) { ; } else if (curvoice->ckey.empty) { /* key none */ if (a == A_NT || acc_same_pitch(s->u.note.notes[i].pit) >= 0) continue; } else if (curvoice->ckey.nacc > 0) { /* acc list */ i4 = cgd2cde[(unsigned) ((i3 + 16 * 7) % 7)]; for (j = 0; j < curvoice->ckey.nacc; j++) { if ((i4 + 16 * 7 - curvoice->ckey.pits[j]) % 7 == 0) break; } if (j < curvoice->ckey.nacc) continue; } else { continue; } i1 = s->u.note.notes[i].acc & 0x07; i4 = s->u.note.notes[i].acc >> 3; if (i4 != 0 /* microtone */ && i1 != a) { /* different accidental type */ if (s->u.note.microscale) { n = i4; d = s->u.note.microscale; } else { n = parse.micro_tb[i4]; d = ((n & 0xff) + 1) * 2; n = 
(n >> 8) + 1; } //fixme: double sharps/flats ?*/ //fixme: does not work in all cases (tied notes, previous accidental) switch (a) { case A_NT: if (n >= d / 2) { n -= d / 2; a = i1; } else { a = i1 == A_SH ? A_FT : A_SH; } break; case A_DS: if (n >= d / 2) { s->u.note.notes[i].pit += 1; s->pits[i] += 1; n -= d / 2; } else { n += d / 2; } a = i1; break; case A_DF: if (n >= d / 2) { s->u.note.notes[i].pit -= 1; s->pits[i] -= 1; n -= d / 2; } else { n += d / 2; } a = i1; break; } if (s->u.note.microscale) { i4 = n; } else { d = d / 2 - 1 + ((n - 1) << 8); for (i4 = 1; i4 < MAXMICRO; i4++) { if (parse.micro_tb[i4] == d) break; if (parse.micro_tb[i4] == 0) { parse.micro_tb[i4] = d; break; } } if (i4 == MAXMICRO) { error(1, s, "Too many microtone accidentals"); i4 = 0; } } } s->u.note.notes[i].acc = (i4 << 3) | a; } } /* transpose a guitar chord */ static void gch_tr1(struct SYMBOL *s, int i, int i2) { char *p = &s->text[i], *q = p + 1, *new_txt; int l, latin; int n, a, i1, i3, i4; static const char note_names[] = "CDEFGAB"; static const char *latin_names[7] = { "Do", "Ré", "Mi", "Fa", "Sol", "La", "Si" }; static const char *acc_name[5] = {"bb", "b", "", "#", "##"}; /* main chord */ latin = 0; switch (*p) { case 'A': case 'B': n = *p - 'A' + 5; break; case 'C': case 'E': case 'G': n = *p - 'C'; break; case 'D': if (p[1] == 'o') { latin++; n = 0; /* Do */ break; } n = 1; break; case 'F': if (p[1] == 'a') latin++; /* Fa */ n = 3; break; case 'L': latin++; /* La */ n = 5; break; case 'M': latin++; /* Mi */ n = 2; break; case 'R': latin++; if (p[1] != 'e') latin++; /* Ré */ n = 1; /* Re */ break; case 'S': latin++; if (p[1] == 'o') { latin++; n = 4; /* Sol */ } else { n = 6; /* Si */ } break; case '/': // bass only latin--; break; default: return; } q += latin; /* allocate a new string */ new_txt = getarena(strlen(s->text) + 6); l = p - s->text; memcpy(new_txt, s->text, l); s->text = new_txt; new_txt += l; p = q; if (latin >= 0) { // if some chord a = 0; while (*p == '#') { a++; p++; } while (*p == 'b') { a--; p++; } // if (*p == '=') // p++; i3 = cde2fcg[n] + i2 + a * 7; i4 = cgd2cde[(unsigned) ((i3 + 16 * 7) % 7)]; i1 = ((i3 + 1 + 21) / 7 + 2 - 3 + 32 * 5) % 5; /* accidental */ if (latin == 0) *new_txt++ = note_names[i4]; else new_txt += sprintf(new_txt, "%s", latin_names[i4]); new_txt += sprintf(new_txt, "%s", acc_name[i1]); } /* bass */ while (*p != '\0' && *p != '\n' && *p != '/') // skip 'm'/'dim'.. 
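		/* the chord quality text ('m', 'dim', '7', ...) is copied
		 * unchanged; an optional bass note after '/' is then transposed
		 * below with the same circle-of-fifths tables (cde2fcg / cgd2cde)
		 * used for the chord root */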
*new_txt++ = *p++; if (*p == '/') { *new_txt++ = *p++; //fixme: latin names not treated q = strchr(note_names, *p); if (q) { p++; n = q - note_names; if (*p == '#') { a = 1; p++; } else if (*p == 'b') { a = -1; p++; } else { a = 0; } i3 = cde2fcg[n] + i2 + a * 7; i4 = cgd2cde[(unsigned) ((i3 + 16 * 7) % 7)]; i1 = ((i3 + 1 + 21) / 7 + 2 - 3 + 32 * 5) % 5; *new_txt++ = note_names[i4]; new_txt += sprintf(new_txt, "%s", acc_name[i1]); } } strcpy(new_txt, p); } static void gch_capo(struct SYMBOL *s) { char *p = s->text, *q, *r; int i, l, li = 0; static const char *capo_txt = " (capo: %d)"; static signed char cap_trans[] = {0, 5, -2, 3, -4, 1, -6, -1, 4, -3, 2, -5}; // search the chord symbols for (;;) { if (!strchr("^_<>@", *p)) break; p = strchr(p, '\n'); if (!p) return; p++; } // add a capo chord symbol i = p - s->text; q = strchr(p + 1, '\n'); if (q) l = q - p; else l = strlen(p); if (!capo) { capo = 1; li = strlen(capo_txt); } r = (char *) getarena(strlen(s->text) + l + li + 1); i += l; strncpy(r, s->text, i); // annotations + chord symbol r[i++] = '\n'; strncpy(r + i, p, l); // capo if (li) { sprintf(r + i + l, capo_txt, cfmt.capo); l += li; } if (q) strcpy(r + i + l, q); // ending annotations s->text = r; gch_tr1(s, i, cap_trans[cfmt.capo % 12]); } static void gch_transpose(struct SYMBOL *s) { int in_ch = 0; int i2 = curvoice->ckey.sf - curvoice->okey.sf; char *o = s->text, *p = o; // search the chord symbols for (;;) { if (in_ch || !strchr("^_<>@", *p)) { gch_tr1(s, p - s->text, i2); p = s->text + (p - o); o = s->text; for (p++; *p; p++) { if (strchr("\t;\n", *p)) break; } if (!*p) break; switch (*p) { case '\t': in_ch = 1; break; case ';': in_ch = !strchr("^_<>@", p[1]); break; default: in_ch = 0; break; } } else { p = strchr(p, '\n'); if (!p) break; } p++; } } /* -- build the guitar chords / annotations -- */ static void gch_build(struct SYMBOL *s) { struct gch *gch; char *p, *q, antype, sep; float w, h_ann, h_gch, y_above, y_below, y_left, y_right; float xspc; int l, ix, box, gch_place; if (s->posit.gch == SL_HIDDEN) return; s->gch = getarena(sizeof *s->gch * MAXGCH); memset(s->gch, 0, sizeof *s->gch * MAXGCH); if (curvoice->transpose != 0) gch_transpose(s); if (cfmt.capo) gch_capo(s); /* split the guitar chords / annotations * and initialize their vertical offsets */ gch_place = s->posit.gch == SL_BELOW ? 
-1 : 1; h_gch = cfmt.font_tb[cfmt.gcf].size; h_ann = cfmt.font_tb[cfmt.anf].size; y_above = y_below = y_left = y_right = 0; box = cfmt.gchordbox; p = s->text; gch = s->gch; sep = '\n'; antype = 'g'; /* (compiler warning) */ for (;;) { if (sep != 'n' && strchr("^_<>@", *p)) { gch->font = cfmt.anf; antype = *p++; if (antype == '@') { int n; float xo, yo; if (sscanf(p, "%f,%f%n", &xo, &yo, &n) != 2) { error(1, s, "Error in annotation \"@\""); } else { p += n; if (*p == ' ') p++; gch->x = xo; gch->y = yo; } } } else if (sep == '\n') { gch->font = cfmt.gcf; gch->box = box; antype = 'g'; } else { gch->font = (gch - 1)->font; gch->box = (gch - 1)->box; } gch->type = antype; switch (antype) { default: /* guitar chord */ if (gch_place < 0) break; /* below */ y_above += h_gch; if (box) y_above += 2; break; case '^': /* above */ y_above += h_ann; break; case '_': /* below */ break; case '<': /* left */ y_left += h_ann * 0.5; break; case '>': /* right */ y_right += h_ann * 0.5; break; case '@': /* absolute */ if (gch->x == 0 && gch->y == 0 && gch != s->gch && s->gch->type == '@') { /* if not 1st line */ gch->x = (gch - 1)->x; gch->y = (gch - 1)->y - h_ann; } break; } gch->idx = p - s->text; for (;;) { switch (*p) { default: p++; continue; case '\\': p++; if (*p == 'n') { p[-1] = '\0'; break; /* sep = 'n' */ } p++; continue; case '&': /* skip "&xxx;" */ for (;;) { switch (*p) { default: p++; continue; case ';': p++; case '\0': case '\n': case '\\': break; } break; } continue; case '\0': case ';': case '\n': break; } break; } sep = *p; if (sep == '\0') break; *p++ = '\0'; gch++; if (gch - s->gch >= MAXGCH) { error(1, s, "Too many guitar chords / annotations"); break; } } /* change the accidentals in the guitar chords */ for (ix = 0, gch = s->gch; ix < MAXGCH; ix++, gch++) { if (gch->type == '\0') break; if (gch->type != 'g') continue; p = s->text + gch->idx; q = p; for (; *p != '\0'; p++) { switch (*p) { case '#': case 'b': case '=': if (p == q /* 1st char or after a slash */ || (p != q + 1 /* or invert '\' behaviour */ && p[-1] == '\\')) break; /* set the accidentals as unused utf-8 values * (see subs.c) */ switch (*p) { case '#': *p = 0x01; break; case 'b': *p = 0x02; break; default: /* case '=': */ *p = 0x03; break; } if (p[-1] == '\\') { p--; l = strlen(p); memmove(p, p + 1, l); } break; case ' ': case '/': q = p + 1; break; } } } /* set the offsets and widths */ /*fixme:utf8*/ for (ix = 0, gch = s->gch; ix < MAXGCH; ix++, gch++) { if (gch->type == '\0') break; if (gch->type == '@') continue; /* no width */ p = s->text + gch->idx; str_font(gch->font); w = tex_str(p); gch->w = w; // + 4; switch (gch->type) { case '_': /* below */ xspc = w * GCHPRE; if (xspc > 8) xspc = 8; gch->x = -xspc; y_below -= h_ann; gch->y = y_below; break; case '^': /* above */ xspc = w * GCHPRE; if (xspc > 8) xspc = 8; gch->x = -xspc; y_above -= h_ann; gch->y = y_above; break; default: /* guitar chord */ xspc = w * GCHPRE; if (xspc > 8) xspc = 8; gch->x = -xspc; if (gch_place < 0) { /* below */ y_below -= h_gch; gch->y = y_below; if (box) { y_below -= 2; gch->y -= 1; } } else { y_above -= h_gch; gch->y = y_above; if (box) { y_above -= 2; gch->y -= 1; } } break; case '<': /* left */ gch->x = -(w + 6); y_left -= h_ann; gch->y = y_left; break; case '>': /* right */ gch->x = 6; y_right -= h_ann; gch->y = y_right; break; } } } /* get the note which will receive a lyric word */ static struct SYMBOL *next_lyric_note(struct SYMBOL *s) { while (s && (s->abc_type != ABC_T_NOTE || (s->flags & ABC_F_GRACE))) s = s->next; return s; } /* 
-- parse lyric (vocal) lines (w:) -- */ static struct SYMBOL *get_lyric(struct SYMBOL *s) { struct SYMBOL *s1, *s2; char word[128], *p, *q; int ln, cont; struct FONTSPEC *f; curvoice->have_ly = curvoice->posit.voc != SL_HIDDEN; if (curvoice->ignore) { for (;;) { if (!s->abc_next) return s; switch (s->abc_next->abc_type) { case ABC_T_PSCOM: s = process_pscomment(s->abc_next); continue; case ABC_T_INFO: if (s->abc_next->text[0] == 'w' || s->abc_next->text[0] == '+') { s = s->abc_next; continue; } break; } return s; } } f = &cfmt.font_tb[cfmt.vof]; str_font(cfmt.vof); /* (for tex_str) */ /* treat all w: lines */ cont = 0; ln = -1; s2 = s1 = NULL; // have gcc happy for (;;) { if (!cont) { if (ln >= MAXLY- 1) { error(1, s, "Too many lyric lines"); ln--; } ln++; s2 = s1; s1 = curvoice->lyric_start; if (!s1) s1 = curvoice->sym; else s1 = s1->next; if (!s1) { error(1, s, "w: without music"); return s; } } else { cont = 0; } /* scan the lyric line */ p = &s->text[2]; while (*p != '\0') { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; if (*p == '\\' && p[1] == '\0') { cont = 1; break; } switch (*p) { case '|': while (s1 && s1->type != BAR) { s2 = s1; s1 = s1->next; } if (!s1) { error(1, s2, "Not enough bar lines for lyric line"); goto ly_next; } s2 = s1; s1 = s1->next; p++; continue; case '-': word[0] = LY_HYPH; word[1] = '\0'; p++; break; case '_': word[0] = LY_UNDER; word[1] = '\0'; p++; break; case '*': word[0] = '\0'; p++; break; default: q = word; for (;;) { unsigned char c; c = *p; switch (c) { case '\0': case ' ': case '\t': case '_': case '*': case '|': break; case '~': c = ' '; goto addch; case '-': c = LY_HYPH; goto addch; case '\\': if (p[1] == '\0') break; switch (p[1]) { case '~': case '_': case '*': case '|': case '-': case ' ': case '\\': c = *++p; break; } /* fall thru */ default: addch: if (q < &word[sizeof word - 1]) *q++ = c; p++; if (c == LY_HYPH) break; continue; } break; } *q = '\0'; break; } /* store the word in the next note */ if (s1) { /* for error */ s2 = s1; s1 = next_lyric_note(s1); } if (!s1) { if (!s2) s2 = s; error(1, s2, "Too many words in lyric line"); goto ly_next; } if (word[0] != '\0' && s1->posit.voc != SL_HIDDEN) { struct lyl *lyl; float w; if (!s1->ly) { s1->ly = (struct lyrics *) getarena(sizeof (struct lyrics)); memset(s1->ly, 0, sizeof (struct lyrics)); } /* handle the font change at start of text */ q = word; if (*q == '$' && isdigit((unsigned char) q[1]) && (unsigned) (q[1] - '0') < FONT_UMAX) { int ft; ft = q[1] - '0'; if (ft == 0) ft = cfmt.vof; f = &cfmt.font_tb[ft]; str_font(ft); q += 2; } w = tex_str(q); q = tex_buf; lyl = (struct lyl *) getarena(sizeof *s1->ly->lyl[0] - sizeof s1->ly->lyl[0]->t + strlen(q) + 1); s1->ly->lyl[ln] = lyl; lyl->f = f; lyl->w = w; strcpy(lyl->t, q); /* handle the font changes inside the text */ while (*q != '\0') { if (*q == '$' && isdigit((unsigned char) q[1]) && (unsigned) (q[1] - '0') < FONT_UMAX) { int ft; q++; ft = *q - '0'; if (ft == 0) ft = cfmt.vof; f = &cfmt.font_tb[ft]; str_font(ft); } q++; } } s2 = s1; s1 = s1->next; } /* loop if more lyrics */ ly_next: for (;;) { if (!s->abc_next) goto ly_upd; switch (s->abc_next->abc_type) { case ABC_T_PSCOM: s = process_pscomment(s->abc_next); f = &cfmt.font_tb[cfmt.vof]; /* may have changed */ str_font(cfmt.vof); continue; case ABC_T_INFO: if (s->abc_next->text[0] != 'w' && s->abc_next->text[0] != '+') goto ly_upd; s = s->abc_next; if (s->text[0] == '+') cont = 1; if (!cont) { s1 = next_lyric_note(s1); if (s1) { error(1, s1, "Not enough words for lyric 
line"); } } break; /* more lyric */ default: goto ly_upd; } break; } } /* the next lyrics will go into the next notes */ ly_upd: //fixme: no error with abc-2.1 if (next_lyric_note(s1)) error(0, s1, "Not enough words for lyric line"); // fill the w: with 'blank syllabes' curvoice->lyric_start = curvoice->last_sym; return s; } /* -- add a voice in the linked list -- */ static void voice_link(struct VOICE_S *p_voice) { struct VOICE_S *p_voice2; p_voice2 = first_voice; for (;;) { if (p_voice2 == p_voice) return; if (!p_voice2->next) break; p_voice2 = p_voice2->next; } p_voice2->next = p_voice; } /* -- get a voice overlay -- */ static void get_over(struct SYMBOL *s) { struct VOICE_S *p_voice, *p_voice2, *p_voice3; int range, voice, voice2, voice3; static char tx_wrong_dur[] = "Wrong duration in voice overlay"; static char txt_no_note[] = "No note in voice overlay"; /* treat the end of overlay */ p_voice = curvoice; if (p_voice->ignore) return; if (s->abc_type == ABC_T_BAR || s->u.v_over.type == V_OVER_E) { if (!p_voice->last_sym) { error(1, s, txt_no_note); return; } p_voice->last_sym->sflags |= S_BEAM_END; over_bar = 0; if (over_time < 0) { error(1, s, "Erroneous end of voice overlap"); return; } curvoice = &voice_tb[over_voice]; if (p_voice->time != over_mxtime) { error(1, s, tx_wrong_dur); if (p_voice->time > over_mxtime) curvoice->time = p_voice->time; else p_voice->time = curvoice->time; } over_mxtime = 0; over_voice = -1; over_time = -1; return; } /* treat the full overlay start */ if (s->u.v_over.type == V_OVER_S) { over_voice = p_voice - voice_tb; over_time = p_voice->time; return; } /* (here is treated a new overlay - '&') */ /* create the extra voice if not done yet */ if (!p_voice->last_sym) { error(1, s, txt_no_note); return; } p_voice->last_sym->sflags |= S_BEAM_END; voice2 = s->u.v_over.voice; p_voice2 = &voice_tb[voice2]; if (parsys->voice[voice2].range < 0) { int clone; if (cfmt.abc2pscompat) { error(1, s, "Cannot have %%%%abc2pscompat"); cfmt.abc2pscompat = 0; } clone = p_voice->clone >= 0; p_voice2->id[0] = '&'; p_voice2->id[1] = '\0'; p_voice2->second = 1; parsys->voice[voice2].second = 1; p_voice2->scale = p_voice->scale; p_voice2->octave = p_voice->octave; p_voice2->transpose = p_voice->transpose; memcpy(&p_voice2->key, &p_voice->key, sizeof p_voice2->key); memcpy(&p_voice2->ckey, &p_voice->ckey, sizeof p_voice2->ckey); memcpy(&p_voice2->okey, &p_voice->okey, sizeof p_voice2->okey); p_voice2->posit = p_voice->posit; p_voice2->staff = p_voice->staff; p_voice2->cstaff = p_voice->cstaff; p_voice2->color = p_voice->color; p_voice2->map_name = p_voice->map_name; range = parsys->voice[p_voice - voice_tb].range; for (voice = 0; voice < MAXVOICE; voice++) { if (parsys->voice[voice].range > range) parsys->voice[voice].range += clone + 1; } parsys->voice[voice2].range = range + 1; voice_link(p_voice2); if (clone) { for (voice3 = MAXVOICE; --voice3 >= 0; ) { if (parsys->voice[voice3].range < 0) break; } if (voice3 > 0) { p_voice3 = &voice_tb[voice3]; strcpy(p_voice3->id, p_voice2->id); p_voice3->second = 1; parsys->voice[voice3].second = 1; p_voice3->scale = voice_tb[p_voice->clone].scale; parsys->voice[voice3].range = range + 2; voice_link(p_voice3); p_voice2->clone = voice3; } else { error(1, s, "Too many voices for overlay cloning"); } } } voice = p_voice - voice_tb; // p_voice2->cstaff = p_voice2->staff = parsys->voice[voice2].staff // = parsys->voice[voice].staff; // if ((voice3 = p_voice2->clone) >= 0) { // p_voice3 = &voice_tb[voice3]; // p_voice3->cstaff = p_voice3->staff // = 
parsys->voice[voice3].staff // = parsys->voice[p_voice->clone].staff; // } if (over_time < 0) { /* first '&' in a measure */ int time; over_bar = 1; over_mxtime = p_voice->time; over_voice = voice; time = p_voice2->time; for (s = p_voice->last_sym; /*s*/; s = s->prev) { if (s->type == BAR || s->time <= time) /* (if start of tune) */ break; } over_time = s->time; } else { if (over_mxtime == 0) over_mxtime = p_voice->time; else if (p_voice->time != over_mxtime) error(1, s, tx_wrong_dur); } p_voice2->time = over_time; curvoice = p_voice2; } struct staff_s { short voice; short flags; }; /* -- parse %%staves / %%score -- */ static void parse_staves(struct SYMBOL *s, struct staff_s *staves) { char *p; int voice, flags_st, brace, bracket, parenth, err; short flags; struct staff_s *p_staff; /* define the voices */ err = 0; flags = 0; brace = bracket = parenth = 0; flags_st = 0; voice = 0; p = s->text + 7; while (*p != '\0' && !isspace((unsigned char) *p)) p++; while (*p != '\0') { switch (*p) { case ' ': case '\t': break; case '[': if (parenth || brace + bracket >= 2) { error(1, s, "Misplaced '[' in %%%%staves"); err = 1; break; } if (brace + bracket == 0) flags |= OPEN_BRACKET; else flags |= OPEN_BRACKET2; bracket++; flags_st <<= 8; flags_st |= OPEN_BRACKET; break; case '{': if (parenth || brace || bracket >= 2) { error(1, s, "Misplaced '{' in %%%%staves"); err = 1; break; } if (bracket == 0) flags |= OPEN_BRACE; else flags |= OPEN_BRACE2; brace++; flags_st <<= 8; flags_st |= OPEN_BRACE; break; case '(': if (parenth) { error(1, s, "Misplaced '(' in %%%%staves"); err = 1; break; } flags |= OPEN_PARENTH; parenth++; flags_st <<= 8; flags_st |= OPEN_PARENTH; break; case '*': if (brace && !parenth && !(flags & (OPEN_BRACE | OPEN_BRACE2))) flags |= FL_VOICE; break; case '+': flags |= MASTER_VOICE; break; default: if (!isalnum((unsigned char) *p) && *p != '_') { error(1, s, "Bad voice ID in %%%%staves"); err = 1; break; } if (voice >= MAXVOICE) { error(1, s, "Too many voices in %%%%staves"); err = 1; break; } { int i, v; char sep, *q; q = p; while (isalnum((unsigned char) *p) || *p == '_') p++; sep = *p; *p = '\0'; /* search the voice in the voice table */ v = -1; for (i = 0; i < MAXVOICE; i++) { if (strcmp(q, voice_tb[i].id) == 0) { v = i; break; } } if (v < 0) { error(1, s, "Voice '%s' of %%%%staves has no symbol", q); err = 1; // break; p_staff = staves; } else { p_staff = staves + voice++; p_staff->voice = v; } *p = sep; } for ( ; *p != '\0'; p++) { switch (*p) { case ' ': case '\t': continue; case ']': if (!(flags_st & OPEN_BRACKET)) { error(1, s, "Misplaced ']' in %%%%staves"); err = 1; break; } bracket--; if (brace + bracket == 0) flags |= CLOSE_BRACKET; else flags |= CLOSE_BRACKET2; flags_st >>= 8; continue; case '}': if (!(flags_st & OPEN_BRACE)) { error(1, s, "Misplaced '}' in %%%%staves"); err = 1; break; } brace--; if (bracket == 0) flags |= CLOSE_BRACE; else flags |= CLOSE_BRACE2; flags &= ~FL_VOICE; flags_st >>= 8; continue; case ')': if (!(flags_st & OPEN_PARENTH)) { error(1, s, "Misplaced ')' in %%%%staves"); err = 1; break; } parenth--; flags |= CLOSE_PARENTH; flags_st >>= 8; continue; case '|': flags |= STOP_BAR; continue; } break; } p_staff->flags = flags; flags = 0; if (*p == '\0') break; continue; } if (*p == '\0') break; p++; } if (flags_st != 0) { error(1, s, "'}', ')' or ']' missing in %%%%staves"); err = 1; } if (err) { int i; for (i = 0; i < voice; i++) staves[i].flags = 0; } if (voice < MAXVOICE) staves[voice].voice = -1; } /* -- get staves definition (%%staves / %%score) -- 
*/ static void get_staves(struct SYMBOL *s) { // struct SYMBOL *s2; struct VOICE_S *p_voice, *p_voice2; struct staff_s *p_staff, staves[MAXVOICE]; int i, flags, voice, staff, range, dup_voice, maxtime; memset(staves, 0, sizeof staves); parse_staves(s, staves); if (staves[0].voice < 0) // if error return; voice_compress(); voice_dup(); /* create a new staff system */ curvoice = p_voice = first_voice; maxtime = p_voice->time; flags = p_voice->sym != NULL; for (p_voice = p_voice->next; p_voice; p_voice = p_voice->next) { if (p_voice->time > maxtime) maxtime = p_voice->time; if (p_voice->sym) flags = 1; } if (flags == 0 /* if first %%staves */ || (maxtime == 0 && staves_found < 0)) { for (voice = 0; voice < MAXVOICE; voice++) parsys->voice[voice].range = -1; } else { /* * create a new staff system and * link the staves in a voice which is seen from * the previous system - see sort_all */ // p_voice = curvoice; if (parsys->voice[curvoice - voice_tb].range < 0) { for (voice = 0; voice < MAXVOICE; voice++) { if (parsys->voice[voice].range >= 0) { curvoice = &voice_tb[voice]; break; } } /*fixme: should check if voice < MAXVOICE*/ } curvoice->time = maxtime; // put the staves before a measure bar (see draw_bar()) // s2 = curvoice->last_sym; // if (s2 && s2->type == BAR && s2->time == maxtime) { // curvoice->last_sym = s2->prev; // if (!curvoice->last_sym) // curvoice->sym = NULL; // sym_link(s, STAVES); // s->next = s2; // s2->prev = s; // curvoice->last_sym = s2; // } else { sym_link(s, STAVES); // link the staves in the current voice // } s->state = ABC_S_HEAD; /* (output PS sequences immediately) */ parsys->nstaff = nstaff; system_new(); } staves_found = maxtime; /* initialize the voices */ for (voice = 0, p_voice = voice_tb; voice < MAXVOICE; voice++, p_voice++) { p_voice->second = 0; p_voice->floating = 0; p_voice->ignore = 0; p_voice->time = maxtime; } /* create the 'clone' voices */ dup_voice = MAXVOICE; range = 0; p_staff = staves; parsys->top_voice = p_staff->voice; for (i = 0; i < MAXVOICE && p_staff->voice >= 0; i++, p_staff++) { voice = p_staff->voice; p_voice = &voice_tb[voice]; if (parsys->voice[voice].range >= 0) { if (parsys->voice[dup_voice - 1].range >= 0) { error(1, s, "Too many voices for cloning"); continue; } voice = --dup_voice; /* duplicate the voice */ p_voice2 = &voice_tb[voice]; memcpy(p_voice2, p_voice, sizeof *p_voice2); p_voice2->next = NULL; p_voice2->sym = p_voice2->last_sym = NULL; p_voice2->tblts[0] = p_voice2->tblts[1] = NULL; p_voice2->clone = -1; while (p_voice->clone > 0) p_voice = &voice_tb[p_voice->clone]; p_voice->clone = voice; p_voice = p_voice2; p_staff->voice = voice; } parsys->voice[voice].range = range++; voice_link(p_voice); } /* change the behavior from %%staves to %%score */ if (s->text[3] == 't') { /* if %%staves */ for (i = 0, p_staff = staves; i < MAXVOICE - 2 && p_staff->voice >= 0; i++, p_staff++) { flags = p_staff->flags; if (!(flags & (OPEN_BRACE | OPEN_BRACE2))) continue; if ((flags & (OPEN_BRACE | CLOSE_BRACE)) == (OPEN_BRACE | CLOSE_BRACE) || (flags & (OPEN_BRACE2 | CLOSE_BRACE2)) == (OPEN_BRACE2 | CLOSE_BRACE2)) continue; if (p_staff[1].flags != 0) continue; if ((flags & OPEN_PARENTH) || (p_staff[2].flags & OPEN_PARENTH)) continue; /* {a b c} --> {a *b c} */ if (p_staff[2].flags & (CLOSE_BRACE | CLOSE_BRACE2)) { p_staff[1].flags |= FL_VOICE; /* {a b c d} --> {(a b) (c d)} */ } else if (p_staff[2].flags == 0 && (p_staff[3].flags & (CLOSE_BRACE | CLOSE_BRACE2))) { p_staff->flags |= OPEN_PARENTH; p_staff[1].flags |= CLOSE_PARENTH; 
p_staff[2].flags |= OPEN_PARENTH; p_staff[3].flags |= CLOSE_PARENTH; } } } /* set the staff system */ staff = -1; for (i = 0, p_staff = staves; i < MAXVOICE && p_staff->voice >= 0; i++, p_staff++) { flags = p_staff->flags; if ((flags & (OPEN_PARENTH | CLOSE_PARENTH)) == (OPEN_PARENTH | CLOSE_PARENTH)) { flags &= ~(OPEN_PARENTH | CLOSE_PARENTH); p_staff->flags = flags; } voice = p_staff->voice; p_voice = &voice_tb[voice]; if (flags & FL_VOICE) { p_voice->floating = 1; p_voice->second = 1; } else { #if MAXSTAFF < MAXVOICE if (staff >= MAXSTAFF - 1) { error(1, s, "Too many staves"); } else #endif staff++; parsys->staff[staff].flags = 0; } p_voice->staff = p_voice->cstaff = parsys->voice[voice].staff = staff; parsys->staff[staff].flags |= flags; if (flags & OPEN_PARENTH) { p_voice2 = p_voice; while (i < MAXVOICE) { i++; p_staff++; voice = p_staff->voice; p_voice = &voice_tb[voice]; if (p_staff->flags & MASTER_VOICE) { p_voice2->second = 1; p_voice2 = p_voice; } else { p_voice->second = 1; } p_voice->staff = p_voice->cstaff = parsys->voice[voice].staff = staff; if (p_staff->flags & CLOSE_PARENTH) break; } parsys->staff[staff].flags |= p_staff->flags; } } if (staff < 0) staff = 0; parsys->nstaff = nstaff = staff; /* change the behaviour of '|' in %%score */ if (s->text[3] == 'c') { /* if %%score */ for (staff = 0; staff < nstaff; staff++) parsys->staff[staff].flags ^= STOP_BAR; } for (voice = 0; voice < MAXVOICE; voice++) { p_voice = &voice_tb[voice]; parsys->voice[voice].second = p_voice->second; staff = p_voice->staff; if (staff > 0) p_voice->norepbra = !(parsys->staff[staff - 1].flags & STOP_BAR); if (p_voice->floating && staff == nstaff) p_voice->floating = 0; } curvoice = &voice_tb[parsys->top_voice]; } /* -- re-initialize all potential voices -- */ static void voice_init(void) { struct VOICE_S *p_voice; int i; for (i = 0, p_voice = voice_tb; i < MAXVOICE; i++, p_voice++) { p_voice->sym = p_voice->last_sym = NULL; p_voice->lyric_start = NULL; p_voice->bar_start = 0; p_voice->time = 0; p_voice->slur_st = 0; p_voice->hy_st = 0; p_voice->tie = 0; p_voice->rtie = 0; } } /* output a pdf mark */ static void put_pdfmark(char *p) { unsigned char c, *q; int u; p = trim_title(p, NULL); /* check if pure ASCII without '\', '(' nor ')'*/ for (q = (unsigned char *) p; *q != '\0'; q++) { switch (*q) { case '\\': case '(': case ')': break; default: if (*q >= 0x80) break; continue; } break; } if (*q == '\0') { a2b("[/Title(%s)/OUT pdfmark\n", p); return; } /* build utf-8 mark */ a2b("[/Title<FEFF"); q = (unsigned char *) p; u = -1; while (*q != '\0') { c = *q++; if (c < 0x80) { if (u >= 0) { a2b("%04X", u); u = -1; } a2b("%04X", (int) c); continue; } if (c < 0xc0) { u = (u << 6) | (c & 0x3f); continue; } if (u >= 0) { a2b("%04X", u); u = -1; } if (c < 0xe0) u = c & 0x1f; else if (c < 0xf0) u = c & 0x0f; else u = c & 0x07; } if (u >= 0) { a2b("%04X", u); u = -1; } a2b(">/OUT pdfmark\n"); } /* rebuild a tune header for %%tune filter */ static char *tune_header_rebuild(struct SYMBOL *s) { struct SYMBOL *s2; char *header, *p; int len; len = 0; s2 = s; for (;;) { if (s2->abc_type == ABC_T_INFO) { len += strlen(s2->text) + 1; if (s2->text[0] == 'K') break; } s2 = s2->abc_next; } header = malloc(len + 1); p = header; for (;;) { if (s->abc_type == ABC_T_INFO) { strcpy(p, s->text); p += strlen(p); *p++ = '\n'; if (s->text[0] == 'K') break; } s = s->abc_next; } *p++ = '\0'; return header; } /* apply the options to the current tune */ static void tune_filter(struct SYMBOL *s) { struct tune_opt_s *opt; struct SYMBOL 
*s1, *s2; regex_t r; char *header, *p; int ret; header = tune_header_rebuild(s); for (opt = tune_opts; opt; opt = opt->next) { struct SYMBOL *last_staves; p = &opt->s->text[2 + 5]; /* "%%tune RE" */ while (isspace((unsigned char) *p)) p++; ret = regcomp(&r, p, REG_EXTENDED | REG_NEWLINE | REG_NOSUB); if (ret) continue; ret = regexec(&r, header, 0, NULL, 0); regfree(&r); if (ret) continue; /* apply the options */ cur_tune_opts = opt; last_staves = s->abc_next; for (s1 = opt->s->next; s1; s1 = s1->next) { /* replace the next %%staves/%%score */ if (s1->abc_type == ABC_T_PSCOM && (strncmp(&s1->text[2], "staves", 6) == 0 || strncmp(&s1->text[2], "score", 5) == 0)) { while (last_staves) { if (last_staves->abc_type == ABC_T_PSCOM && (strncmp(&last_staves->text[2], "staves", 6) == 0 || strncmp(&last_staves->text[2], "score", 5) == 0)) { last_staves->text = s1->text; last_staves = last_staves->abc_next; break; } last_staves = last_staves->abc_next; } continue; } s2 = (struct SYMBOL *) getarena(sizeof *s2); memcpy(s2, s1, sizeof *s2); process_pscomment(s2); } cur_tune_opts = NULL; tune_voice_opts = opt->voice_opts; // for %%voice //fixme: what if many %%tune's with %%voice inside? } free(header); } /* apply the options of the current voice */ static void voice_filter(void) { struct voice_opt_s *opt; struct SYMBOL *s; regex_t r; int pass, ret; char *p; /* scan the global, then the tune options */ pass = 0; opt = voice_opts; for (;;) { if (!opt) { if (pass != 0) break; opt = tune_voice_opts; if (!opt) break; pass++; } p = &opt->s->text[2 + 6]; /* "%%voice RE" */ while (isspace((unsigned char) *p)) p++; ret = regcomp(&r, p, REG_EXTENDED | REG_NOSUB); if (ret) goto next_voice; ret = regexec(&r, curvoice->id, 0, NULL, 0); if (ret && curvoice->nm) ret = regexec(&r, curvoice->nm, 0, NULL, 0); regfree(&r); if (ret) goto next_voice; /* apply the options */ for (s = opt->s->next; s; s = s->next) { struct SYMBOL *s2; s2 = (struct SYMBOL *) getarena(sizeof *s2); memcpy(s2, s, sizeof *s2); process_pscomment(s2); } next_voice: opt = opt->next; } } /* -- check if a pseudo-comment may be in the tune header -- */ static int check_header(struct SYMBOL *s) { switch (s->text[2]) { case 'E': if (strncmp(s->text + 2, "EPS", 3) == 0) return 0; break; case 'm': if (strncmp(s->text + 2, "multicol", 8) == 0) return 0; break; } return 1; } /* -- set the global definitions after the first K: or middle-tune T:'s -- */ static void set_global_def(void) { struct VOICE_S *p_voice; int i; for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) { switch (p_voice->key.instr) { case 0: if (!pipeformat) { // p_voice->transpose = cfmt.transpose; break; } //fall thru case K_HP: case K_Hp: if (p_voice->posit.std == 0) p_voice->posit.std = SL_BELOW; break; } // if (p_voice->key.empty) // p_voice->key.sf = 0; if (!cfmt.autoclef && p_voice->s_clef && (p_voice->s_clef->sflags & S_CLEF_AUTO)) { p_voice->s_clef->u.clef.type = TREBLE; p_voice->s_clef->sflags &= ~S_CLEF_AUTO; } } /* switch to the 1st voice */ curvoice = &voice_tb[parsys->top_voice]; } /* -- get the global definitions after the first K: or middle-tune T:'s -- */ static struct SYMBOL *get_global_def(struct SYMBOL *s) { struct SYMBOL *s2; for (;;) { s2 = s->abc_next; if (!s2) break; switch (s2->abc_type) { case ABC_T_INFO: switch (s2->text[0]) { case 'K': s = s2; s->state = ABC_S_HEAD; get_key(s); continue; case 'I': case 'M': case 'Q': s = s2; s->state = ABC_S_HEAD; s = get_info(s); continue; } break; case ABC_T_PSCOM: if (!check_header(s2)) break; s = s2; s->state = 
ABC_S_HEAD; s = process_pscomment(s); continue; } break; } set_global_def(); return s; } /* save the global note maps */ static void save_maps(void) { struct map *omap, *map; struct note_map *onotes, *notes; omap = maps; if (!omap) { maps_glob = NULL; return; } maps_glob = map = getarena(sizeof *maps_glob); for (;;) { memcpy(map, omap, sizeof *map); onotes = omap->notes; if (onotes) { map->notes = notes = getarena(sizeof *notes); for (;;) { memcpy(notes, onotes, sizeof *notes); onotes = onotes->next; if (!onotes) break; notes->next = getarena(sizeof *notes); notes = notes->next; } } omap = omap->next; if (!omap) break; map->next = getarena(sizeof *map); map = map->next; } } /* -- identify info line, store in proper place -- */ static struct SYMBOL *get_info(struct SYMBOL *s) { struct SYMBOL *s2; struct VOICE_S *p_voice; char *p; char info_type; int old_lvl; static char *state_txt[] = {"global", "header", "tune"}; /* change arena to global or tune */ old_lvl = lvlarena(s->state != ABC_S_GLOBAL); info_type = s->text[0]; switch (info_type) { case 'd': break; case 'I': s = process_pscomment(s); /* same as pseudo-comment */ break; case 'K': get_key(s); if (s->state != ABC_S_HEAD) break; info['K' - 'A'] = s; /* first K:, end of tune header */ tunenum++; if (!epsf) { // if (!cfmt.oneperpage) // use_buffer = cfmt.splittune != 1; bskip(cfmt.topspace); } a2b("%% --- xref %s\n", &info['X' - 'A']->text[2]); // (for index) write_heading(); block_put(); /* information for index * (pdfmark must be after title show for Adobe Distiller) */ s2 = info['T' - 'A']; p = &s2->text[2]; if (*p != '\0') { a2b("%% --- font "); outft = -1; set_font(TITLEFONT); /* font in comment */ a2b("\n"); outft = -1; } if (cfmt.pdfmark) { if (*p != '\0') put_pdfmark(p); if (cfmt.pdfmark > 1) { for (s2 = s2->next; s2; s2 = s2->next) { p = &s2->text[2]; if (*p != '\0') put_pdfmark(p); } } } nbar = cfmt.measurefirst; /* measure numbering */ over_voice = -1; over_time = -1; over_bar = 0; capo = 0; reset_gen(); s = get_global_def(s); if (!(cfmt.fields[0] & (1 << ('Q' - 'A')))) info['Q' - 'A'] = NULL; /* apply the filter for the voice '1' */ voice_filter(); /* activate the default tablature if not yet done */ if (!first_voice->tblts[0]) set_tblt(first_voice); break; case 'L': switch (s->state) { case ABC_S_HEAD: { int i, auto_len; auto_len = s->u.length.base_length < 0; for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) p_voice->auto_len = auto_len; break; } case ABC_S_TUNE: curvoice->auto_len = s->u.length.base_length < 0; break; } break; case 'M': get_meter(s); break; case 'P': { struct VOICE_S *curvoice_sav; if (s->state != ABC_S_TUNE) { info['P' - 'A'] = s; break; } if (!(cfmt.fields[0] & (1 << ('P' - 'A')))) break; /* * If not in the main voice, then, * if the voices are synchronized and no P: yet in the main voice, * the misplaced P: goes into the main voice. 
*/ p_voice = &voice_tb[parsys->top_voice]; if (curvoice != p_voice) { if (curvoice->time != p_voice->time) break; if (p_voice->last_sym && p_voice->last_sym->type == PART) break; // already a P: curvoice_sav = curvoice; curvoice = p_voice; sym_link(s, PART); curvoice = curvoice_sav; break; } sym_link(s, PART); break; } case 'Q': if (!(cfmt.fields[0] & (1 << ('Q' - 'A')))) break; if (s->state != ABC_S_TUNE) { info['Q' - 'A'] = s; break; } if (curvoice != &voice_tb[parsys->top_voice]) break; /* tempo only for first voice */ s2 = curvoice->last_sym; if (s2) { /* keep last Q: */ int tim; tim = s2->time; do { if (s2->type == TEMPO) { if (!s2->next) curvoice->last_sym = s2->prev; else s2->next->prev = s2->prev; if (!s2->prev) curvoice->sym = s2->next; else s2->prev->next = s2->next; break; } s2 = s2->prev; } while (s2 && s2->time == tim); } sym_link(s, TEMPO); break; case 'r': case 's': break; case 'T': if (s->state == ABC_S_GLOBAL) break; if (s->state == ABC_S_HEAD) /* in tune header */ goto addinfo; gen_ly(1); /* in tune */ p = &s->text[2]; if (*p != '\0') { write_title(s); a2b("%% --- + (%s) ---\n", p); if (cfmt.pdfmark) put_pdfmark(p); } voice_init(); reset_gen(); /* (display the time signature) */ s = get_global_def(s); break; case 'U': deco[s->u.user.symbol] = parse.deco_tb[s->u.user.value - 128]; break; case 'u': break; case 'V': get_voice(s); /* handle here the possible clef which could be replaced * in case of filter */ if (s->abc_next && s->abc_next->abc_type == ABC_T_CLEF) { s = s->abc_next; get_clef(s); } if (s->state == ABC_S_TUNE && !curvoice->last_sym && curvoice->time == 0) voice_filter(); break; case 'w': if (s->state != ABC_S_TUNE) break; if (!(cfmt.fields[1] & (1 << ('w' - 'a')))) { while (s->abc_next) { if (s->abc_next->abc_type != ABC_T_INFO || s->abc_next->text[0] != '+') break; s = s->abc_next; } break; } s = get_lyric(s); break; case 'W': if (s->state == ABC_S_GLOBAL || !(cfmt.fields[0] & (1 << ('W' - 'A')))) break; goto addinfo; case 'X': if (!epsf) { buffer_eob(0); /* flush stuff left from %% lines */ write_buffer(); //fixme: 8.6.2 if (cfmt.oneperpage) close_page(); // else if (in_page) else use_buffer = cfmt.splittune != 1; } memcpy(&dfmt, &cfmt, sizeof dfmt); /* save global values */ memcpy(&info_glob, &info, sizeof info_glob); memcpy(deco_glob, deco, sizeof deco_glob); save_maps(); info['X' - 'A'] = s; if (tune_opts) tune_filter(s); break; default: if (info_type >= 'A' && info_type <= 'Z') { struct SYMBOL *prev; if (s->state == ABC_S_TUNE) break; addinfo: prev = info[info_type - 'A']; if (!prev || (prev->state == ABC_S_GLOBAL && s->state != ABC_S_GLOBAL)) { info[info_type - 'A'] = s; } else { while (prev->next) prev = prev->next; prev->next = s; } while (s->abc_next && s->abc_next->abc_type == ABC_T_INFO && s->abc_next->text[0] == '+') { prev = s; s = s->abc_next; prev->next = s; } s->prev = prev; break; } if (s->state != ABC_S_GLOBAL) error(1, s, "%s info '%c:' not treated", state_txt[(int) s->state], info_type); break; } lvlarena(old_lvl); return s; } /* -- set head type, dots, flags for note -- */ void identify_note(struct SYMBOL *s, int dur, int *p_head, int *p_dots, int *p_flags) { int head, dots, flags; if (dur % 12 != 0) error(1, s, "Invalid note duration"); dur /= 12; /* see BASE_LEN for values */ if (dur == 0) error(1, s, "Note too short"); for (flags = 5; dur != 0; dur >>= 1, flags--) { if (dur & 1) break; } dur >>= 1; switch (dur) { case 0: dots = 0; break; case 1: dots = 1; break; case 3: dots = 2; break; case 7: dots = 3; break; default: error(1, s, 
"Note too much dotted"); dots = 3; break; } flags -= dots; if (flags >= 0) { head = H_FULL; } else switch (flags) { default: error(1, s, "Note too long"); flags = -4; /* fall thru */ case -4: head = H_SQUARE; break; case -3: head = cfmt.squarebreve ? H_SQUARE : H_OVAL; break; case -2: head = H_OVAL; break; case -1: head = H_EMPTY; break; } *p_head = head; *p_flags = flags; *p_dots = dots; } /* -- adjust the duration and time of symbols in a measure when L:auto -- */ static void adjust_dur(struct SYMBOL *s) { struct SYMBOL *s2; int time, auto_time; /* search the start of the measure */ s2 = curvoice->last_sym; if (!s2) return; /* the bar time is correct if there is multi-rests */ if (s2->type == MREST || s2->type == BAR) /* in second voice */ return; while (s2->type != BAR && s2->prev) s2 = s2->prev; time = s2->time; auto_time = curvoice->time - time; /* remove the invisible rest at start of tune */ if (time == 0) { while (s2 && s2->dur == 0) s2 = s2->next; if (s2 && s2->abc_type == ABC_T_REST && (s2->flags & ABC_F_INVIS)) { time += s2->dur * curvoice->wmeasure / auto_time; if (s2->prev) s2->prev->next = s2->next; else curvoice->sym = s2->next; if (s2->next) s2->next->prev = s2->prev; s2 = s2->next; } } if (curvoice->wmeasure == auto_time) return; /* already good duration */ for (; s2; s2 = s2->next) { int i, head, dots, nflags; s2->time = time; if (s2->dur == 0 || (s2->flags & ABC_F_GRACE)) continue; s2->dur = s2->dur * curvoice->wmeasure / auto_time; time += s2->dur; if (s2->type != NOTEREST) continue; for (i = 0; i <= s2->nhd; i++) s2->u.note.notes[i].len = s2->u.note.notes[i].len * curvoice->wmeasure / auto_time; identify_note(s2, s2->u.note.notes[0].len, &head, &dots, &nflags); s2->head = head; s2->dots = dots; s2->nflags = nflags; if (s2->nflags <= -2) s2->flags |= ABC_F_STEMLESS; else s2->flags &= ~ABC_F_STEMLESS; } curvoice->time = s->time = time; } /* -- measure bar -- */ static void get_bar(struct SYMBOL *s) { int bar_type; struct SYMBOL *s2; if (s->u.bar.repeat_bar && curvoice->norepbra && !curvoice->second) s->sflags |= S_NOREPBRA; if (curvoice->auto_len) adjust_dur(s); bar_type = s->u.bar.type; s2 = curvoice->last_sym; if (s2 && s2->type == SPACE) { s2->time--; // keep the space at the right place } else if (s2 && s2->type == BAR) { /* remove the invisible repeat bars when no shift is needed */ if (bar_type == B_OBRA && !s2->text && (curvoice == &voice_tb[parsys->top_voice] || (parsys->staff[curvoice->staff - 1].flags & STOP_BAR) || (s->sflags & S_NOREPBRA))) { s2->text = s->text; s2->u.bar.repeat_bar = s->u.bar.repeat_bar; s2->flags |= s->flags & (ABC_F_RBSTART | ABC_F_RBSTOP); s2->sflags |= s->sflags & (S_NOREPBRA | S_RBSTART | S_RBSTOP); s = s2; goto gch_build; } /* merge back-to-back repeat bars */ if (bar_type == B_LREP && !s->text) { if (s2->u.bar.type == B_RREP) { s2->u.bar.type = B_DREP; s2->flags |= ABC_F_RBSTOP; s2->sflags |= S_RBSTOP; return; } if (s2->u.bar.type == B_DOUBLE) { s2->u.bar.type = (B_SINGLE << 8) | B_LREP; s2->flags |= ABC_F_RBSTOP; s2->sflags |= S_RBSTOP; return; } } } /* link the bar in the voice */ /* the bar must appear before a key signature */ if (s2 && s2->type == KEYSIG && (!s2->prev || s2->prev->type != BAR)) { curvoice->last_sym = s2->prev; if (!curvoice->last_sym) curvoice->sym = NULL; sym_link(s, BAR); s->next = s2; s2->prev = s; curvoice->last_sym = s2; } else { sym_link(s, BAR); } s->staff = curvoice->staff; /* original staff */ /* set some flags */ switch (bar_type) { case B_OBRA: case (B_OBRA << 4) + B_CBRA: s->flags |= ABC_F_INVIS; 
break; case (B_COL << 8) + (B_BAR << 4) + B_COL: case (B_COL << 12) + (B_BAR << 8) + (B_BAR << 4) + B_COL: bar_type = (B_COL << 4) + B_COL; /* :|: and :||: -> :: */ s->u.bar.type = bar_type; break; case (B_BAR << 4) + B_BAR: if (!cfmt.rbdbstop) break; case (B_OBRA << 4) + B_BAR: case (B_BAR << 4) + B_CBRA: s->flags |= ABC_F_RBSTOP; s->sflags |= S_RBSTOP; break; } if (s->u.bar.dc.n > 0) deco_cnv(&s->u.bar.dc, s, NULL); /* convert the decorations */ /* build the gch */ gch_build: if (s->text) { if (!s->u.bar.repeat_bar) { gch_build(s); /* build the guitar chords */ } else { s->gch = getarena(sizeof *s->gch * 2); memset(s->gch, 0, sizeof *s->gch * 2); s->gch->type = 'r'; s->gch->font = REPEATFONT; str_font(REPEATFONT); s->gch->w = tex_str(s->text); s->gch->x = 4 + 4; } } } /* -- activate the tablature from the command line '-T' -- */ static void set_tblt(struct VOICE_S *p_voice) { struct tblt_s *tblt; int i; for (i = 0; i < ncmdtblt; i++) { if (!cmdtblts[i].active) continue; if (cmdtblts[i].vn[0] != '\0') { if (strcmp(cmdtblts[i].vn, p_voice->id) != 0 && (p_voice->nm == 0 || strcmp(cmdtblts[i].vn, p_voice->nm) != 0) && (p_voice->snm == 0 || strcmp(cmdtblts[i].vn, p_voice->snm) != 0)) continue; } tblt = tblts[cmdtblts[i].index]; if (p_voice->tblts[0] == tblt || p_voice->tblts[1] == tblt) continue; if (p_voice->tblts[0] == 0) p_voice->tblts[0] = tblt; else p_voice->tblts[1] = tblt; } } /* -- do a tune -- */ void do_tune(void) { struct VOICE_S *p_voice; struct SYMBOL *s, *s1, *s2; int i; /* initialize */ lvlarena(0); nstaff = 0; staves_found = -1; for (i = 0; i < MAXVOICE; i++) { p_voice = &voice_tb[i]; s1 = (struct SYMBOL *) getarena(sizeof *s1); memset(s1, 0, sizeof *s1); s1->type = CLEF; s1->voice = i; if (cfmt.autoclef) { s1->u.clef.type = AUTOCLEF; s1->sflags = S_CLEF_AUTO; } else { s1->u.clef.type = TREBLE; } s1->u.clef.line = 2; /* treble clef on 2nd line */ p_voice->s_clef = s1; p_voice->meter.wmeasure = 1; // M:none p_voice->wmeasure = 1; p_voice->scale = 1; p_voice->clone = -1; p_voice->over = -1; p_voice->posit = cfmt.posit; p_voice->stafflines = NULL; // p_voice->staffscale = 0; } curvoice = first_voice = voice_tb; reset_deco(); abc2win = 0; clip_start.bar = -1; clip_end.bar = (short unsigned) ~0 >> 1; parsys = NULL; system_new(); /* create the 1st staff system */ parsys->top_voice = parsys->voice[0].range = 0; /* implicit voice */ if (!epsf) { //fixme: 8.6.2 #if 1 // fixme: should already be 0 use_buffer = 0; #else if (cfmt.oneperpage) { use_buffer = 0; close_page(); } else { if (in_page) // ?? 
use_buffer = cfmt.splittune != 1; } #endif } else { use_buffer = 1; marg_init(); } /* set the duration of all notes/rests * (this is needed for tuplets and the feathered beams) */ for (s = parse.first_sym; s; s = s->abc_next) { switch (s->abc_type) { case ABC_T_EOLN: if (s->u.eoln.type == 2) abc2win = 1; break; case ABC_T_NOTE: case ABC_T_REST: s->dur = s->u.note.notes[0].len; break; } } if (voice_tb[0].id[0] == '\0') { /* single voice */ voice_tb[0].id[0] = '1'; /* implicit V:1 */ voice_tb[0].id[1] = '\0'; } /* scan the tune */ for (s = parse.first_sym; s; s = s->abc_next) { if (s->flags & ABC_F_LYRIC_START) curvoice->lyric_start = curvoice->last_sym; switch (s->abc_type) { case ABC_T_INFO: s = get_info(s); break; case ABC_T_PSCOM: s = process_pscomment(s); break; case ABC_T_NOTE: case ABC_T_REST: if (curvoice->space && !(s->flags & ABC_F_GRACE)) { curvoice->space = 0; s->flags |= ABC_F_SPACE; } get_note(s); break; case ABC_T_BAR: if (over_bar) get_over(s); get_bar(s); break; case ABC_T_CLEF: get_clef(s); break; case ABC_T_EOLN: if (cfmt.breakoneoln || (s->flags & ABC_F_SPACE)) curvoice->space = 1; if (cfmt.continueall || cfmt.barsperstaff || s->u.eoln.type == 1) /* if '\' */ continue; if (s->u.eoln.type == 0 /* if normal eoln */ && abc2win && parse.abc_vers != (2 << 16)) continue; if (parsys->voice[curvoice - voice_tb].range == 0 && curvoice->last_sym) curvoice->last_sym->sflags |= S_EOLN; if (!cfmt.alignbars) continue; /* normal */ /* align bars */ while (s->abc_next) { /* treat the lyrics */ if (s->abc_next->abc_type != ABC_T_INFO) break; switch (s->abc_next->text[0]) { case 'w': s = get_info(s->abc_next); continue; case 'd': case 's': s = s->abc_next; continue; } break; } i = (curvoice - voice_tb) + 1; if (i < cfmt.alignbars) { curvoice = &voice_tb[i]; continue; } generate(); buffer_eob(0); curvoice = &voice_tb[0]; continue; case ABC_T_MREST: { int dur; dur = curvoice->wmeasure * s->u.bar.len; if (curvoice->second) { curvoice->time += dur; break; } sym_link(s, MREST); s->dur = dur; curvoice->time += dur; if (s->text) gch_build(s); /* build the guitar chords */ if (s->u.bar.dc.n > 0) deco_cnv(&s->u.bar.dc, s, NULL); break; } case ABC_T_MREP: { int n; s2 = curvoice->last_sym; if (!s2 || s2->type != BAR) { error(1, s, "No bar before measure repeat"); break; } if (curvoice->ignore) break; n = s->u.bar.len; if (curvoice->second) { curvoice->time += curvoice->wmeasure * n; break; } s2 = sym_add(curvoice, NOTEREST); s2->abc_type = ABC_T_REST; s2->flags |= ABC_F_INVIS; s2->dur = curvoice->wmeasure; curvoice->time += s2->dur; if (n == 1) { s->abc_next->u.bar.len = n; /* <n> in the next bar */ break; } while (--n > 0) { s2 = sym_add(curvoice, BAR); s2->u.bar.type = B_SINGLE; if (n == s->u.bar.len - 1) s2->u.bar.len = s->u.bar.len; s2 = sym_add(curvoice, NOTEREST); s2->abc_type = ABC_T_REST; s2->flags |= ABC_F_INVIS; s2->dur = curvoice->wmeasure; curvoice->time += s2->dur; } break; } case ABC_T_V_OVER: get_over(s); continue; case ABC_T_TUPLET: set_tuplet(s); break; default: continue; } if (s->type == 0) continue; if (curvoice->second) s->sflags |= S_SECOND; if (curvoice->floating) s->sflags |= S_FLOATING; } gen_ly(0); put_history(); buffer_eob(1); if (epsf) { write_eps(); } else { write_buffer(); // if (!cfmt.oneperpage && in_page) // use_buffer = cfmt.splittune != 1; } if (info['X' - 'A']) { memcpy(&cfmt, &dfmt, sizeof cfmt); /* restore global values */ memcpy(&info, &info_glob, sizeof info); memcpy(deco, deco_glob, sizeof deco); maps = maps_glob; info['X' - 'A'] = NULL; } /* free the parsing 
resources */ { struct brk_s *brk, *brk2; brk = brks; while (brk) { brk2 = brk->next; free(brk); brk = brk2; } brks = brk; /* (NULL) */ } } /* check if a K: or M: may go to the tune key and time signatures */ static int is_tune_sig(void) { struct SYMBOL *s; if (!curvoice->sym) return 1; if (curvoice->time != 0) return 0; /* not at start of tune */ for (s = curvoice->sym; s; s = s->next) { switch (s->type) { case TEMPO: case PART: case FMTCHG: break; default: return 0; } } return 1; } /* -- get a clef definition (in K: or V:) -- */ static void get_clef(struct SYMBOL *s) { struct SYMBOL *s2; struct VOICE_S *p_voice; int voice; p_voice = curvoice; s->type = CLEF; if (s->abc_prev->abc_type == ABC_T_INFO) { switch (s->abc_prev->text[0]) { case 'K': if (s->abc_prev->state != ABC_S_HEAD) break; for (voice = 0; voice < MAXVOICE; voice++) { voice_tb[voice].s_clef = s; if (s->u.clef.type == PERC) voice_tb[voice].perc = 1; } return; case 'V': /* clef relative to a voice definition in the header */ p_voice = &voice_tb[(int) s->abc_prev->u.voice.voice]; curvoice = p_voice; break; } } if (is_tune_sig()) { p_voice->s_clef = s; } else { /* clef change */ #if 0 sym_link(s, CLEF); #else /* the clef must appear before a key signature or a bar */ s2 = p_voice->last_sym; if (s2 && s2->prev && s2->time == curvoice->time // if no time skip && (s2->type == KEYSIG || s2->type == BAR)) { struct SYMBOL *s3; for (s3 = s2; s3->prev; s3 = s3->prev) { switch (s3->prev->type) { case KEYSIG: case BAR: continue; } break; } p_voice->last_sym = s3->prev; sym_link(s, CLEF); s->next = s3; s3->prev = s; p_voice->last_sym = s2; } else { sym_link(s, CLEF); } #endif s->aux = 1; /* small clef */ } p_voice->perc = s->u.clef.type == PERC; if (s->u.clef.type == AUTOCLEF) s->sflags |= S_CLEF_AUTO; } /* -- treat %%clef -- */ static void clef_def(struct SYMBOL *s) { char *p; int clef, clef_line; char str[80]; clef = -1; clef_line = 2; p = &s->text[2 + 5]; /* skip %%clef */ while (isspace((unsigned char) *p)) p++; /* clef name */ switch (*p) { case '\"': /* user clef name */ p = get_str(str, p, sizeof str); s->u.clef.name = (char *) getarena(strlen(str) + 1); strcpy(s->u.clef.name, str); clef = TREBLE; break; case 'G': clef = TREBLE; p++; break; case 'F': clef = BASS; clef_line = 4; p++; break; case 'C': clef = ALTO; clef_line = 3; p++; break; case 'P': clef = PERC; p++; break; case 't': if (strncmp(p, "treble", 6) == 0) { clef = TREBLE; p += 6; } if (strncmp(p, "tenor", 5) == 0) { clef = ALTO; clef_line = 4; p += 5; } break; case 'a': if (strncmp(p, "alto", 4) == 0) { clef = ALTO; clef_line = 3; p += 4; } else if (strncmp(p, "auto", 4) == 0) { clef = AUTOCLEF; s->sflags |= S_CLEF_AUTO; p += 4; } break; case 'b': if (strncmp(p, "bass", 4) == 0) { clef = BASS; clef_line = 4; p += 4; } break; case 'p': if (strncmp(p, "perc", 4) == 0) { clef = PERC; p += 4; } break; case 'n': if (strncmp(p, "none", 4) == 0) { clef = TREBLE; s->u.clef.invis = 1; s->flags |= ABC_F_INVIS; p += 4; } break; } if (clef < 0) { error(1, s, "Unknown clef '%s'", p); return; } /* clef line */ switch (*p) { case '1': case '2': case '3': case '4': case '5': clef_line = *p++ - '0'; break; } /* +/-/^/_8 */ if (p[1] == '8') { switch (*p) { case '^': s->u.clef.transpose = -7; case '+': s->u.clef.octave = 1; break; case '_': s->u.clef.transpose = 7; case '-': s->u.clef.octave = -1; break; } } /* handle the clef */ s->abc_type = ABC_T_CLEF; s->u.clef.type = clef; s->u.clef.line = clef_line; get_clef(s); } /* transpose a key */ static void key_transpose(struct key_s *key) { int 
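	/* note: curvoice->transpose appears to be stored as 3 * (semitones),
	 * with the remainder modulo 3 selecting the enharmonic spelling:
	 * 1 = prefer sharps, 2 = prefer flats, 0 = nearest key (Db, F# or B) */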
t, sf; t = curvoice->transpose / 3; sf = (t & ~1) + (t & 1) * 7 + key->sf; switch ((curvoice->transpose + 210) % 3) { case 1: sf = (sf + 4 + 12 * 4) % 12 - 4; /* more sharps */ break; case 2: sf = (sf + 7 + 12 * 4) % 12 - 7; /* more flats */ break; default: sf = (sf + 5 + 12 * 4) % 12 - 5; /* Db, F# or B */ break; } key->sf = sf; } /* -- set the accidentals when K: with modified accidentals -- */ static void set_k_acc(struct SYMBOL *s) { int i, j, nacc; char accs[8], pits[8]; static char sharp_tb[8] = {26, 23, 27, 24, 21, 25, 22}; static char flat_tb[8] = {22, 25, 21, 24, 20, 23, 26}; if (s->u.key.sf > 0) { for (nacc = 0; nacc < s->u.key.sf; nacc++) { accs[nacc] = A_SH; pits[nacc] = sharp_tb[nacc]; } } else { for (nacc = 0; nacc < -s->u.key.sf; nacc++) { accs[nacc] = A_FT; pits[nacc] = flat_tb[nacc]; } } for (i = 0; i < s->u.key.nacc; i++) { for (j = 0; j < nacc; j++) { // if ((pits[j] - s->u.key.pits[i]) % 7 == 0) { if (pits[j] == s->u.key.pits[i]) { accs[j] = s->u.key.accs[i]; break; } } if (j == nacc) { if (nacc >= sizeof accs) { error(1, s, "Too many accidentals"); } else { accs[j] = s->u.key.accs[i]; pits[j] = s->u.key.pits[i]; nacc++; } } } for (i = 0; i < nacc; i++) { s->u.key.accs[i] = accs[i]; s->u.key.pits[i] = pits[i]; } s->u.key.nacc = nacc; } /* -- get a key signature definition (K:) -- */ static void get_key(struct SYMBOL *s) { struct VOICE_S *p_voice; struct SYMBOL *s2; struct key_s okey; /* original key */ int i; // int delta; if (s->u.key.octave != NO_OCTAVE) curvoice->octave = s->u.key.octave; if (s->u.key.cue > 0) curvoice->scale = 0.7; else if (s->u.key.cue < 0) curvoice->scale = 1; if (s->u.key.stafflines) curvoice->stafflines = s->u.key.stafflines; if (s->u.key.staffscale != 0) curvoice->staffscale = s->u.key.staffscale; if (s->u.key.empty == 1) /* clef only */ return; if (s->u.key.sf != 0 && !s->u.key.exp && s->u.key.nacc != 0) set_k_acc(s); memcpy(&okey, &s->u.key, sizeof okey); if (s->state == ABC_S_HEAD) { /* if first K: (start of tune) */ for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) p_voice->transpose = cfmt.transpose; // curvoice->transpose = cfmt.transpose; } if (curvoice->transpose != 0) { key_transpose(&s->u.key); #if 0 /* transpose explicit accidentals */ //fixme: not correct - transpose adds or removes accidentals... 
if (s->u.key.nacc > 0) { struct VOICE_S voice, *voice_sav; struct SYMBOL note; memset(&voice, 0, sizeof voice); voice.transpose = curvoice->transpose; memcpy(&voice.ckey, &s->u.key, sizeof voice.ckey); voice.ckey.empty = 2; voice.ckey.nacc = 0; memset(&note, 0, sizeof note); --fixme memcpy(note.u.note.pits, voice.ckey.pits, sizeof note.u.note.pits); memcpy(note.u.note.accs, voice.ckey.accs, sizeof note.u.note.accs); note.nhd = s->u.key.nacc; voice_sav = curvoice; curvoice = &voice; note_transpose(&note); memcpy(s->u.key.pits, note.u.note.pits, sizeof s->u.key.pits); memcpy(s->u.key.accs, note.u.note.accs, sizeof s->u.key.accs); curvoice = voice_sav; } #endif } // calculate the tonic delta // s->u.key.key_delta = (cgd2cde[(s->u.key.sf + 7) % 7] + 14 + s->u.key.mode) % 7; s->u.key.key_delta = (cgd2cde[(s->u.key.sf + 7) % 7] + 14) % 7; if (s->state == ABC_S_HEAD) { /* start of tune */ for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) { memcpy(&p_voice->key, &s->u.key, sizeof p_voice->key); memcpy(&p_voice->ckey, &s->u.key, sizeof p_voice->ckey); memcpy(&p_voice->okey, &okey, sizeof p_voice->okey); if (p_voice->key.empty) p_voice->key.sf = 0; if (s->u.key.octave != NO_OCTAVE) p_voice->octave = s->u.key.octave; if (s->u.key.stafflines) p_voice->stafflines = s->u.key.stafflines; if (s->u.key.staffscale != 0) p_voice->staffscale = s->u.key.staffscale; //fixme: update parsys->voice[voice].stafflines = stafflines; ? } return; } /* ABC_S_TUNE (K: cannot be ABC_S_GLOBAL) */ if (is_tune_sig()) { /* define the starting key signature */ memcpy(&curvoice->key, &s->u.key, sizeof curvoice->key); memcpy(&curvoice->ckey, &s->u.key, sizeof curvoice->ckey); memcpy(&curvoice->okey, &okey, sizeof curvoice->okey); switch (curvoice->key.instr) { case 0: if (!pipeformat) { // curvoice->transpose = cfmt.transpose; break; } //fall thru case K_HP: case K_Hp: if (curvoice->posit.std == 0) curvoice->posit.std = SL_BELOW; break; } if (curvoice->key.empty) curvoice->key.sf = 0; return; } /* key signature change */ if ((!s->abc_next || s->abc_next->abc_type != ABC_T_CLEF) /* if not explicit clef */ && curvoice->ckey.sf == s->u.key.sf /* and same key */ && curvoice->ckey.nacc == 0 && s->u.key.nacc == 0 && curvoice->ckey.empty == s->u.key.empty && cfmt.keywarn) /* (if not key warning, * keep all key signatures) */ return; /* ignore */ if (!curvoice->ckey.empty) s->aux = curvoice->ckey.sf; /* previous key signature */ memcpy(&curvoice->ckey, &s->u.key, sizeof curvoice->ckey); memcpy(&curvoice->okey, &okey, sizeof curvoice->okey); if (s->u.key.empty) s->u.key.sf = 0; /* the key signature must appear before a time signature */ s2 = curvoice->last_sym; if (s2 && s2->type == TIMESIG) { curvoice->last_sym = s2->prev; if (!curvoice->last_sym) curvoice->sym = NULL; sym_link(s, KEYSIG); s->next = s2; s2->prev = s; curvoice->last_sym = s2; } else { sym_link(s, KEYSIG); } } /* -- set meter from M: -- */ static void get_meter(struct SYMBOL *s) { struct VOICE_S *p_voice; int i; switch (s->state) { case ABC_S_GLOBAL: /*fixme: keep the values and apply to all tunes?? 
*/ break; case ABC_S_HEAD: for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) { memcpy(&p_voice->meter, &s->u.meter, sizeof p_voice->meter); p_voice->wmeasure = s->u.meter.wmeasure; } break; case ABC_S_TUNE: curvoice->wmeasure = s->u.meter.wmeasure; if (is_tune_sig()) { memcpy(&curvoice->meter, &s->u.meter, sizeof curvoice->meter); reset_gen(); /* (display the time signature) */ break; } if (s->u.meter.nmeter == 0) break; /* M:none */ sym_link(s, TIMESIG); break; } } /* -- treat a 'V:' -- */ static void get_voice(struct SYMBOL *s) { struct VOICE_S *p_voice; int voice; voice = s->u.voice.voice; p_voice = &voice_tb[voice]; if (parsys->voice[voice].range < 0) { if (cfmt.alignbars) { error(1, s, "V: does not work with %%%%alignbars"); } if (staves_found < 0) { if (!s->u.voice.merge) { #if MAXSTAFF < MAXVOICE if (nstaff >= MAXSTAFF - 1) { error(1, s, "Too many staves"); return; } #endif nstaff++; } else { p_voice->second = 1; parsys->voice[voice].second = 1; } p_voice->staff = p_voice->cstaff = nstaff; parsys->voice[voice].staff = nstaff; parsys->nstaff = nstaff; { int range, i; range = 0; for (i = 0; i < MAXVOICE; i++) { if (parsys->voice[i].range > range) range = parsys->voice[i].range; } parsys->voice[voice].range = range + 1; voice_link(p_voice); } } else { p_voice->ignore = 1; p_voice->staff = p_voice->cstaff = nstaff + 1; } } /* if something has changed, update */ if (s->u.voice.fname != 0) { p_voice->nm = s->u.voice.fname; p_voice->new_name = 1; } if (s->u.voice.nname != 0) p_voice->snm = s->u.voice.nname; if (s->u.voice.octave != NO_OCTAVE) p_voice->octave = s->u.voice.octave; switch (s->u.voice.dyn) { case 1: p_voice->posit.dyn = SL_ABOVE; p_voice->posit.vol = SL_ABOVE; break; case -1: p_voice->posit.dyn = SL_BELOW; p_voice->posit.vol = SL_BELOW; break; } switch (s->u.voice.lyrics) { case 1: p_voice->posit.voc = SL_ABOVE; break; case -1: p_voice->posit.voc = SL_BELOW; break; } switch (s->u.voice.gchord) { case 1: p_voice->posit.gch = SL_ABOVE; break; case -1: p_voice->posit.gch = SL_BELOW; break; } switch (s->u.voice.stem) { case 1: p_voice->posit.std = SL_ABOVE; break; case -1: p_voice->posit.std = SL_BELOW; break; case 2: p_voice->posit.std = 0; /* auto */ break; } switch (s->u.voice.gstem) { case 1: p_voice->posit.gsd = SL_ABOVE; break; case -1: p_voice->posit.gsd = SL_BELOW; break; case 2: p_voice->posit.gsd = 0; /* auto */ break; } if (s->u.voice.scale != 0) p_voice->scale = s->u.voice.scale; else if (s->u.voice.cue > 0) p_voice->scale = 0.7; else if (s->u.voice.cue < 0) p_voice->scale = 1; if (s->u.voice.stafflines) p_voice->stafflines = s->u.voice.stafflines; if (s->u.voice.staffscale != 0) p_voice->staffscale = s->u.voice.staffscale; if (!p_voice->combine) p_voice->combine = cfmt.combinevoices; set_tblt(p_voice); /* if in tune, switch to this voice */ if (s->state == ABC_S_TUNE) curvoice = p_voice; } /* sort the notes of the chord by pitch (lowest first) */ void sort_pitch(struct SYMBOL *s) { int i, nx, k; struct note v_note; unsigned char new_order[MAXHD], inv_order[MAXHD]; for (i = 0; i <= s->nhd; i++) new_order[i] = i; for (;;) { nx = 0; for (i = 1; i <= s->nhd; i++) { if (s->u.note.notes[i].pit >= s->u.note.notes[i - 1].pit) continue; memcpy(&v_note, &s->u.note.notes[i], sizeof v_note); memcpy(&s->u.note.notes[i], &s->u.note.notes[i - 1], sizeof v_note); memcpy(&s->u.note.notes[i - 1], &v_note, sizeof v_note); k = s->pits[i]; s->pits[i] = s->pits[i - 1]; s->pits[i - 1] = k; k = new_order[i]; new_order[i] = new_order[i - 1]; new_order[i - 1] = k; nx++; } if (nx == 
0) break; } /* change the indexes of the note head decorations */ if (s->nhd > 0) { for (i = 0; i <= s->nhd; i++) inv_order[new_order[i]] = i; for (i = 0; i <= s->u.note.dc.n; i++) { k = s->u.note.dc.tm[i].m; if (k >= 0) s->u.note.dc.tm[i].m = inv_order[k]; } } } // set the map of the notes static void set_map(struct SYMBOL *s) { struct map *map; struct note_map *note_map; struct note *note; int m, delta; for (map = maps; map; map = map->next) { if (strcmp(map->name, curvoice->map_name) == 0) break; } if (!map) return; // !? // loop on the note maps, then on the notes of the chord delta = curvoice->ckey.key_delta; for (m = 0; m <= s->nhd; m++) { note = &s->u.note.notes[m]; for (note_map = map->notes; note_map; note_map = note_map->next) { switch (note_map->type) { case MAP_ONE: if (note->pit == note_map->pit && note->acc == note_map->acc) break; continue; case MAP_OCT: if ((note->pit - note_map->pit + 28 ) % 7 == 0 && note->acc == note_map->acc) break; continue; case MAP_KEY: if ((note->pit + 28 - delta - note_map->pit) % 7 == 0) break; continue; default: // MAP_ALL break; } note->head = note_map->heads; note->color = note_map->color; if (note_map->print_pit != -128) { note->pit = note_map->print_pit; s->pits[m] = note->pit; note->acc = note_map->print_acc; } break; } } } /* -- note or rest -- */ static void get_note(struct SYMBOL *s) { struct SYMBOL *prev; int i, m, delta; prev = curvoice->last_sym; m = s->nhd; /* insert the note/rest in the voice */ sym_link(s, s->u.note.notes[0].len != 0 ? NOTEREST : SPACE); if (!(s->flags & ABC_F_GRACE)) curvoice->time += s->dur; if (curvoice->octave) { delta = curvoice->octave * 7; for (i = 0; i <= m; i++) { s->u.note.notes[i].pit += delta; s->pits[i] += delta; } } /* convert the decorations * (!beam-accel! and !beam-rall! may change the note duration) * (!8va(! 
may change ottava) */ if (s->u.note.dc.n > 0) deco_cnv(&s->u.note.dc, s, prev); if (curvoice->ottava) { delta = curvoice->ottava; for (i = 0; i <= m; i++) s->pits[i] += delta; } s->combine = curvoice->combine; s->color = curvoice->color; if (curvoice->perc) s->sflags |= S_PERC; else if (s->abc_type == ABC_T_NOTE && curvoice->transpose != 0) note_transpose(s); if (!(s->flags & ABC_F_GRACE)) { switch (curvoice->posit.std) { case SL_ABOVE: s->stem = 1; break; case SL_BELOW: s->stem = -1; break; case SL_HIDDEN: s->flags |= ABC_F_STEMLESS;; break; } } else { /* grace note - adjust its duration */ int div; if (curvoice->key.instr != K_HP && curvoice->key.instr != K_Hp && !pipeformat) { div = 2; if (!prev || !(prev->flags & ABC_F_GRACE)) { if (s->flags & ABC_F_GR_END) div = 1; /* one grace note */ } } else { div = 4; /* bagpipe */ } for (i = 0; i <= m; i++) s->u.note.notes[i].len /= div; s->dur /= div; switch (curvoice->posit.gsd) { case SL_ABOVE: s->stem = 1; break; case SL_BELOW: s->stem = -1; break; case SL_HIDDEN: s->stem = 2; break; /* opposite */ } } s->nohdi1 = s->nohdi2 = -1; /* change the figure of whole measure rests */ if (s->abc_type == ABC_T_REST) { if (s->dur == curvoice->wmeasure) { if (s->dur < BASE_LEN * 2) s->u.note.notes[0].len = BASE_LEN; else if (s->dur < BASE_LEN * 4) s->u.note.notes[0].len = BASE_LEN * 2; else s->u.note.notes[0].len = BASE_LEN * 4; } } else { /* sort the notes of the chord by pitch (lowest first) */ if (!(s->flags & ABC_F_GRACE) && curvoice->map_name) set_map(s); sort_pitch(s); } /* get the max head type, number of dots and number of flags */ if (!curvoice->auto_len || (s->flags & ABC_F_GRACE)) { int head, dots, nflags, l; if ((l = s->u.note.notes[0].len) != 0) { identify_note(s, l, &head, &dots, &nflags); s->head = head; s->dots = dots; s->nflags = nflags; for (i = 1; i <= m; i++) { if (s->u.note.notes[i].len == l) continue; identify_note(s, s->u.note.notes[i].len, &head, &dots, &nflags); if (head > s->head) s->head = head; if (dots > s->dots) s->dots = dots; if (nflags > s->nflags) s->nflags = nflags; } if (s->sflags & S_XSTEM) s->nflags = 0; /* word start+end */ } } if (s->nflags <= -2) s->flags |= ABC_F_STEMLESS; if (s->sflags & (S_TREM1 | S_TREM2)) { if (s->nflags > 0) s->nflags += s->aux; else s->nflags = s->aux; if ((s->sflags & S_TREM2) && (s->sflags & S_BEAM_END)) { /* if 2nd note - see deco.c */ prev->head = s->head; prev->aux = s->aux; prev->nflags = s->nflags; prev->flags |= (s->flags & ABC_F_STEMLESS); } } for (i = 0; i <= m; i++) { if (s->u.note.notes[i].sl1 != 0) s->sflags |= S_SL1; if (s->u.note.notes[i].sl2 != 0) s->sflags |= S_SL2; if (s->u.note.notes[i].ti1 != 0) s->sflags |= S_TI1; } switch (cfmt.shiftunison) { case 0: break; case 1: s->sflags |= S_SHIFTUNISON_1; break; case 2: s->sflags |= S_SHIFTUNISON_2; break; default: s->sflags |= S_SHIFTUNISON_1 | S_SHIFTUNISON_2; break; } /* build the guitar chords */ if (s->text) gch_build(s); } static char *get_val(char *p, float *v) { char tmp[32], *r = tmp; while (isspace((unsigned char) *p)) p++; while ((isdigit((unsigned char) *p) && r < &tmp[32 - 1]) || *p == '-' || *p == '.') *r++ = *p++; *r = '\0'; sscanf(tmp, "%f", v); return p; } // parse <path .../> from %%beginsvg and convert to Postscript static void parse_path(char *p, char *q, char *id, int idsz) { struct SYMBOL *s; char *buf, *r, *t, *op = NULL, *width, *scale, *trans; int i, fill, npar = 0; float x1, y1, x, y; char *rmax; r = strstr(p, "class=\""); if (!r || r > q) return; r += 7; fill = strncmp(r, "fill", 4) == 0; width = 
strstr(p, "stroke-width:"); scale = strstr(p, "scale("); if (scale && scale > q) scale = NULL; trans = strstr(p, "translate("); if (trans && trans > q) trans = NULL; for (;;) { p = strstr(p, "d=\""); if (!p) return; if (isspace((unsigned char) p[-1])) // (check not 'id=..") break; p += 3; } i = (int) (q - p) * 4 + 200; // estimated PS buffer size if (i > TEX_BUF_SZ) buf = malloc(i); else buf = tex_buf; rmax=buf + i; r = buf; *r++ = '/'; idsz -= 5; strncpy(r, id + 4, idsz); r += idsz; strcpy(r, "{gsave T "); r += strlen(r); if (scale || trans) { if (scale) { scale += 6; // "scale(" t = get_val(scale, &x1); if (*t == ',') t = get_val(t + 1, &y1); else y1 = x1; } if (trans) { trans += 10; // "translate(" t = get_val(trans, &x) + 1; //"," t = get_val(t, &y); } if (!scale) r += sprintf(r, "%.2f %.2f T ", x, -y); else if (!trans) r += sprintf(r, "%.2f %.2f scale ", x1, y1); else if (scale > trans) r += sprintf(r, "%.2f %.2f T %.2f %.2f scale ", x, -y, x1, y1); else r += sprintf(r, "%.2f %.2f scale %.2f %.2f T ", x1, y1, x, -y); } strcpy(r, "0 0 M\n"); r += strlen(r); if (width && width < q) { *r++ = ' '; width += 13; while (isdigit(*width) || *width == '.') *r++ = *width++; *r++ = ' '; *r++ = 'S'; *r++ = 'L'; *r++ = 'W'; } p += 3; for (;;) { if (*p == '\0' || *p == '"') break; i = 0; switch (*p++) { default: if ((isdigit((unsigned char) p[-1])) || p[-1] == '-' || p[-1] == '.') { if (!npar) continue; p--; // same op break; } continue; case 'M': op = "M"; npar = 2; break; case 'm': op = "RM"; npar = 2; break; case 'L': op = "L"; npar = 2; break; case 'l': op = "RL"; npar = 2; break; case 'H': op = "H"; npar = 1; break; case 'h': op = "h"; npar = 1; break; case 'V': op = "V"; npar = 1; break; case 'v': *r++ = ' '; *r++ = '0'; op = "RL"; i = 1; npar = 2; break; case 'z': op = "closepath"; npar = 0; break; case 'C': op = "C"; npar = 6; break; case 'c': op = "RC"; npar = 6; break; // case 'A': // op = "arc"; // break; // case 'a': // op = "arc"; // break; case 'q': op = "RC"; npar = 2; p = get_val(p, &x1); p = get_val(p, &y1); t = get_val(p, &x); t = get_val(t, &y); r += sprintf(r, " %.2f %.2f %.2f %.2f", x1*2/3, -y1*2/3, x1+(x-x1)*2/3, -y1-(y-y1)*2/3); break; case 't': op = "RC"; npar = 2; x1 = x - x1; y1 = y - y1; t = get_val(p, &x); t = get_val(t, &y); r += sprintf(r, " %.2f %.2f %.2f %.2f", x1*2/3, -y1*2/3, x1+(x-x1)*2/3, -y1-(y-y1)*2/3); break; } *r++ = ' '; for ( ; i < npar; i++) { while (isspace((unsigned char) *p)) p++; if (i & 1) { // y is inverted if (*p == '-') p++; else if (*p != '0' || p[1] != ' ') *r++ = '-'; } while ((isdigit((unsigned char) *p)) || *p == '-' || *p == '.') *r++ = *p++; *r++ = ' '; } if (*op == 'h') { *r++ = '0'; *r++ = ' '; op = "RL"; } strcpy(r, op); r += strlen(r); if (r + 30 > rmax) bug("Buffer overflow in SVG to PS", 1); } strcpy(r, fill ? " fill" : " stroke"); r += strlen(r); strcpy(r, "\ngrestore}!"); r += strlen(r); s = getarena(sizeof(struct SYMBOL)); memset(s, 0, sizeof(struct SYMBOL)); s->text = getarena(strlen(buf) + 1); strcpy(s->text, buf); ps_def(s, s->text, 'p'); if (buf != tex_buf) free(buf); } // parse <defs> .. 
</defs> from %%beginsvg static void parse_defs(char *p, char *q) { char *id, *r; int idsz; for (;;) { id = strstr(p, "id=\""); if (!id || id > q) return; r = strchr(id + 4, '"'); if (!r) return; idsz = r + 1 - id; // if SVG output, mark the id as defined if (svg || epsf > 1) { svg_def_id(id, idsz); p = r; continue; } // convert SVG to PS p = id; while (*p != '<') p--; if (strncmp(p, "<path ", 6) == 0) { r = strstr(p, "/>"); parse_path(p + 6, r, id, idsz); if (!r) break; p = r + 2; continue; } break; } } // extract the SVG defs from %%beginsvg and // convert to PostScript when PS output // move to the SVG glyphs when SVG output static void svg_ps(char *p) { char *q; for (;;) { q = strstr(p, "<defs>"); if (!q) break; p = strstr(q, "</defs>"); if (!p) { error(1, NULL, "No </defs> in %%beginsvg"); break; } parse_defs(q + 6, p); } } /* -- treat a postscript or SVG definition -- */ static void ps_def(struct SYMBOL *s, char *p, char use) /* cf user_ps_add() */ { if (!svg && epsf <= 1) { /* if PS output */ if (secure // || use == 'g' // SVG || use == 's') // PS for SVG return; } else { /* if SVG output */ if (use == 'p' // PS for PS || (use == 'g' // SVG && file_initialized > 0)) return; } if (s->abc_prev) s->state = s->abc_prev->state; if (s->state == ABC_S_TUNE) { if (use == 'g') // SVG return; sym_link(s, FMTCHG); s->aux = PSSEQ; s->text = p; // s->flags |= ABC_F_INVIS; return; } if (use == 'g') { // SVG svg_ps(p); if (!svg && epsf <= 1) return; } if (file_initialized > 0 || mbf != outbuf) a2b("%s\n", p); else user_ps_add(p, use); } /* get a symbol selection */ /* measure_number [ ":" time_numerator "/" time_denominator ] */ static char *get_symsel(struct symsel_s *symsel, char *p) { char *q; int tn, td, n; symsel->bar = strtod(p, &q); if (*q >= 'a' && *q <= 'z') symsel->seq = *q++ - 'a'; else symsel->seq = 0; if (*q == ':') { if (sscanf(q + 1, "%d/%d%n", &tn, &td, &n) != 2 || td <= 0) return 0; symsel->time = BASE_LEN * tn / td; q += 1 + n; } else { symsel->time = 0; } return q; } /* free the voice options */ static void free_voice_opt(struct voice_opt_s *opt) { struct voice_opt_s *opt2; while (opt) { opt2 = opt->next; free(opt); opt = opt2; } } // get a color static int get_color(char *p) { int i, color; static const struct { char *name; int color; } col_tb[] = { { "aqua", 0x00ffff }, { "black", 0x000000 }, { "blue", 0x0000ff }, { "fuchsia", 0xff00ff }, { "gray", 0x808080 }, { "green", 0x008000 }, { "lime", 0x00ff00 }, { "maroon", 0x800000 }, { "navy", 0x000080 }, { "olive", 0x808000 }, { "purple", 0x800080 }, { "red", 0xff0000 }, { "silver", 0xc0c0c0 }, { "teal", 0x008080 }, { "white", 0xffffff }, { "yellow", 0xffff00 }, }; if (*p == '#') { if (sscanf(p, "#%06x", &color) != 1 || (unsigned) color > 0x00ffffff) return -1; return color; } for (i = sizeof col_tb / sizeof col_tb[0]; --i >= 0; ) { if (strncasecmp(p, col_tb[i].name, strlen(col_tb[i].name)) == 0) break; } if (i < 0) return -1; return col_tb[i].color; } /* get a transposition */ static int get_transpose(char *p) { int val, pit1, pit2, acc; static int pit_st[7] = {0, 2, 4, 5, 7, 9, 11}; if (isdigit(*p) || *p == '-' || *p == '+') { sscanf(p, "%d", &val); val *= 3; switch (p[strlen(p) - 1]) { default: return val; case '#': val++; break; case 'b': val += 2; break; } if (val > 0) return val; return val - 3; } // by music interval p = parse_acc_pit(p, &pit1, &acc); if (acc < 0) { error(1, NULL, " in %%%%transpose"); return 0; } pit1 += 126 - 2; // for value > 0 and 'C' % 7 == 0 pit1 = (pit1 / 7) * 12 + pit_st[pit1 % 7]; switch (acc) { case 
A_DS: pit1 += 2; break; case A_SH: pit1++; break; case A_FT: pit1--; break; case A_DF: pit1 -= 2; break; } p = parse_acc_pit(p, &pit2, &acc); if (acc < 0) { error(1, NULL, " in %%%%transpose"); return 0; } pit2 += 126 - 2; pit2 = (pit2 / 7) * 12 + pit_st[pit2 % 7]; switch (acc) { case A_DS: pit2 += 2; break; case A_SH: pit2++; break; case A_FT: pit2--; break; case A_DF: pit2 -= 2; break; } val = (pit2 - pit1) * 3; switch (acc) { default: return val; case A_DS: case A_SH: val++; break; case A_FT: case A_DF: val += 2; break; } if (val > 0) return val; return val - 3; } // create a note mapping // %%map map_name note [print [heads]] [param]* static void get_map(char *p) { struct map *map; struct note_map *note_map; char *name, *q; int l, type, pit, acc; if (*p == '\0') return; /* map name */ name = p; while (!isspace((unsigned char) *p) && *p != '\0') p++; l = p - name; /* base note */ while (isspace((unsigned char) *p)) p++; if (*p == '*') { type = MAP_ALL; p++; } else if (strncmp(p, "octave,", 7) == 0) { type = MAP_OCT; p += 7; } else if (strncmp(p, "key,", 4) == 0) { type = MAP_KEY; p += 4; } else if (strncmp(p, "all", 3) == 0) { type = MAP_ALL; while (!isspace((unsigned char) *p) && *p != '\0') p++; } else { type = MAP_ONE; } if (type != MAP_ALL) { p = parse_acc_pit(p, &pit, &acc); if (acc < 0) // if error pit = acc = 0; if (type == MAP_OCT || type == MAP_KEY) { pit %= 7; if (type == MAP_KEY) acc = A_NULL; } } else { pit = acc = 0; } // get/create the map for (map = maps; map; map = map->next) { if (strncmp(name, map->name, l) == 0) break; } if (!map) { map = getarena(sizeof *map); map->next = maps; maps = map; map->name = getarena(l + 1); strncpy(map->name, name, l); map->name[l] = '\0'; map->notes = NULL; } for (note_map = map->notes; note_map; note_map = note_map->next) { if (note_map->type == type && note_map->pit == pit && note_map->acc == acc) break; } if (!note_map) { note_map = getarena(sizeof *note_map); memset(note_map, 0, sizeof *note_map); note_map->next = map->notes; map->notes = note_map; note_map->type = type; note_map->pit = pit; note_map->acc = acc; note_map->print_pit = -128; note_map->color = -1; } /* try the optional 'print' and 'heads' parameters */ while (isspace((unsigned char) *p)) p++; if (*p == '\0') return; q = p; while (!isspace((unsigned char) *q) && *q != '\0') { if (*q == '=') break; q++; } if (isspace((unsigned char) *q) || *q == '\0') { if (*p != '*') { p = parse_acc_pit(p, &pit, &acc); if (acc >= 0) { note_map->print_pit = pit; note_map->print_acc = acc; } if (*p == '\0') return; } p = q; while (isspace((unsigned char) *p)) p++; if (*p == '\0') return; q = p; while (!isspace((unsigned char) *q) && *q != '\0') { if (*q == '=') break; q++; } if (isspace((unsigned char) *q) || *q == '\0') { name = p; p = q; l = p - name; note_map->heads = getarena(l + 1); strncpy(note_map->heads, name, l); note_map->heads[l] = '\0'; } } /* loop on the parameters */ for (;;) { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; if (strncmp(p, "heads=", 6) == 0) { p += 6; name = p; while (!isspace((unsigned char) *p) && *p != '\0') p++; l = p - name; note_map->heads = getarena(l + 1); strncpy(note_map->heads, name, l); note_map->heads[l] = '\0'; } else if (strncmp(p, "print=", 6) == 0) { p += 6; p = parse_acc_pit(p, &pit, &acc); if (acc >= 0) { note_map->print_pit = pit; note_map->print_acc = acc; } } else if (strncmp(p, "color=", 6) == 0) { int color; color = get_color(p + 6); if (color < 0) { error(1, NULL, "Bad color in %%%%map"); return; } note_map->color = 
color; } while (!isspace((unsigned char) *p) && *p != '\0') p++; } } /* -- process a pseudo-comment (%% or I:) -- */ static struct SYMBOL *process_pscomment(struct SYMBOL *s) { char w[32], *p, *q; int voice; float h1; int lock = 0; p = s->text + 2; /* skip '%%' */ q = p + strlen(p) - 5; if (q > p && strncmp(q, " lock", 5) == 0) { lock = 1; *q = '\0'; } p = get_str(w, p, sizeof w); if (s->state == ABC_S_HEAD && !check_header(s)) { error(1, s, "Cannot have %%%%%s in tune header", w); return s; } switch (w[0]) { case 'b': if (strcmp(w, "beginps") == 0 || strcmp(w, "beginsvg") == 0) { char use; if (w[5] == 'p') { if (strncmp(p, "svg", 3) == 0) use = 's'; else if (strncmp(p, "nosvg", 5) == 0) use = 'p'; else use = 'b'; } else { use = 'g'; } p = s->text + 2 + 7; while (*p != '\0' && *p != '\n') p++; if (*p == '\0') return s; /* empty */ ps_def(s, p + 1, use); return s; } if (strcmp(w, "begintext") == 0) { int job; if (s->state == ABC_S_TUNE) { if (!multicol_start) gen_ly(1); } else if (s->state == ABC_S_GLOBAL) { if (epsf || !in_fname) return s; } p = s->text + 2 + 9; while (*p == ' ' || *p == '\t') p++; if (*p != '\n') { job = get_textopt(p); while (*p != '\0' && *p != '\n') p++; if (*p == '\0') return s; /* empty */ } else { job = cfmt.textoption; } if (job != T_SKIP) { p++; write_text(w, p, job); } return s; } if (strcmp(w, "break") == 0) { struct brk_s *brk; if (s->state != ABC_S_HEAD) { error(1, s, "%%%%%s ignored", w); return s; } if (*p == '\0') return s; for (;;) { brk = malloc(sizeof *brk); p = get_symsel(&brk->symsel, p); if (!p) { error(1, s, "Bad selection in %%%%%s", w); return s; } brk->next = brks; brks = brk; if (*p != ',' && *p != ' ') break; p++; } return s; } break; case 'c': if (strcmp(w, "center") == 0) goto center; if (strcmp(w, "clef") == 0) { if (s->state != ABC_S_GLOBAL) clef_def(s); return s; } if (strcmp(w, "clip") == 0) { if (!cur_tune_opts) { error(1, s, "%%%%%s not in %%%%tune sequence", w); return s; } /* %%clip <symbol selection> "-" <symbol selection> */ if (*p != '-') { p = get_symsel(&clip_start, p); if (!p) { error(1, s, "Bad start in %%%%%s", w); return s; } if (*p != '-') { error(1, s, "Lack of '-' in %%%%%s", w); return s; } } p++; p = get_symsel(&clip_end, p); if (!p) { error(1, s, "Bad end in %%%%%s", w); return s; } if (clip_start.bar < 0) clip_start.bar = 0; if (clip_end.bar < clip_start.bar || (clip_end.bar == clip_start.bar && clip_end.time <= clip_start.time)) { clip_end.bar = (short unsigned) ~0 >> 1; } return s; } break; case 'd': if (strcmp(w, "deco") == 0) { deco_add(p); return s; } if (strcmp(w, "dynamic") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } break; case 'E': if (strcmp(w, "EPS") == 0) { float x1, y1, x2, y2; FILE *fp; char fn[STRL1], line[STRL1]; gen_ly(1); if (secure || cfmt.textoption == T_SKIP) return s; get_str(line, p, sizeof line); if ((fp = open_file(line, "eps", fn)) == NULL) { error(1, s, "No such file: %s", line); return s; } /* get the bounding box */ x1 = x2 = 0; while (fgets(line, sizeof line, fp)) { if (strncmp(line, "%%BoundingBox:", 14) == 0) { if (sscanf(&line[14], "%f %f %f %f", &x1, &y1, &x2, &y2) == 4) break; } } fclose(fp); if (x1 == x2) { error(1, s, "No bounding box in '%s'", fn); return s; } if (cfmt.textoption == T_CENTER || cfmt.textoption == T_RIGHT) { float lw; lw = ((cfmt.landscape ? 
cfmt.pageheight : cfmt.pagewidth) - cfmt.leftmargin - cfmt.rightmargin) / cfmt.scale; if (cfmt.textoption == T_CENTER) x1 += (lw - (x2 - x1)) * 0.5; else x1 += lw - (x2 - x1); } a2b("\001"); /* include file (must be the first after eob) */ bskip(y2 - y1); a2b("%.2f %.2f%%%s\n", x1, -y1, fn); buffer_eob(0); return s; } break; case 'g': if (strcmp(w, "gchord") == 0 || strcmp(w, "gstemdir") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "glyph") == 0) { if (!svg && epsf <= 1) glyph_add(p); return s; } break; case 'm': if (strcmp(w, "map") == 0) { get_map(p); return s; } if (strcmp(w, "maxsysstaffsep") == 0) { if (s->state != ABC_S_TUNE) break; parsys->voice[curvoice - voice_tb].maxsep = scan_u(p, 0); return s; } if (strcmp(w, "multicol") == 0) { float bposy; generate(); if (strncmp(p, "start", 5) == 0) { if (!in_page) a2b("%%\n"); /* initialize the output */ buffer_eob(0); bposy = get_bposy(); multicol_max = multicol_start = bposy; lmarg = cfmt.leftmargin; rmarg = cfmt.rightmargin; } else if (strncmp(p, "new", 3) == 0) { if (multicol_start == 0) { error(1, s, "%%%%%s new without start", w); } else { buffer_eob(0); bposy = get_bposy(); if (bposy < multicol_start) bskip((bposy - multicol_start) / cfmt.scale); if (bposy < multicol_max) multicol_max = bposy; cfmt.leftmargin = lmarg; cfmt.rightmargin = rmarg; } } else if (strncmp(p, "end", 3) == 0) { if (multicol_start == 0) { error(1, s, "%%%%%s end without start", w); } else { buffer_eob(0); bposy = get_bposy(); if (bposy > multicol_max) bskip((bposy - multicol_max) / cfmt.scale); else a2b("%%\n"); /* force write_buffer */ cfmt.leftmargin = lmarg; cfmt.rightmargin = rmarg; multicol_start = 0; buffer_eob(0); if (!info['X' - 'A'] && !epsf) write_buffer(); } } else { error(1, s, "Unknown keyword '%s' in %%%%%s", p, w); } return s; } break; case 'M': if (strcmp(w, "MIDI") == 0 && strncmp(p, "temperamentequal", 16) == 0) { int n; if (cfmt.nedo) { error(1, s, "%%%%MIDI temperamentequal redefined"); return s; } p += 16; while (isspace((unsigned char) *p)) p++; n = atoi(p); if (n < 7 || n > 53) { error(1, s, "Bad value in %%%%MIDI temperamentequal"); return s; } cfmt.nedo = n; } break; case 'n': if (strcmp(w, "newpage") == 0) { if (epsf || !in_fname) return s; if (s->state == ABC_S_TUNE) generate(); buffer_eob(0); write_buffer(); // use_buffer = 0; if (isdigit((unsigned char) *p)) pagenum = atoi(p); close_page(); if (s->state == ABC_S_TUNE) bskip(cfmt.topspace); return s; } break; case 'p': if (strcmp(w, "pos") == 0) { // %%pos <type> <position> p = get_str(w, p, sizeof w); set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "ps") == 0 || strcmp(w, "postscript") == 0) { ps_def(s, p, 'b'); return s; } break; case 'o': if (strcmp(w, "ornament") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } break; case 'r': if (strcmp(w, "repbra") == 0) { if (s->state != ABC_S_TUNE) return s; curvoice->norepbra = strchr("0FfNn", *p) || *p == '\0'; return s; } if (strcmp(w, "repeat") == 0) { int n, k; if (s->state != ABC_S_TUNE) return s; if (!curvoice->last_sym) { error(1, s, "%%%s cannot start a tune", w); return s; } if (*p == '\0') { n = 1; k = 1; } else { n = atoi(p); if (n < 1 || (curvoice->last_sym->type == BAR && n > 2)) { error(1, s, "Incorrect 1st value in %%%%%s", w); return s; } while (*p != '\0' && !isspace((unsigned char) *p)) p++; while (isspace((unsigned char) *p)) p++; if (*p == '\0') { k = 1; } else { k = atoi(p); if (k < 1) { // || (curvoice->last_sym->type == BAR // && n == 2 // && k > 1)) { 
error(1, s, "Incorrect 2nd value in %%%%%s", w); return s; } } } s->aux = REPEAT; if (curvoice->last_sym->type == BAR) s->doty = n; else s->doty = -n; sym_link(s, FMTCHG); s->nohdi1 = k; s->text = NULL; return s; } break; case 's': if (strcmp(w, "setbarnb") == 0) { if (s->state == ABC_S_TUNE) { struct SYMBOL *s2; int n; n = atoi(p); for (s2 = s->abc_next; s2; s2 = s2->abc_next) { if (s2->abc_type == ABC_T_BAR) { s2->aux = n; break; } } return s; } strcpy(w, "measurefirst"); break; } if (strcmp(w, "sep") == 0) { float h2, len, lwidth; if (s->state == ABC_S_TUNE) { gen_ly(0); } else if (s->state == ABC_S_GLOBAL) { if (epsf || !in_fname) return s; } lwidth = (cfmt.landscape ? cfmt.pageheight : cfmt.pagewidth) - cfmt.leftmargin - cfmt.rightmargin; h1 = h2 = len = 0; if (*p != '\0') { h1 = scan_u(p, 0); while (*p != '\0' && !isspace((unsigned char) *p)) p++; while (isspace((unsigned char) *p)) p++; } if (*p != '\0') { h2 = scan_u(p, 0); while (*p != '\0' && !isspace((unsigned char) *p)) p++; while (isspace((unsigned char) *p)) p++; } if (*p != '\0') len = scan_u(p, 0); if (h1 < 1) h1 = 0.5 CM; if (h2 < 1) h2 = h1; if (len < 1) len = 3.0 CM; bskip(h1); a2b("%.1f %.1f sep0\n", len / cfmt.scale, (lwidth - len) * 0.5 / cfmt.scale); bskip(h2); buffer_eob(0); return s; } if (strcmp(w, "staff") == 0) { int staff; if (s->state != ABC_S_TUNE) return s; if (*p == '+') staff = curvoice->cstaff + atoi(p + 1); else if (*p == '-') staff = curvoice->cstaff - atoi(p + 1); else staff = atoi(p) - 1; if ((unsigned) staff > (unsigned) nstaff) { error(1, s, "Bad staff in %%%%%s", w); return s; } curvoice->floating = 0; curvoice->cstaff = staff; return s; } if (strcmp(w, "staffbreak") == 0) { if (s->state != ABC_S_TUNE) return s; if (isdigit(*p)) { s->xmx = scan_u(p, 0); if (s->xmx < 0) { error(1, s, "Bad value in %%%%%s", w); return s; } if (p[strlen(p) - 1] == 'f') s->doty = 1; } else { s->xmx = 0.5 CM; if (*p == 'f') s->doty = 1; } sym_link(s, STBRK); return s; } if (strcmp(w, "stafflines") == 0) { if (isdigit((unsigned char) *p)) { switch (atoi(p)) { case 0: p = "..."; break; case 1: p = "..|"; break; case 2: p = ".||"; break; case 3: p = ".|||"; break; case 4: p = "||||"; break; case 5: p = "|||||"; break; case 6: p = "||||||"; break; case 7: p = "|||||||"; break; case 8: p = "||||||||"; break; default: error(1, s, "Bad number of lines"); break; } } else { int l; l = strlen(p); q = p; p = getarena(l + 1); strcpy(p, q); } if (s->state != ABC_S_TUNE) { for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].stafflines = p; } else { curvoice->stafflines = p; } return s; } if (strcmp(w, "staffscale") == 0) { char *q; float scale; scale = strtod(p, &q); if (scale < 0.3 || scale > 2 || (*q != '\0' && *q != ' ')) { error(1, s, "Bad value in %%%%%s", w); return s; } if (s->state != ABC_S_TUNE) { for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].staffscale = scale; } else { curvoice->staffscale = scale; } return s; } if (strcmp(w, "staves") == 0 || strcmp(w, "score") == 0) { if (s->state == ABC_S_GLOBAL) return s; get_staves(s); return s; } if (strcmp(w, "stemdir") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "sysstaffsep") == 0) { if (s->state != ABC_S_TUNE) break; parsys->voice[curvoice - voice_tb].sep = scan_u(p, 0); return s; } break; case 't': if (strcmp(w, "text") == 0) { int job; center: if (s->state == ABC_S_TUNE) { gen_ly(1); } else if (s->state == ABC_S_GLOBAL) { if (epsf || !in_fname) return s; } if (w[0] == 'c') { job = T_CENTER; } else { job = cfmt.textoption; 
switch(job) { case T_SKIP: return s; case T_LEFT: case T_RIGHT: case T_CENTER: break; default: job = T_LEFT; break; } } write_text(w, p, job); return s; } if (strcmp(w, "tablature") == 0) { struct tblt_s *tblt; int i, j; tblt = tblt_parse(p); if (tblt == 0) return s; switch (s->state) { case ABC_S_TUNE: case ABC_S_HEAD: for (i = 0; i < ncmdtblt; i++) { if (cmdtblts[i].active) continue; j = cmdtblts[i].index; if (j < 0 || tblts[j] == tblt) return s; } /* !! 2 tblts per voice !! */ if (curvoice->tblts[0] == tblt || curvoice->tblts[1] == tblt) break; if (curvoice->tblts[1]) { error(1, s, "Too many tablatures for voice %s", curvoice->id); break; } if (!curvoice->tblts[0]) curvoice->tblts[0] = tblt; else curvoice->tblts[1] = tblt; break; } return s; } if (strcmp(w, "transpose") == 0) { struct VOICE_S *p_voice; struct SYMBOL *s2; int i, val; val = get_transpose(p); switch (s->state) { case ABC_S_GLOBAL: cfmt.transpose = val; return s; case ABC_S_HEAD: { cfmt.transpose += val; for (i = MAXVOICE, p_voice = voice_tb; --i >= 0; p_voice++) { p_voice->transpose = cfmt.transpose; memcpy(&p_voice->key, &p_voice->okey, sizeof p_voice->key); key_transpose(&p_voice->key); memcpy(&p_voice->ckey, &p_voice->key, sizeof p_voice->ckey); if (p_voice->key.empty) p_voice->key.sf = 0; } return s; } } curvoice->transpose = cfmt.transpose + val; s2 = curvoice->sym; if (!s2) { memcpy(&curvoice->key, &curvoice->okey, sizeof curvoice->key); key_transpose(&curvoice->key); memcpy(&curvoice->ckey, &curvoice->key, sizeof curvoice->ckey); if (curvoice->key.empty) curvoice->key.sf = 0; return s; } for (;;) { if (s2->type == KEYSIG) break; if (s2->time == curvoice->time) { s2 = s2->prev; if (s2) continue; } s2 = s; s2->abc_type = ABC_T_INFO; s2->text = (char *) getarena(2); s2->text[0] = 'K'; s2->text[1] = '\0'; sym_link(s2, KEYSIG); // if (!curvoice->ckey.empty) // s2->aux = curvoice->ckey.sf; s2->aux = curvoice->key.sf; break; } memcpy(&s2->u.key, &curvoice->okey, sizeof s2->u.key); key_transpose(&s2->u.key); memcpy(&curvoice->ckey, &s2->u.key, sizeof curvoice->ckey); if (curvoice->key.empty) s2->u.key.sf = 0; return s; } if (strcmp(w, "tune") == 0) { struct SYMBOL *s2, *s3; struct tune_opt_s *opt, *opt2; if (s->state != ABC_S_GLOBAL) { error(1, s, "%%%%%s ignored", w); return s; } /* if void %%tune, remove all tune options */ if (*p == '\0') { opt = tune_opts; while (opt) { free_voice_opt(opt->voice_opts); opt2 = opt->next; free(opt); opt = opt2; } tune_opts = NULL; return s; } if (strcmp(p, "end") == 0) return s; /* end of previous %%tune */ /* search the end of the tune options */ s2 = s; for (;;) { s3 = s2->abc_next; if (!s3) break; if (s3->abc_type != ABC_T_NULL && (s3->abc_type != ABC_T_PSCOM || strncmp(&s3->text[2], "tune ", 5) == 0)) break; s2 = s3; } /* search if already a same %%tune */ opt2 = NULL; for (opt = tune_opts; opt; opt = opt->next) { if (strcmp(opt->s->text, s->text) == 0) break; opt2 = opt; } if (opt) { free_voice_opt(opt->voice_opts); if (s2 == s) { /* no option */ if (!opt2) tune_opts = opt->next; else opt2->next = opt->next; free(opt); return s; } opt->voice_opts = NULL; } else { if (s2 == s) /* no option */ return s; opt = malloc(sizeof *opt); memset(opt, 0, sizeof *opt); opt->next = tune_opts; tune_opts = opt; } /* link the options */ opt->s = s3 = s; cur_tune_opts = opt; s = s->abc_next; for (;;) { if (s->abc_type != ABC_T_PSCOM) continue; if (strncmp(&s->text[2], "voice ", 6) == 0) { s = process_pscomment(s); } else { s->state = ABC_S_HEAD; /* !! no reverse link !! 
*/ s3->next = s; s3 = s; } if (s == s2) break; s = s->abc_next; } cur_tune_opts = NULL; return s; } break; case 'u': if (strcmp(w, "user") == 0) { deco[s->u.user.symbol] = parse.deco_tb[s->u.user.value - 128]; return s; } break; case 'v': if (strcmp(w, "vocal") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "voice") == 0) { struct SYMBOL *s2, *s3; struct voice_opt_s *opt, *opt2; if (s->state != ABC_S_GLOBAL) { error(1, s, "%%%%voice ignored"); return s; } /* if void %%voice, free all voice options */ if (*p == '\0') { if (cur_tune_opts) { free_voice_opt(cur_tune_opts->voice_opts); cur_tune_opts->voice_opts = NULL; } else { free_voice_opt(voice_opts); voice_opts = NULL; } return s; } if (strcmp(p, "end") == 0) return s; /* end of previous %%voice */ if (cur_tune_opts) opt = cur_tune_opts->voice_opts; else opt = voice_opts; /* search the end of the voice options */ s2 = s; for (;;) { s3 = s2->abc_next; if (!s3) break; if (s3->abc_type != ABC_T_NULL && (s3->abc_type != ABC_T_PSCOM || strncmp(&s3->text[2], "score ", 6) == 0 || strncmp(&s3->text[2], "staves ", 7) == 0 || strncmp(&s3->text[2], "tune ", 5) == 0 || strncmp(&s3->text[2], "voice ", 6) == 0)) break; s2 = s3; } /* if already the same %%voice * remove the options */ opt2 = NULL; for ( ; opt; opt = opt->next) { if (strcmp(opt->s->text, s->text) == 0) { if (!opt2) { if (cur_tune_opts) cur_tune_opts->voice_opts = NULL; else voice_opts = NULL; } else { opt2->next = opt->next; } free(opt); break; } opt2 = opt; } if (s2 == s) /* no option */ return s; opt = malloc(sizeof *opt + strlen(p)); memset(opt, 0, sizeof *opt); if (cur_tune_opts) { opt->next = cur_tune_opts->voice_opts; cur_tune_opts->voice_opts = opt; } else { opt->next = voice_opts; voice_opts = opt; } /* link the options */ opt->s = s3 = s; for ( ; s != s2; s = s->abc_next) { if (s->abc_next->abc_type != ABC_T_PSCOM) continue; s->abc_next->state = ABC_S_TUNE; s3->next = s->abc_next; s3 = s3->next; } return s; } if (strcmp(w, "voicecolor") == 0) { int color; if (!curvoice) return s; color = get_color(p); if (color < 0) error(1, s, "Bad color in %%%%voicecolor"); else curvoice->color = color; return s; } if (strcmp(w, "voicecombine") == 0) { int combine; if (sscanf(p, "%d", &combine) != 1) { error(1, s, "Bad value in %%%%voicecombine"); return s; } switch (s->state) { case ABC_S_GLOBAL: cfmt.combinevoices = combine; break; case ABC_S_HEAD: for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].combine = combine; break; default: curvoice->combine = combine; break; } return s; } if (strcmp(w, "voicemap") == 0) { if (s->state != ABC_S_TUNE) { for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].map_name = p; } else { curvoice->map_name = p; } return s; } if (strcmp(w, "voicescale") == 0) { char *q; float scale; scale = strtod(p, &q); if (scale < 0.6 || scale > 1.5 || (*q != '\0' && *q != ' ')) { error(1, s, "Bad %%%%voicescale value"); return s; } if (s->state != ABC_S_TUNE) { for (voice = 0; voice < MAXVOICE; voice++) voice_tb[voice].scale = scale; } else { curvoice->scale = scale; } return s; } if (strcmp(w, "volume") == 0) { set_voice_param(curvoice, s->state, w, p); return s; } if (strcmp(w, "vskip") == 0) { if (s->state == ABC_S_TUNE) { gen_ly(0); } else if (s->state == ABC_S_GLOBAL) { if (epsf || !in_fname) return s; } bskip(scan_u(p, 0)); buffer_eob(0); return s; } break; } if (s->state == ABC_S_TUNE) { if (strcmp(w, "leftmargin") == 0 || strcmp(w, "rightmargin") == 0 || strcmp(w, "scale") == 0) { generate(); block_put(); } } 
interpret_fmt_line(w, p, lock); if (cfmt.alignbars && strcmp(w, "alignbars") == 0) { int i; generate(); if ((unsigned) cfmt.alignbars > MAXSTAFF) { error(1, s, "Too big value in %%%%alignbars"); cfmt.alignbars = MAXSTAFF; } if (staves_found >= 0) /* (compatibility) */ cfmt.alignbars = nstaff + 1; first_voice = curvoice = voice_tb; for (i = 0; i < cfmt.alignbars; i++) { voice_tb[i].staff = voice_tb[i].cstaff = i; voice_tb[i].next = &voice_tb[i + 1]; parsys->staff[i].flags |= STOP_BAR; parsys->voice[i].staff = i; parsys->voice[i].range = i; } i--; voice_tb[i].next = NULL; parsys->nstaff = nstaff = i; } return s; } /* -- set the duration of notes/rests in a tuplet -- */ /*fixme: KO if voice change*/ /*fixme: KO if in a grace sequence*/ static void set_tuplet(struct SYMBOL *t) { struct SYMBOL *s, *s1; int l, r, lplet, grace; r = t->u.tuplet.r_plet; grace = t->flags & ABC_F_GRACE; l = 0; for (s = t->abc_next; s; s = s->abc_next) { if (s->abc_type == ABC_T_TUPLET) { struct SYMBOL *s2; int l2, r2; r2 = s->u.tuplet.r_plet; l2 = 0; for (s2 = s->abc_next; s2; s2 = s2->abc_next) { switch (s2->abc_type) { case ABC_T_NOTE: case ABC_T_REST: break; case ABC_T_EOLN: if (s2->u.eoln.type != 1) { error(1, t, "End of line found inside a nested tuplet"); return; } continue; default: continue; } if (s2->u.note.notes[0].len == 0) continue; if (grace ^ (s2->flags & ABC_F_GRACE)) continue; s1 = s2; l2 += s1->dur; if (--r2 <= 0) break; } l2 = l2 * s->u.tuplet.q_plet / s->u.tuplet.p_plet; s->aux = l2; l += l2; r -= s->u.tuplet.r_plet; if (r == 0) break; if (r < 0) { error(1, t, "Bad nested tuplet"); break; } s = s2; continue; } switch (s->abc_type) { case ABC_T_NOTE: case ABC_T_REST: break; case ABC_T_EOLN: if (s->u.eoln.type != 1) { error(1, t, "End of line found inside a tuplet"); return; } continue; default: continue; } if (s->u.note.notes[0].len == 0) /* space ('y') */ continue; if (grace ^ (s->flags & ABC_F_GRACE)) continue; s1 = s; l += s->dur; if (--r <= 0) break; } if (!s) { error(1, t, "End of tune found inside a tuplet"); return; } if (t->aux != 0) /* if nested tuplet */ lplet = t->aux; else lplet = (l * t->u.tuplet.q_plet) / t->u.tuplet.p_plet; r = t->u.tuplet.r_plet; for (s = t->abc_next; s; s = s->abc_next) { int olddur; if (s->abc_type == ABC_T_TUPLET) { int r2; r2 = s->u.tuplet.r_plet; s1 = s; olddur = s->aux; s1->aux = (olddur * lplet) / l; l -= olddur; lplet -= s1->aux; r -= r2; for (;;) { s = s->abc_next; if (s->abc_type != ABC_T_NOTE && s->abc_type != ABC_T_REST) continue; if (s->u.note.notes[0].len == 0) continue; if (grace ^ (s->flags & ABC_F_GRACE)) continue; if (--r2 <= 0) break; } if (r <= 0) goto done; continue; } if (s->abc_type != ABC_T_NOTE && s->abc_type != ABC_T_REST) continue; if (s->u.note.notes[0].len == 0) continue; s->sflags |= S_IN_TUPLET; if (grace ^ (s->flags & ABC_F_GRACE)) continue; s1 = s; olddur = s->dur; s1->dur = (olddur * lplet) / l; if (--r <= 0) break; l -= olddur; lplet -= s1->dur; } done: if (grace) { error(1, t, "Tuplets in grace note sequence not yet treated"); } else { sym_link(t, TUPLET); t->aux = cfmt.tuplets; } }
static void get_over(struct SYMBOL *s) { struct VOICE_S *p_voice, *p_voice2, *p_voice3; int range, voice, voice2, voice3; static char tx_wrong_dur[] = "Wrong duration in voice overlay"; static char txt_no_note[] = "No note in voice overlay"; /* treat the end of overlay */ p_voice = curvoice; if (p_voice->ignore) return; if (s->abc_type == ABC_T_BAR || s->u.v_over.type == V_OVER_E) { if (!p_voice->last_sym) { error(1, s, txt_no_note); return; } p_voice->last_sym->sflags |= S_BEAM_END; over_bar = 0; if (over_time < 0) { error(1, s, "Erroneous end of voice overlap"); return; } if (p_voice->time != over_mxtime) error(1, s, tx_wrong_dur); curvoice = &voice_tb[over_voice]; over_mxtime = 0; over_voice = -1; over_time = -1; return; } /* treat the full overlay start */ if (s->u.v_over.type == V_OVER_S) { over_voice = p_voice - voice_tb; over_time = p_voice->time; return; } /* (here is treated a new overlay - '&') */ /* create the extra voice if not done yet */ if (!p_voice->last_sym) { error(1, s, txt_no_note); return; } p_voice->last_sym->sflags |= S_BEAM_END; voice2 = s->u.v_over.voice; p_voice2 = &voice_tb[voice2]; if (parsys->voice[voice2].range < 0) { int clone; if (cfmt.abc2pscompat) { error(1, s, "Cannot have %%%%abc2pscompat"); cfmt.abc2pscompat = 0; } clone = p_voice->clone >= 0; p_voice2->id[0] = '&'; p_voice2->id[1] = '\0'; p_voice2->second = 1; parsys->voice[voice2].second = 1; p_voice2->scale = p_voice->scale; p_voice2->octave = p_voice->octave; p_voice2->transpose = p_voice->transpose; memcpy(&p_voice2->key, &p_voice->key, sizeof p_voice2->key); memcpy(&p_voice2->ckey, &p_voice->ckey, sizeof p_voice2->ckey); memcpy(&p_voice2->okey, &p_voice->okey, sizeof p_voice2->okey); p_voice2->posit = p_voice->posit; p_voice2->staff = p_voice->staff; p_voice2->cstaff = p_voice->cstaff; p_voice2->color = p_voice->color; p_voice2->map_name = p_voice->map_name; range = parsys->voice[p_voice - voice_tb].range; for (voice = 0; voice < MAXVOICE; voice++) { if (parsys->voice[voice].range > range) parsys->voice[voice].range += clone + 1; } parsys->voice[voice2].range = range + 1; voice_link(p_voice2); if (clone) { for (voice3 = MAXVOICE; --voice3 >= 0; ) { if (parsys->voice[voice3].range < 0) break; } if (voice3 > 0) { p_voice3 = &voice_tb[voice3]; strcpy(p_voice3->id, p_voice2->id); p_voice3->second = 1; parsys->voice[voice3].second = 1; p_voice3->scale = voice_tb[p_voice->clone].scale; parsys->voice[voice3].range = range + 2; voice_link(p_voice3); p_voice2->clone = voice3; } else { error(1, s, "Too many voices for overlay cloning"); } } } voice = p_voice - voice_tb; // p_voice2->cstaff = p_voice2->staff = parsys->voice[voice2].staff // = parsys->voice[voice].staff; // if ((voice3 = p_voice2->clone) >= 0) { // p_voice3 = &voice_tb[voice3]; // p_voice3->cstaff = p_voice3->staff // = parsys->voice[voice3].staff // = parsys->voice[p_voice->clone].staff; // } if (over_time < 0) { /* first '&' in a measure */ int time; over_bar = 1; over_mxtime = p_voice->time; over_voice = voice; time = p_voice2->time; for (s = p_voice->last_sym; /*s*/; s = s->prev) { if (s->type == BAR || s->time <= time) /* (if start of tune) */ break; } over_time = s->time; } else { if (over_mxtime == 0) over_mxtime = p_voice->time; else if (p_voice->time != over_mxtime) error(1, s, tx_wrong_dur); } p_voice2->time = over_time; curvoice = p_voice2; }
static void get_over(struct SYMBOL *s) { struct VOICE_S *p_voice, *p_voice2, *p_voice3; int range, voice, voice2, voice3; static char tx_wrong_dur[] = "Wrong duration in voice overlay"; static char txt_no_note[] = "No note in voice overlay"; /* treat the end of overlay */ p_voice = curvoice; if (p_voice->ignore) return; if (s->abc_type == ABC_T_BAR || s->u.v_over.type == V_OVER_E) { if (!p_voice->last_sym) { error(1, s, txt_no_note); return; } p_voice->last_sym->sflags |= S_BEAM_END; over_bar = 0; if (over_time < 0) { error(1, s, "Erroneous end of voice overlap"); return; } curvoice = &voice_tb[over_voice]; if (p_voice->time != over_mxtime) { error(1, s, tx_wrong_dur); if (p_voice->time > over_mxtime) curvoice->time = p_voice->time; else p_voice->time = curvoice->time; } over_mxtime = 0; over_voice = -1; over_time = -1; return; } /* treat the full overlay start */ if (s->u.v_over.type == V_OVER_S) { over_voice = p_voice - voice_tb; over_time = p_voice->time; return; } /* (here is treated a new overlay - '&') */ /* create the extra voice if not done yet */ if (!p_voice->last_sym) { error(1, s, txt_no_note); return; } p_voice->last_sym->sflags |= S_BEAM_END; voice2 = s->u.v_over.voice; p_voice2 = &voice_tb[voice2]; if (parsys->voice[voice2].range < 0) { int clone; if (cfmt.abc2pscompat) { error(1, s, "Cannot have %%%%abc2pscompat"); cfmt.abc2pscompat = 0; } clone = p_voice->clone >= 0; p_voice2->id[0] = '&'; p_voice2->id[1] = '\0'; p_voice2->second = 1; parsys->voice[voice2].second = 1; p_voice2->scale = p_voice->scale; p_voice2->octave = p_voice->octave; p_voice2->transpose = p_voice->transpose; memcpy(&p_voice2->key, &p_voice->key, sizeof p_voice2->key); memcpy(&p_voice2->ckey, &p_voice->ckey, sizeof p_voice2->ckey); memcpy(&p_voice2->okey, &p_voice->okey, sizeof p_voice2->okey); p_voice2->posit = p_voice->posit; p_voice2->staff = p_voice->staff; p_voice2->cstaff = p_voice->cstaff; p_voice2->color = p_voice->color; p_voice2->map_name = p_voice->map_name; range = parsys->voice[p_voice - voice_tb].range; for (voice = 0; voice < MAXVOICE; voice++) { if (parsys->voice[voice].range > range) parsys->voice[voice].range += clone + 1; } parsys->voice[voice2].range = range + 1; voice_link(p_voice2); if (clone) { for (voice3 = MAXVOICE; --voice3 >= 0; ) { if (parsys->voice[voice3].range < 0) break; } if (voice3 > 0) { p_voice3 = &voice_tb[voice3]; strcpy(p_voice3->id, p_voice2->id); p_voice3->second = 1; parsys->voice[voice3].second = 1; p_voice3->scale = voice_tb[p_voice->clone].scale; parsys->voice[voice3].range = range + 2; voice_link(p_voice3); p_voice2->clone = voice3; } else { error(1, s, "Too many voices for overlay cloning"); } } } voice = p_voice - voice_tb; // p_voice2->cstaff = p_voice2->staff = parsys->voice[voice2].staff // = parsys->voice[voice].staff; // if ((voice3 = p_voice2->clone) >= 0) { // p_voice3 = &voice_tb[voice3]; // p_voice3->cstaff = p_voice3->staff // = parsys->voice[voice3].staff // = parsys->voice[p_voice->clone].staff; // } if (over_time < 0) { /* first '&' in a measure */ int time; over_bar = 1; over_mxtime = p_voice->time; over_voice = voice; time = p_voice2->time; for (s = p_voice->last_sym; /*s*/; s = s->prev) { if (s->type == BAR || s->time <= time) /* (if start of tune) */ break; } over_time = s->time; } else { if (over_mxtime == 0) over_mxtime = p_voice->time; else if (p_voice->time != over_mxtime) error(1, s, tx_wrong_dur); } p_voice2->time = over_time; curvoice = p_voice2; }
{'added': [(2015, '\t\tif (p_voice->time != over_mxtime) {'), (2016, '\t\t\terror(1, s, tx_wrong_dur);'), (2017, '\t\t\tif (p_voice->time > over_mxtime)'), (2018, '\t\t\t\tcurvoice->time = p_voice->time;'), (2019, '\t\t\telse'), (2020, '\t\t\t\tp_voice->time = curvoice->time;'), (2021, '\t\t}')], 'deleted': [(2014, '\t\tif (p_voice->time != over_mxtime)'), (2015, '\t\t\terror(1, s, tx_wrong_dur);')]}
7
2
5,565
33,643
https://github.com/leesavide/abcm2ps
CVE-2021-32434
['CWE-125']
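For readability, the hunk recorded in this entry's diff (the CVE-2021-32434 change to get_over() in abcm2ps) reassembled as C. This is only a restatement of the func_after listing above; the comments are added here for explanation and are not part of the original source:

	/* end of voice overlay: switch back to the main voice first */
	curvoice = &voice_tb[over_voice];
	/* a duration mismatch previously only raised an error; the patch
	 * also resynchronizes the two voices so neither keeps a stale time */
	if (p_voice->time != over_mxtime) {
		error(1, s, tx_wrong_dur);
		if (p_voice->time > over_mxtime)
			curvoice->time = p_voice->time;
		else
			p_voice->time = curvoice->time;
	}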
ebtables.c
ebt_size_mwt
/* * ebtables * * Author: * Bart De Schuymer <bdschuym@pandora.be> * * ebtables.c,v 2.0, July, 2002 * * This code is strongly inspired by the iptables code which is * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kmod.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/audit.h> #include <net/sock.h> /* needed for logical [in,out]-dev filtering */ #include "../br_private.h" #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ "report to author: "format, ## args) /* #define BUGPRINT(format, args...) */ /* Each cpu has its own set of counters, so there is no need for write_lock in * the softirq * For reading or updating the counters, the user context needs to * get a write_lock */ /* The size of each set of counters is altered to get cache alignment */ #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter))) #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \ COUNTER_OFFSET(n) * cpu)) static DEFINE_MUTEX(ebt_mutex); #ifdef CONFIG_COMPAT static void ebt_standard_compat_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v >= 0) v += xt_compat_calc_jump(NFPROTO_BRIDGE, v); memcpy(dst, &v, sizeof(v)); } static int ebt_standard_compat_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv >= 0) cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; } #endif static struct xt_target ebt_standard_target = { .name = "standard", .revision = 0, .family = NFPROTO_BRIDGE, .targetsize = sizeof(int), #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = ebt_standard_compat_from_user, .compat_to_user = ebt_standard_compat_to_user, #endif }; static inline int ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb, struct xt_action_param *par) { par->target = w->u.watcher; par->targinfo = w->data; w->u.watcher->target(skb, par); /* watchers don't give a verdict */ return 0; } static inline int ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb, struct xt_action_param *par) { par->match = m->u.match; par->matchinfo = m->data; return m->u.match->match(skb, par) ? 
EBT_MATCH : EBT_NOMATCH; } static inline int ebt_dev_check(const char *entry, const struct net_device *device) { int i = 0; const char *devname; if (*entry == '\0') return 0; if (!device) return 1; devname = device->name; /* 1 is the wildcard token */ while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i]) i++; return devname[i] != entry[i] && entry[i] != 1; } /* process standard matches */ static inline int ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out) { const struct ethhdr *h = eth_hdr(skb); const struct net_bridge_port *p; __be16 ethproto; if (skb_vlan_tag_present(skb)) ethproto = htons(ETH_P_8021Q); else ethproto = h->h_proto; if (e->bitmask & EBT_802_3) { if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto))) return 1; } else if (!(e->bitmask & EBT_NOPROTO) && NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto)) return 1; if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in))) return 1; if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out))) return 1; /* rcu_read_lock()ed by nf_hook_thresh */ if (in && (p = br_port_get_rcu(in)) != NULL && NF_INVF(e, EBT_ILOGICALIN, ebt_dev_check(e->logical_in, p->br->dev))) return 1; if (out && (p = br_port_get_rcu(out)) != NULL && NF_INVF(e, EBT_ILOGICALOUT, ebt_dev_check(e->logical_out, p->br->dev))) return 1; if (e->bitmask & EBT_SOURCEMAC) { if (NF_INVF(e, EBT_ISOURCE, !ether_addr_equal_masked(h->h_source, e->sourcemac, e->sourcemsk))) return 1; } if (e->bitmask & EBT_DESTMAC) { if (NF_INVF(e, EBT_IDEST, !ether_addr_equal_masked(h->h_dest, e->destmac, e->destmsk))) return 1; } return 0; } static inline struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry) { return (void *)entry + entry->next_offset; } /* Do some firewalling */ unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table) { unsigned int hook = state->hook; int i, nentries; struct ebt_entry *point; struct ebt_counter *counter_base, *cb_base; const struct ebt_entry_target *t; int verdict, sp = 0; struct ebt_chainstack *cs; struct ebt_entries *chaininfo; const char *base; const struct ebt_table_info *private; struct xt_action_param acpar; acpar.state = state; acpar.hotdrop = false; read_lock_bh(&table->lock); private = table->private; cb_base = COUNTER_BASE(private->counters, private->nentries, smp_processor_id()); if (private->chainstack) cs = private->chainstack[smp_processor_id()]; else cs = NULL; chaininfo = private->hook_entry[hook]; nentries = private->hook_entry[hook]->nentries; point = (struct ebt_entry *)(private->hook_entry[hook]->data); counter_base = cb_base + private->hook_entry[hook]->counter_offset; /* base for chain jumps */ base = private->entries; i = 0; while (i < nentries) { if (ebt_basic_match(point, skb, state->in, state->out)) goto letscontinue; if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0) goto letscontinue; if (acpar.hotdrop) { read_unlock_bh(&table->lock); return NF_DROP; } /* increase counter */ (*(counter_base + i)).pcnt++; (*(counter_base + i)).bcnt += skb->len; /* these should only watch: not modify, nor tell us * what to do with the packet */ EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar); t = (struct ebt_entry_target *) (((char *)point) + point->target_offset); /* standard target */ if (!t->u.target->target) verdict = ((struct ebt_standard_target *)t)->verdict; else { acpar.target = t->u.target; acpar.targinfo = t->data; verdict = t->u.target->target(skb, &acpar); } if (verdict == 
EBT_ACCEPT) { read_unlock_bh(&table->lock); return NF_ACCEPT; } if (verdict == EBT_DROP) { read_unlock_bh(&table->lock); return NF_DROP; } if (verdict == EBT_RETURN) { letsreturn: if (WARN(sp == 0, "RETURN on base chain")) { /* act like this is EBT_CONTINUE */ goto letscontinue; } sp--; /* put all the local variables right */ i = cs[sp].n; chaininfo = cs[sp].chaininfo; nentries = chaininfo->nentries; point = cs[sp].e; counter_base = cb_base + chaininfo->counter_offset; continue; } if (verdict == EBT_CONTINUE) goto letscontinue; if (WARN(verdict < 0, "bogus standard verdict\n")) { read_unlock_bh(&table->lock); return NF_DROP; } /* jump to a udc */ cs[sp].n = i + 1; cs[sp].chaininfo = chaininfo; cs[sp].e = ebt_next_entry(point); i = 0; chaininfo = (struct ebt_entries *) (base + verdict); if (WARN(chaininfo->distinguisher, "jump to non-chain\n")) { read_unlock_bh(&table->lock); return NF_DROP; } nentries = chaininfo->nentries; point = (struct ebt_entry *)chaininfo->data; counter_base = cb_base + chaininfo->counter_offset; sp++; continue; letscontinue: point = ebt_next_entry(point); i++; } /* I actually like this :) */ if (chaininfo->policy == EBT_RETURN) goto letsreturn; if (chaininfo->policy == EBT_ACCEPT) { read_unlock_bh(&table->lock); return NF_ACCEPT; } read_unlock_bh(&table->lock); return NF_DROP; } /* If it succeeds, returns element and locks mutex */ static inline void * find_inlist_lock_noload(struct list_head *head, const char *name, int *error, struct mutex *mutex) { struct { struct list_head list; char name[EBT_FUNCTION_MAXNAMELEN]; } *e; mutex_lock(mutex); list_for_each_entry(e, head, list) { if (strcmp(e->name, name) == 0) return e; } *error = -ENOENT; mutex_unlock(mutex); return NULL; } static void * find_inlist_lock(struct list_head *head, const char *name, const char *prefix, int *error, struct mutex *mutex) { return try_then_request_module( find_inlist_lock_noload(head, name, error, mutex), "%s%s", prefix, name); } static inline struct ebt_table * find_table_lock(struct net *net, const char *name, int *error, struct mutex *mutex) { return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name, "ebtable_", error, mutex); } static inline int ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par, unsigned int *cnt) { const struct ebt_entry *e = par->entryinfo; struct xt_match *match; size_t left = ((char *)e + e->watchers_offset) - (char *)m; int ret; if (left < sizeof(struct ebt_entry_match) || left - sizeof(struct ebt_entry_match) < m->match_size) return -EINVAL; match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) { if (!IS_ERR(match)) module_put(match->me); request_module("ebt_%s", m->u.name); match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); } if (IS_ERR(match)) return PTR_ERR(match); m->u.match = match; par->match = match; par->matchinfo = m->data; ret = xt_check_match(par, m->match_size, e->ethproto, e->invflags & EBT_IPROTO); if (ret < 0) { module_put(match->me); return ret; } (*cnt)++; return 0; } static inline int ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, unsigned int *cnt) { const struct ebt_entry *e = par->entryinfo; struct xt_target *watcher; size_t left = ((char *)e + e->target_offset) - (char *)w; int ret; if (left < sizeof(struct ebt_entry_watcher) || left - sizeof(struct ebt_entry_watcher) < w->watcher_size) return -EINVAL; watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); if (IS_ERR(watcher)) return PTR_ERR(watcher); w->u.watcher = watcher; 
par->target = watcher; par->targinfo = w->data; ret = xt_check_target(par, w->watcher_size, e->ethproto, e->invflags & EBT_IPROTO); if (ret < 0) { module_put(watcher->me); return ret; } (*cnt)++; return 0; } static int ebt_verify_pointers(const struct ebt_replace *repl, struct ebt_table_info *newinfo) { unsigned int limit = repl->entries_size; unsigned int valid_hooks = repl->valid_hooks; unsigned int offset = 0; int i; for (i = 0; i < NF_BR_NUMHOOKS; i++) newinfo->hook_entry[i] = NULL; newinfo->entries_size = repl->entries_size; newinfo->nentries = repl->nentries; while (offset < limit) { size_t left = limit - offset; struct ebt_entry *e = (void *)newinfo->entries + offset; if (left < sizeof(unsigned int)) break; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if ((valid_hooks & (1 << i)) == 0) continue; if ((char __user *)repl->hook_entry[i] == repl->entries + offset) break; } if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) { if (e->bitmask != 0) { /* we make userspace set this right, * so there is no misunderstanding */ BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set " "in distinguisher\n"); return -EINVAL; } if (i != NF_BR_NUMHOOKS) newinfo->hook_entry[i] = (struct ebt_entries *)e; if (left < sizeof(struct ebt_entries)) break; offset += sizeof(struct ebt_entries); } else { if (left < sizeof(struct ebt_entry)) break; if (left < e->next_offset) break; if (e->next_offset < sizeof(struct ebt_entry)) return -EINVAL; offset += e->next_offset; } } if (offset != limit) { BUGPRINT("entries_size too small\n"); return -EINVAL; } /* check if all valid hooks have a chain */ for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (!newinfo->hook_entry[i] && (valid_hooks & (1 << i))) { BUGPRINT("Valid hook without chain\n"); return -EINVAL; } } return 0; } /* this one is very careful, as it is the first function * to parse the userspace data */ static inline int ebt_check_entry_size_and_hooks(const struct ebt_entry *e, const struct ebt_table_info *newinfo, unsigned int *n, unsigned int *cnt, unsigned int *totalcnt, unsigned int *udc_cnt) { int i; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if ((void *)e == (void *)newinfo->hook_entry[i]) break; } /* beginning of a new chain * if i == NF_BR_NUMHOOKS it must be a user defined chain */ if (i != NF_BR_NUMHOOKS || !e->bitmask) { /* this checks if the previous chain has as many entries * as it said it has */ if (*n != *cnt) { BUGPRINT("nentries does not equal the nr of entries " "in the chain\n"); return -EINVAL; } if (((struct ebt_entries *)e)->policy != EBT_DROP && ((struct ebt_entries *)e)->policy != EBT_ACCEPT) { /* only RETURN from udc */ if (i != NF_BR_NUMHOOKS || ((struct ebt_entries *)e)->policy != EBT_RETURN) { BUGPRINT("bad policy\n"); return -EINVAL; } } if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */ (*udc_cnt)++; if (((struct ebt_entries *)e)->counter_offset != *totalcnt) { BUGPRINT("counter_offset != totalcnt"); return -EINVAL; } *n = ((struct ebt_entries *)e)->nentries; *cnt = 0; return 0; } /* a plain old entry, heh */ if (sizeof(struct ebt_entry) > e->watchers_offset || e->watchers_offset > e->target_offset || e->target_offset >= e->next_offset) { BUGPRINT("entry offsets not in right order\n"); return -EINVAL; } /* this is not checked anywhere else */ if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) { BUGPRINT("target size too small\n"); return -EINVAL; } (*cnt)++; (*totalcnt)++; return 0; } struct ebt_cl_stack { struct ebt_chainstack cs; int from; unsigned int hookmask; }; /* We need these positions to check that the 
jumps to a different part of the * entries is a jump to the beginning of a new chain. */ static inline int ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, unsigned int *n, struct ebt_cl_stack *udc) { int i; /* we're only interested in chain starts */ if (e->bitmask) return 0; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (newinfo->hook_entry[i] == (struct ebt_entries *)e) break; } /* only care about udc */ if (i != NF_BR_NUMHOOKS) return 0; udc[*n].cs.chaininfo = (struct ebt_entries *)e; /* these initialisations are depended on later in check_chainloops() */ udc[*n].cs.n = 0; udc[*n].hookmask = 0; (*n)++; return 0; } static inline int ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i) { struct xt_mtdtor_param par; if (i && (*i)-- == 0) return 1; par.net = net; par.match = m->u.match; par.matchinfo = m->data; par.family = NFPROTO_BRIDGE; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); return 0; } static inline int ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i) { struct xt_tgdtor_param par; if (i && (*i)-- == 0) return 1; par.net = net; par.target = w->u.watcher; par.targinfo = w->data; par.family = NFPROTO_BRIDGE; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); return 0; } static inline int ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt) { struct xt_tgdtor_param par; struct ebt_entry_target *t; if (e->bitmask == 0) return 0; /* we're done */ if (cnt && (*cnt)-- == 0) return 1; EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL); EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL); t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); par.net = net; par.target = t->u.target; par.targinfo = t->data; par.family = NFPROTO_BRIDGE; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); return 0; } static inline int ebt_check_entry(struct ebt_entry *e, struct net *net, const struct ebt_table_info *newinfo, const char *name, unsigned int *cnt, struct ebt_cl_stack *cl_s, unsigned int udc_cnt) { struct ebt_entry_target *t; struct xt_target *target; unsigned int i, j, hook = 0, hookmask = 0; size_t gap; int ret; struct xt_mtchk_param mtpar; struct xt_tgchk_param tgpar; /* don't mess with the struct ebt_entries */ if (e->bitmask == 0) return 0; if (e->bitmask & ~EBT_F_MASK) { BUGPRINT("Unknown flag for bitmask\n"); return -EINVAL; } if (e->invflags & ~EBT_INV_MASK) { BUGPRINT("Unknown flag for inv bitmask\n"); return -EINVAL; } if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) { BUGPRINT("NOPROTO & 802_3 not allowed\n"); return -EINVAL; } /* what hook do we belong to? 
*/ for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (!newinfo->hook_entry[i]) continue; if ((char *)newinfo->hook_entry[i] < (char *)e) hook = i; else break; } /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on * a base chain */ if (i < NF_BR_NUMHOOKS) hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); else { for (i = 0; i < udc_cnt; i++) if ((char *)(cl_s[i].cs.chaininfo) > (char *)e) break; if (i == 0) hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); else hookmask = cl_s[i - 1].hookmask; } i = 0; mtpar.net = tgpar.net = net; mtpar.table = tgpar.table = name; mtpar.entryinfo = tgpar.entryinfo = e; mtpar.hook_mask = tgpar.hook_mask = hookmask; mtpar.family = tgpar.family = NFPROTO_BRIDGE; ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i); if (ret != 0) goto cleanup_matches; j = 0; ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j); if (ret != 0) goto cleanup_watchers; t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); gap = e->next_offset - e->target_offset; target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0); if (IS_ERR(target)) { ret = PTR_ERR(target); goto cleanup_watchers; } t->u.target = target; if (t->u.target == &ebt_standard_target) { if (gap < sizeof(struct ebt_standard_target)) { BUGPRINT("Standard target size too big\n"); ret = -EFAULT; goto cleanup_watchers; } if (((struct ebt_standard_target *)t)->verdict < -NUM_STANDARD_TARGETS) { BUGPRINT("Invalid standard target\n"); ret = -EFAULT; goto cleanup_watchers; } } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) { module_put(t->u.target->me); ret = -EFAULT; goto cleanup_watchers; } tgpar.target = target; tgpar.targinfo = t->data; ret = xt_check_target(&tgpar, t->target_size, e->ethproto, e->invflags & EBT_IPROTO); if (ret < 0) { module_put(target->me); goto cleanup_watchers; } (*cnt)++; return 0; cleanup_watchers: EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j); cleanup_matches: EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i); return ret; } /* checks for loops and sets the hook mask for udc * the hook mask for udc tells us from which base chains the udc can be * accessed. 
This mask is a parameter to the check() functions of the extensions */ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s, unsigned int udc_cnt, unsigned int hooknr, char *base) { int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; const struct ebt_entry *e = (struct ebt_entry *)chain->data; const struct ebt_entry_target *t; while (pos < nentries || chain_nr != -1) { /* end of udc, go back one 'recursion' step */ if (pos == nentries) { /* put back values of the time when this chain was called */ e = cl_s[chain_nr].cs.e; if (cl_s[chain_nr].from != -1) nentries = cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries; else nentries = chain->nentries; pos = cl_s[chain_nr].cs.n; /* make sure we won't see a loop that isn't one */ cl_s[chain_nr].cs.n = 0; chain_nr = cl_s[chain_nr].from; if (pos == nentries) continue; } t = (struct ebt_entry_target *) (((char *)e) + e->target_offset); if (strcmp(t->u.name, EBT_STANDARD_TARGET)) goto letscontinue; if (e->target_offset + sizeof(struct ebt_standard_target) > e->next_offset) { BUGPRINT("Standard target size too big\n"); return -1; } verdict = ((struct ebt_standard_target *)t)->verdict; if (verdict >= 0) { /* jump to another chain */ struct ebt_entries *hlp2 = (struct ebt_entries *)(base + verdict); for (i = 0; i < udc_cnt; i++) if (hlp2 == cl_s[i].cs.chaininfo) break; /* bad destination or loop */ if (i == udc_cnt) { BUGPRINT("bad destination\n"); return -1; } if (cl_s[i].cs.n) { BUGPRINT("loop\n"); return -1; } if (cl_s[i].hookmask & (1 << hooknr)) goto letscontinue; /* this can't be 0, so the loop test is correct */ cl_s[i].cs.n = pos + 1; pos = 0; cl_s[i].cs.e = ebt_next_entry(e); e = (struct ebt_entry *)(hlp2->data); nentries = hlp2->nentries; cl_s[i].from = chain_nr; chain_nr = i; /* this udc is accessible from the base chain for hooknr */ cl_s[i].hookmask |= (1 << hooknr); continue; } letscontinue: e = ebt_next_entry(e); pos++; } return 0; } /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ static int translate_table(struct net *net, const char *name, struct ebt_table_info *newinfo) { unsigned int i, j, k, udc_cnt; int ret; struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */ i = 0; while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i]) i++; if (i == NF_BR_NUMHOOKS) { BUGPRINT("No valid hooks specified\n"); return -EINVAL; } if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) { BUGPRINT("Chains don't start at beginning\n"); return -EINVAL; } /* make sure chains are ordered after each other in same order * as their corresponding hooks */ for (j = i + 1; j < NF_BR_NUMHOOKS; j++) { if (!newinfo->hook_entry[j]) continue; if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) { BUGPRINT("Hook order must be followed\n"); return -EINVAL; } i = j; } /* do some early checkings and initialize some things */ i = 0; /* holds the expected nr. of entries for the chain */ j = 0; /* holds the up to now counted entries for the chain */ k = 0; /* holds the total nr. of entries, should equal * newinfo->nentries afterwards */ udc_cnt = 0; /* will hold the nr. 
of user defined chains (udc) */ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_check_entry_size_and_hooks, newinfo, &i, &j, &k, &udc_cnt); if (ret != 0) return ret; if (i != j) { BUGPRINT("nentries does not equal the nr of entries in the " "(last) chain\n"); return -EINVAL; } if (k != newinfo->nentries) { BUGPRINT("Total nentries is wrong\n"); return -EINVAL; } /* get the location of the udc, put them in an array * while we're at it, allocate the chainstack */ if (udc_cnt) { /* this will get free'd in do_replace()/ebt_register_table() * if an error occurs */ newinfo->chainstack = vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack))); if (!newinfo->chainstack) return -ENOMEM; for_each_possible_cpu(i) { newinfo->chainstack[i] = vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0]))); if (!newinfo->chainstack[i]) { while (i) vfree(newinfo->chainstack[--i]); vfree(newinfo->chainstack); newinfo->chainstack = NULL; return -ENOMEM; } } cl_s = vmalloc(udc_cnt * sizeof(*cl_s)); if (!cl_s) return -ENOMEM; i = 0; /* the i'th udc */ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_get_udc_positions, newinfo, &i, cl_s); /* sanity check */ if (i != udc_cnt) { BUGPRINT("i != udc_cnt\n"); vfree(cl_s); return -EFAULT; } } /* Check for loops */ for (i = 0; i < NF_BR_NUMHOOKS; i++) if (newinfo->hook_entry[i]) if (check_chainloops(newinfo->hook_entry[i], cl_s, udc_cnt, i, newinfo->entries)) { vfree(cl_s); return -EINVAL; } /* we now know the following (along with E=mc²): * - the nr of entries in each chain is right * - the size of the allocated space is right * - all valid hooks have a corresponding chain * - there are no loops * - wrong data can still be on the level of a single entry * - could be there are jumps to places that are not the * beginning of a chain. This can only occur in chains that * are not accessible from any base chains, so we don't care. 
*/ /* used to know what we need to clean up if something goes wrong */ i = 0; ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt); if (ret != 0) { EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, net, &i); } vfree(cl_s); return ret; } /* called under write_lock */ static void get_counters(const struct ebt_counter *oldcounters, struct ebt_counter *counters, unsigned int nentries) { int i, cpu; struct ebt_counter *counter_base; /* counters of cpu 0 */ memcpy(counters, oldcounters, sizeof(struct ebt_counter) * nentries); /* add other counters to those of cpu 0 */ for_each_possible_cpu(cpu) { if (cpu == 0) continue; counter_base = COUNTER_BASE(oldcounters, nentries, cpu); for (i = 0; i < nentries; i++) { counters[i].pcnt += counter_base[i].pcnt; counters[i].bcnt += counter_base[i].bcnt; } } } static int do_replace_finish(struct net *net, struct ebt_replace *repl, struct ebt_table_info *newinfo) { int ret, i; struct ebt_counter *counterstmp = NULL; /* used to be able to unlock earlier */ struct ebt_table_info *table; struct ebt_table *t; /* the user wants counters back * the check on the size is done later, when we have the lock */ if (repl->num_counters) { unsigned long size = repl->num_counters * sizeof(*counterstmp); counterstmp = vmalloc(size); if (!counterstmp) return -ENOMEM; } newinfo->chainstack = NULL; ret = ebt_verify_pointers(repl, newinfo); if (ret != 0) goto free_counterstmp; ret = translate_table(net, repl->name, newinfo); if (ret != 0) goto free_counterstmp; t = find_table_lock(net, repl->name, &ret, &ebt_mutex); if (!t) { ret = -ENOENT; goto free_iterate; } /* the table doesn't like it */ if (t->check && (ret = t->check(newinfo, repl->valid_hooks))) goto free_unlock; if (repl->num_counters && repl->num_counters != t->private->nentries) { BUGPRINT("Wrong nr. of counters requested\n"); ret = -EINVAL; goto free_unlock; } /* we have the mutex lock, so no danger in reading this pointer */ table = t->private; /* make sure the table can only be rmmod'ed if it contains no rules */ if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) { ret = -ENOENT; goto free_unlock; } else if (table->nentries && !newinfo->nentries) module_put(t->me); /* we need an atomic snapshot of the counters */ write_lock_bh(&t->lock); if (repl->num_counters) get_counters(t->private->counters, counterstmp, t->private->nentries); t->private = newinfo; write_unlock_bh(&t->lock); mutex_unlock(&ebt_mutex); /* so, a user can change the chains while having messed up her counter * allocation. Only reason why this is done is because this way the lock * is held only once, while this doesn't bring the kernel into a * dangerous state. 
*/ if (repl->num_counters && copy_to_user(repl->counters, counterstmp, repl->num_counters * sizeof(struct ebt_counter))) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n"); } /* decrease module count and free resources */ EBT_ENTRY_ITERATE(table->entries, table->entries_size, ebt_cleanup_entry, net, NULL); vfree(table->entries); if (table->chainstack) { for_each_possible_cpu(i) vfree(table->chainstack[i]); vfree(table->chainstack); } vfree(table); vfree(counterstmp); #ifdef CONFIG_AUDIT if (audit_enabled) { audit_log(current->audit_context, GFP_KERNEL, AUDIT_NETFILTER_CFG, "table=%s family=%u entries=%u", repl->name, AF_BRIDGE, repl->nentries); } #endif return ret; free_unlock: mutex_unlock(&ebt_mutex); free_iterate: EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, net, NULL); free_counterstmp: vfree(counterstmp); /* can be initialized in translate_table() */ if (newinfo->chainstack) { for_each_possible_cpu(i) vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } return ret; } /* replace the table */ static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret, countersize; struct ebt_table_info *newinfo; struct ebt_replace tmp; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) { BUGPRINT("Wrong len argument\n"); return -EINVAL; } if (tmp.entries_size == 0) { BUGPRINT("Entries_size never zero\n"); return -EINVAL; } /* overflow check */ if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; tmp.name[sizeof(tmp.name) - 1] = 0; countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); if (!newinfo) return -ENOMEM; if (countersize) memset(newinfo->counters, 0, countersize); newinfo->entries = vmalloc(tmp.entries_size); if (!newinfo->entries) { ret = -ENOMEM; goto free_newinfo; } if (copy_from_user( newinfo->entries, tmp.entries, tmp.entries_size) != 0) { BUGPRINT("Couldn't copy entries from userspace\n"); ret = -EFAULT; goto free_entries; } ret = do_replace_finish(net, &tmp, newinfo); if (ret == 0) return ret; free_entries: vfree(newinfo->entries); free_newinfo: vfree(newinfo); return ret; } static void __ebt_unregister_table(struct net *net, struct ebt_table *table) { int i; mutex_lock(&ebt_mutex); list_del(&table->list); mutex_unlock(&ebt_mutex); EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, ebt_cleanup_entry, net, NULL); if (table->private->nentries) module_put(table->me); vfree(table->private->entries); if (table->private->chainstack) { for_each_possible_cpu(i) vfree(table->private->chainstack[i]); vfree(table->private->chainstack); } vfree(table->private); kfree(table); } int ebt_register_table(struct net *net, const struct ebt_table *input_table, const struct nf_hook_ops *ops, struct ebt_table **res) { struct ebt_table_info *newinfo; struct ebt_table *t, *table; struct ebt_replace_kernel *repl; int ret, i, countersize; void *p; if (input_table == NULL || (repl = input_table->table) == NULL || repl->entries == NULL || repl->entries_size == 0 || repl->counters != NULL || input_table->private != NULL) { BUGPRINT("Bad table data for ebt_register_table!!!\n"); return -EINVAL; } /* Don't add one table to multiple lists. 
*/ table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL); if (!table) { ret = -ENOMEM; goto out; } countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); ret = -ENOMEM; if (!newinfo) goto free_table; p = vmalloc(repl->entries_size); if (!p) goto free_newinfo; memcpy(p, repl->entries, repl->entries_size); newinfo->entries = p; newinfo->entries_size = repl->entries_size; newinfo->nentries = repl->nentries; if (countersize) memset(newinfo->counters, 0, countersize); /* fill in newinfo and parse the entries */ newinfo->chainstack = NULL; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if ((repl->valid_hooks & (1 << i)) == 0) newinfo->hook_entry[i] = NULL; else newinfo->hook_entry[i] = p + ((char *)repl->hook_entry[i] - repl->entries); } ret = translate_table(net, repl->name, newinfo); if (ret != 0) { BUGPRINT("Translate_table failed\n"); goto free_chainstack; } if (table->check && table->check(newinfo, table->valid_hooks)) { BUGPRINT("The table doesn't like its own initial data, lol\n"); ret = -EINVAL; goto free_chainstack; } table->private = newinfo; rwlock_init(&table->lock); mutex_lock(&ebt_mutex); list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) { if (strcmp(t->name, table->name) == 0) { ret = -EEXIST; BUGPRINT("Table name already exists\n"); goto free_unlock; } } /* Hold a reference count if the chains aren't empty */ if (newinfo->nentries && !try_module_get(table->me)) { ret = -ENOENT; goto free_unlock; } list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); mutex_unlock(&ebt_mutex); WRITE_ONCE(*res, table); if (!ops) return 0; ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret) { __ebt_unregister_table(net, table); *res = NULL; } return ret; free_unlock: mutex_unlock(&ebt_mutex); free_chainstack: if (newinfo->chainstack) { for_each_possible_cpu(i) vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } vfree(newinfo->entries); free_newinfo: vfree(newinfo); free_table: kfree(table); out: return ret; } void ebt_unregister_table(struct net *net, struct ebt_table *table, const struct nf_hook_ops *ops) { if (ops) nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ebt_unregister_table(net, table); } /* userspace just supplied us with counters */ static int do_update_counters(struct net *net, const char *name, struct ebt_counter __user *counters, unsigned int num_counters, const void __user *user, unsigned int len) { int i, ret; struct ebt_counter *tmp; struct ebt_table *t; if (num_counters == 0) return -EINVAL; tmp = vmalloc(num_counters * sizeof(*tmp)); if (!tmp) return -ENOMEM; t = find_table_lock(net, name, &ret, &ebt_mutex); if (!t) goto free_tmp; if (num_counters != t->private->nentries) { BUGPRINT("Wrong nr of counters\n"); ret = -EINVAL; goto unlock_mutex; } if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) { ret = -EFAULT; goto unlock_mutex; } /* we want an atomic add of the counters */ write_lock_bh(&t->lock); /* we add to the counters of the first cpu */ for (i = 0; i < num_counters; i++) { t->private->counters[i].pcnt += tmp[i].pcnt; t->private->counters[i].bcnt += tmp[i].bcnt; } write_unlock_bh(&t->lock); ret = 0; unlock_mutex: mutex_unlock(&ebt_mutex); free_tmp: vfree(tmp); return ret; } static int update_counters(struct net *net, const void __user *user, unsigned int len) { struct ebt_replace hlp; if (copy_from_user(&hlp, user, sizeof(hlp))) return -EFAULT; if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) return 
-EINVAL; return do_update_counters(net, hlp.name, hlp.counters, hlp.num_counters, user, len); } static inline int ebt_obj_to_user(char __user *um, const char *_name, const char *data, int entrysize, int usersize, int datasize) { char name[EBT_FUNCTION_MAXNAMELEN] = {0}; /* ebtables expects 32 bytes long names but xt_match names are 29 bytes * long. Copy 29 bytes and fill remaining bytes with zeroes. */ strlcpy(name, _name, sizeof(name)); if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || xt_data_to_user(um + entrysize, data, usersize, datasize, XT_ALIGN(datasize))) return -EFAULT; return 0; } static inline int ebt_match_to_user(const struct ebt_entry_match *m, const char *base, char __user *ubase) { return ebt_obj_to_user(ubase + ((char *)m - base), m->u.match->name, m->data, sizeof(*m), m->u.match->usersize, m->match_size); } static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w, const char *base, char __user *ubase) { return ebt_obj_to_user(ubase + ((char *)w - base), w->u.watcher->name, w->data, sizeof(*w), w->u.watcher->usersize, w->watcher_size); } static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base, char __user *ubase) { int ret; char __user *hlp; const struct ebt_entry_target *t; if (e->bitmask == 0) { /* special case !EBT_ENTRY_OR_ENTRIES */ if (copy_to_user(ubase + ((char *)e - base), e, sizeof(struct ebt_entries))) return -EFAULT; return 0; } if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e))) return -EFAULT; hlp = ubase + (((char *)e + e->target_offset) - base); t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase); if (ret != 0) return ret; ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase); if (ret != 0) return ret; ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t), t->u.target->usersize, t->target_size); if (ret != 0) return ret; return 0; } static int copy_counters_to_user(struct ebt_table *t, const struct ebt_counter *oldcounters, void __user *user, unsigned int num_counters, unsigned int nentries) { struct ebt_counter *counterstmp; int ret = 0; /* userspace might not need the counters */ if (num_counters == 0) return 0; if (num_counters != nentries) { BUGPRINT("Num_counters wrong\n"); return -EINVAL; } counterstmp = vmalloc(nentries * sizeof(*counterstmp)); if (!counterstmp) return -ENOMEM; write_lock_bh(&t->lock); get_counters(oldcounters, counterstmp, nentries); write_unlock_bh(&t->lock); if (copy_to_user(user, counterstmp, nentries * sizeof(struct ebt_counter))) ret = -EFAULT; vfree(counterstmp); return ret; } /* called with ebt_mutex locked */ static int copy_everything_to_user(struct ebt_table *t, void __user *user, const int *len, int cmd) { struct ebt_replace tmp; const struct ebt_counter *oldcounters; unsigned int entries_size, nentries; int ret; char *entries; if (cmd == EBT_SO_GET_ENTRIES) { entries_size = t->private->entries_size; nentries = t->private->nentries; entries = t->private->entries; oldcounters = t->private->counters; } else { entries_size = t->table->entries_size; nentries = t->table->nentries; entries = t->table->entries; oldcounters = t->table->counters; } if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; if (*len != sizeof(struct ebt_replace) + entries_size + (tmp.num_counters ? 
nentries * sizeof(struct ebt_counter) : 0)) return -EINVAL; if (tmp.nentries != nentries) { BUGPRINT("Nentries wrong\n"); return -EINVAL; } if (tmp.entries_size != entries_size) { BUGPRINT("Wrong size\n"); return -EINVAL; } ret = copy_counters_to_user(t, oldcounters, tmp.counters, tmp.num_counters, nentries); if (ret) return ret; /* set the match/watcher/target names right */ return EBT_ENTRY_ITERATE(entries, entries_size, ebt_entry_to_user, entries, tmp.entries); } static int do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; struct net *net = sock_net(sk); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case EBT_SO_SET_ENTRIES: ret = do_replace(net, user, len); break; case EBT_SO_SET_COUNTERS: ret = update_counters(net, user, len); break; default: ret = -EINVAL; } return ret; } static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; struct ebt_replace tmp; struct ebt_table *t; struct net *net = sock_net(sk); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; tmp.name[sizeof(tmp.name) - 1] = '\0'; t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); if (!t) return ret; switch (cmd) { case EBT_SO_GET_INFO: case EBT_SO_GET_INIT_INFO: if (*len != sizeof(struct ebt_replace)) { ret = -EINVAL; mutex_unlock(&ebt_mutex); break; } if (cmd == EBT_SO_GET_INFO) { tmp.nentries = t->private->nentries; tmp.entries_size = t->private->entries_size; tmp.valid_hooks = t->valid_hooks; } else { tmp.nentries = t->table->nentries; tmp.entries_size = t->table->entries_size; tmp.valid_hooks = t->table->valid_hooks; } mutex_unlock(&ebt_mutex); if (copy_to_user(user, &tmp, *len) != 0) { BUGPRINT("c2u Didn't work\n"); ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_ENTRIES: case EBT_SO_GET_INIT_ENTRIES: ret = copy_everything_to_user(t, user, len, cmd); mutex_unlock(&ebt_mutex); break; default: mutex_unlock(&ebt_mutex); ret = -EINVAL; } return ret; } #ifdef CONFIG_COMPAT /* 32 bit-userspace compatibility definitions. */ struct compat_ebt_replace { char name[EBT_TABLE_MAXNAMELEN]; compat_uint_t valid_hooks; compat_uint_t nentries; compat_uint_t entries_size; /* start of the chains */ compat_uptr_t hook_entry[NF_BR_NUMHOOKS]; /* nr of counters userspace expects back */ compat_uint_t num_counters; /* where the kernel will put the old counters. */ compat_uptr_t counters; compat_uptr_t entries; }; /* struct ebt_entry_match, _target and _watcher have same layout */ struct compat_ebt_entry_mwt { union { char name[EBT_FUNCTION_MAXNAMELEN]; compat_uptr_t ptr; } u; compat_uint_t match_size; compat_uint_t data[0]; }; /* account for possible padding between match_size and ->data */ static int ebt_compat_entry_padsize(void) { BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) < COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt))); return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) - COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)); } static int ebt_compat_match_offset(const struct xt_match *match, unsigned int userlen) { /* ebt_among needs special handling. The kernel .matchsize is * set to -1 at registration time; at runtime an EBT_ALIGN()ed * value is expected. * Example: userspace sends 4500, ebt_among.c wants 4504. 
*/ if (unlikely(match->matchsize == -1)) return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen); return xt_compat_match_offset(match); } static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, unsigned int *size) { const struct xt_match *match = m->u.match; struct compat_ebt_entry_mwt __user *cm = *dstptr; int off = ebt_compat_match_offset(match, m->match_size); compat_uint_t msize = m->match_size - off; if (WARN_ON(off >= m->match_size)) return -EINVAL; if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) || put_user(msize, &cm->match_size)) return -EFAULT; if (match->compat_to_user) { if (match->compat_to_user(cm->data, m->data)) return -EFAULT; } else { if (xt_data_to_user(cm->data, m->data, match->usersize, msize, COMPAT_XT_ALIGN(msize))) return -EFAULT; } *size -= ebt_compat_entry_padsize() + off; *dstptr = cm->data; *dstptr += msize; return 0; } static int compat_target_to_user(struct ebt_entry_target *t, void __user **dstptr, unsigned int *size) { const struct xt_target *target = t->u.target; struct compat_ebt_entry_mwt __user *cm = *dstptr; int off = xt_compat_target_offset(target); compat_uint_t tsize = t->target_size - off; if (WARN_ON(off >= t->target_size)) return -EINVAL; if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) return -EFAULT; if (target->compat_to_user) { if (target->compat_to_user(cm->data, t->data)) return -EFAULT; } else { if (xt_data_to_user(cm->data, t->data, target->usersize, tsize, COMPAT_XT_ALIGN(tsize))) return -EFAULT; } *size -= ebt_compat_entry_padsize() + off; *dstptr = cm->data; *dstptr += tsize; return 0; } static int compat_watcher_to_user(struct ebt_entry_watcher *w, void __user **dstptr, unsigned int *size) { return compat_target_to_user((struct ebt_entry_target *)w, dstptr, size); } static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr, unsigned int *size) { struct ebt_entry_target *t; struct ebt_entry __user *ce; u32 watchers_offset, target_offset, next_offset; compat_uint_t origsize; int ret; if (e->bitmask == 0) { if (*size < sizeof(struct ebt_entries)) return -EINVAL; if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries))) return -EFAULT; *dstptr += sizeof(struct ebt_entries); *size -= sizeof(struct ebt_entries); return 0; } if (*size < sizeof(*ce)) return -EINVAL; ce = *dstptr; if (copy_to_user(ce, e, sizeof(*ce))) return -EFAULT; origsize = *size; *dstptr += sizeof(*ce); ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size); if (ret) return ret; watchers_offset = e->watchers_offset - (origsize - *size); ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size); if (ret) return ret; target_offset = e->target_offset - (origsize - *size); t = (struct ebt_entry_target *) ((char *) e + e->target_offset); ret = compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(watchers_offset, &ce->watchers_offset) || put_user(target_offset, &ce->target_offset) || put_user(next_offset, &ce->next_offset)) return -EFAULT; *size -= sizeof(*ce); return 0; } static int compat_calc_match(struct ebt_entry_match *m, int *off) { *off += ebt_compat_match_offset(m->u.match, m->match_size); *off += ebt_compat_entry_padsize(); return 0; } static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off) { *off += xt_compat_target_offset(w->u.watcher); *off += ebt_compat_entry_padsize(); return 0; } static int compat_calc_entry(const struct ebt_entry *e, const 
struct ebt_table_info *info, const void *base, struct compat_ebt_replace *newinfo) { const struct ebt_entry_target *t; unsigned int entry_offset; int off, ret, i; if (e->bitmask == 0) return 0; off = 0; entry_offset = (void *)e - base; EBT_MATCH_ITERATE(e, compat_calc_match, &off); EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off); t = (const struct ebt_entry_target *) ((char *) e + e->target_offset); off += xt_compat_target_offset(t->u.target); off += ebt_compat_entry_padsize(); newinfo->entries_size -= off; ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_BR_NUMHOOKS; i++) { const void *hookptr = info->hook_entry[i]; if (info->hook_entry[i] && (e < (struct ebt_entry *)(base - hookptr))) { newinfo->hook_entry[i] -= off; pr_debug("0x%08X -> 0x%08X\n", newinfo->hook_entry[i] + off, newinfo->hook_entry[i]); } } return 0; } static int compat_table_info(const struct ebt_table_info *info, struct compat_ebt_replace *newinfo) { unsigned int size = info->entries_size; const void *entries = info->entries; newinfo->entries_size = size; xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); } static int compat_copy_everything_to_user(struct ebt_table *t, void __user *user, int *len, int cmd) { struct compat_ebt_replace repl, tmp; struct ebt_counter *oldcounters; struct ebt_table_info tinfo; int ret; void __user *pos; memset(&tinfo, 0, sizeof(tinfo)); if (cmd == EBT_SO_GET_ENTRIES) { tinfo.entries_size = t->private->entries_size; tinfo.nentries = t->private->nentries; tinfo.entries = t->private->entries; oldcounters = t->private->counters; } else { tinfo.entries_size = t->table->entries_size; tinfo.nentries = t->table->nentries; tinfo.entries = t->table->entries; oldcounters = t->table->counters; } if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; if (tmp.nentries != tinfo.nentries || (tmp.num_counters && tmp.num_counters != tinfo.nentries)) return -EINVAL; memcpy(&repl, &tmp, sizeof(repl)); if (cmd == EBT_SO_GET_ENTRIES) ret = compat_table_info(t->private, &repl); else ret = compat_table_info(&tinfo, &repl); if (ret) return ret; if (*len != sizeof(tmp) + repl.entries_size + (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) { pr_err("wrong size: *len %d, entries_size %u, replsz %d\n", *len, tinfo.entries_size, repl.entries_size); return -EINVAL; } /* userspace might not need the counters */ ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters), tmp.num_counters, tinfo.nentries); if (ret) return ret; pos = compat_ptr(tmp.entries); return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size, compat_copy_entry_to_user, &pos, &tmp.entries_size); } struct ebt_entries_buf_state { char *buf_kern_start; /* kernel buffer to copy (translated) data to */ u32 buf_kern_len; /* total size of kernel buffer */ u32 buf_kern_offset; /* amount of data copied so far */ u32 buf_user_offset; /* read position in userspace buffer */ }; static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz) { state->buf_kern_offset += sz; return state->buf_kern_offset >= sz ? 
0 : -EINVAL; } static int ebt_buf_add(struct ebt_entries_buf_state *state, void *data, unsigned int sz) { if (state->buf_kern_start == NULL) goto count_only; if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) return -EINVAL; memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); count_only: state->buf_user_offset += sz; return ebt_buf_count(state, sz); } static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) { char *b = state->buf_kern_start; if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) return -EINVAL; if (b != NULL && sz > 0) memset(b + state->buf_kern_offset, 0, sz); /* do not adjust ->buf_user_offset here, we added kernel-side padding */ return ebt_buf_count(state, sz); } enum compat_mwt { EBT_COMPAT_MATCH, EBT_COMPAT_WATCHER, EBT_COMPAT_TARGET, }; static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, enum compat_mwt compat_mwt, struct ebt_entries_buf_state *state, const unsigned char *base) { char name[EBT_FUNCTION_MAXNAMELEN]; struct xt_match *match; struct xt_target *wt; void *dst = NULL; int off, pad = 0; unsigned int size_kern, match_size = mwt->match_size; strlcpy(name, mwt->u.name, sizeof(name)); if (state->buf_kern_start) dst = state->buf_kern_start + state->buf_kern_offset; switch (compat_mwt) { case EBT_COMPAT_MATCH: match = xt_request_find_match(NFPROTO_BRIDGE, name, 0); if (IS_ERR(match)) return PTR_ERR(match); off = ebt_compat_match_offset(match, match_size); if (dst) { if (match->compat_from_user) match->compat_from_user(dst, mwt->data); else memcpy(dst, mwt->data, match_size); } size_kern = match->matchsize; if (unlikely(size_kern == -1)) size_kern = match_size; module_put(match->me); break; case EBT_COMPAT_WATCHER: /* fallthrough */ case EBT_COMPAT_TARGET: wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0); if (IS_ERR(wt)) return PTR_ERR(wt); off = xt_compat_target_offset(wt); if (dst) { if (wt->compat_from_user) wt->compat_from_user(dst, mwt->data); else memcpy(dst, mwt->data, match_size); } size_kern = wt->targetsize; module_put(wt->me); break; default: return -EINVAL; } state->buf_kern_offset += match_size + off; state->buf_user_offset += match_size; pad = XT_ALIGN(size_kern) - size_kern; if (pad > 0 && dst) { if (WARN_ON(state->buf_kern_len <= pad)) return -EINVAL; if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) return -EINVAL; memset(dst + size_kern, 0, pad); } return off + match_size; } /* return size of all matches, watchers or target, including necessary * alignment and padding. 
*/ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, unsigned int size_left, enum compat_mwt type, struct ebt_entries_buf_state *state, const void *base) { int growth = 0; char *buf; if (size_left == 0) return 0; buf = (char *) match32; while (size_left >= sizeof(*match32)) { struct ebt_entry_match *match_kern; int ret; match_kern = (struct ebt_entry_match *) state->buf_kern_start; if (match_kern) { char *tmp; tmp = state->buf_kern_start + state->buf_kern_offset; match_kern = (struct ebt_entry_match *) tmp; } ret = ebt_buf_add(state, buf, sizeof(*match32)); if (ret < 0) return ret; size_left -= sizeof(*match32); /* add padding before match->data (if any) */ ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize()); if (ret < 0) return ret; if (match32->match_size > size_left) return -EINVAL; size_left -= match32->match_size; ret = compat_mtw_from_user(match32, type, state, base); if (ret < 0) return ret; if (WARN_ON(ret < match32->match_size)) return -EINVAL; growth += ret - match32->match_size; growth += ebt_compat_entry_padsize(); buf += sizeof(*match32); buf += match32->match_size; if (match_kern) match_kern->match_size = ret; WARN_ON(type == EBT_COMPAT_TARGET && size_left); match32 = (struct compat_ebt_entry_mwt *) buf; } return growth; } /* called for all ebt_entry structures. */ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, unsigned int *total, struct ebt_entries_buf_state *state) { unsigned int i, j, startoff, new_offset = 0; /* stores match/watchers/targets & offset of next struct ebt_entry: */ unsigned int offsets[4]; unsigned int *offsets_update = NULL; int ret; char *buf_start; if (*total < sizeof(struct ebt_entries)) return -EINVAL; if (!entry->bitmask) { *total -= sizeof(struct ebt_entries); return ebt_buf_add(state, entry, sizeof(struct ebt_entries)); } if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry)) return -EINVAL; startoff = state->buf_user_offset; /* pull in most part of ebt_entry, it does not need to be changed. */ ret = ebt_buf_add(state, entry, offsetof(struct ebt_entry, watchers_offset)); if (ret < 0) return ret; offsets[0] = sizeof(struct ebt_entry); /* matches come first */ memcpy(&offsets[1], &entry->watchers_offset, sizeof(offsets) - sizeof(offsets[0])); if (state->buf_kern_start) { buf_start = state->buf_kern_start + state->buf_kern_offset; offsets_update = (unsigned int *) buf_start; } ret = ebt_buf_add(state, &offsets[1], sizeof(offsets) - sizeof(offsets[0])); if (ret < 0) return ret; buf_start = (char *) entry; /* 0: matches offset, always follows ebt_entry. * 1: watchers offset, from ebt_entry structure * 2: target offset, from ebt_entry structure * 3: next ebt_entry offset, from ebt_entry structure * * offsets are relative to beginning of struct ebt_entry (i.e., 0). 
*/ for (i = 0, j = 1 ; j < 4 ; j++, i++) { struct compat_ebt_entry_mwt *match32; unsigned int size; char *buf = buf_start + offsets[i]; if (offsets[i] > offsets[j]) return -EINVAL; match32 = (struct compat_ebt_entry_mwt *) buf; size = offsets[j] - offsets[i]; ret = ebt_size_mwt(match32, size, i, state, base); if (ret < 0) return ret; new_offset += ret; if (offsets_update && new_offset) { pr_debug("change offset %d to %d\n", offsets_update[i], offsets[j] + new_offset); offsets_update[i] = offsets[j] + new_offset; } } if (state->buf_kern_start == NULL) { unsigned int offset = buf_start - (char *) base; ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); if (ret < 0) return ret; } startoff = state->buf_user_offset - startoff; if (WARN_ON(*total < startoff)) return -EINVAL; *total -= startoff; return 0; } /* repl->entries_size is the size of the ebt_entry blob in userspace. * It might need more memory when copied to a 64 bit kernel in case * userspace is 32-bit. So, first task: find out how much memory is needed. * * Called before validation is performed. */ static int compat_copy_entries(unsigned char *data, unsigned int size_user, struct ebt_entries_buf_state *state) { unsigned int size_remaining = size_user; int ret; ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data, &size_remaining, state); if (ret < 0) return ret; WARN_ON(size_remaining); return state->buf_kern_offset; } static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, void __user *user, unsigned int len) { struct compat_ebt_replace tmp; int i; if (len < sizeof(tmp)) return -EINVAL; if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) return -EINVAL; if (tmp.entries_size == 0) return -EINVAL; if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry)); /* starting with hook_entry, 32 vs. 
64 bit structures are different */ for (i = 0; i < NF_BR_NUMHOOKS; i++) repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]); repl->num_counters = tmp.num_counters; repl->counters = compat_ptr(tmp.counters); repl->entries = compat_ptr(tmp.entries); return 0; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret, i, countersize, size64; struct ebt_table_info *newinfo; struct ebt_replace tmp; struct ebt_entries_buf_state state; void *entries_tmp; ret = compat_copy_ebt_replace_from_user(&tmp, user, len); if (ret) { /* try real handler in case userland supplied needed padding */ if (ret == -EINVAL && do_replace(net, user, len) == 0) ret = 0; return ret; } countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); if (!newinfo) return -ENOMEM; if (countersize) memset(newinfo->counters, 0, countersize); memset(&state, 0, sizeof(state)); newinfo->entries = vmalloc(tmp.entries_size); if (!newinfo->entries) { ret = -ENOMEM; goto free_newinfo; } if (copy_from_user( newinfo->entries, tmp.entries, tmp.entries_size) != 0) { ret = -EFAULT; goto free_entries; } entries_tmp = newinfo->entries; xt_compat_lock(NFPROTO_BRIDGE); xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (ret < 0) goto out_unlock; pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n", tmp.entries_size, state.buf_kern_offset, state.buf_user_offset, xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size)); size64 = ret; newinfo->entries = vmalloc(size64); if (!newinfo->entries) { vfree(entries_tmp); ret = -ENOMEM; goto out_unlock; } memset(&state, 0, sizeof(state)); state.buf_kern_start = newinfo->entries; state.buf_kern_len = size64; ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (WARN_ON(ret < 0)) goto out_unlock; vfree(entries_tmp); tmp.entries_size = size64; for (i = 0; i < NF_BR_NUMHOOKS; i++) { char __user *usrptr; if (tmp.hook_entry[i]) { unsigned int delta; usrptr = (char __user *) tmp.hook_entry[i]; delta = usrptr - tmp.entries; usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta); tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr; } } xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); ret = do_replace_finish(net, &tmp, newinfo); if (ret == 0) return ret; free_entries: vfree(newinfo->entries); free_newinfo: vfree(newinfo); return ret; out_unlock: xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); goto free_entries; } static int compat_update_counters(struct net *net, void __user *user, unsigned int len) { struct compat_ebt_replace hlp; if (copy_from_user(&hlp, user, sizeof(hlp))) return -EFAULT; /* try real handler in case userland supplied needed padding */ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) return update_counters(net, user, len); return do_update_counters(net, hlp.name, compat_ptr(hlp.counters), hlp.num_counters, user, len); } static int compat_do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; struct net *net = sock_net(sk); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case EBT_SO_SET_ENTRIES: ret = compat_do_replace(net, user, len); break; case EBT_SO_SET_COUNTERS: ret = compat_update_counters(net, user, len); break; default: ret = -EINVAL; } return ret; } static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; struct 
compat_ebt_replace tmp; struct ebt_table *t; struct net *net = sock_net(sk); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; /* try real handler in case userland supplied needed padding */ if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp)) return do_ebt_get_ctl(sk, cmd, user, len); if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; tmp.name[sizeof(tmp.name) - 1] = '\0'; t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); if (!t) return ret; xt_compat_lock(NFPROTO_BRIDGE); switch (cmd) { case EBT_SO_GET_INFO: tmp.nentries = t->private->nentries; ret = compat_table_info(t->private, &tmp); if (ret) goto out; tmp.valid_hooks = t->valid_hooks; if (copy_to_user(user, &tmp, *len) != 0) { ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_INIT_INFO: tmp.nentries = t->table->nentries; tmp.entries_size = t->table->entries_size; tmp.valid_hooks = t->table->valid_hooks; if (copy_to_user(user, &tmp, *len) != 0) { ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_ENTRIES: case EBT_SO_GET_INIT_ENTRIES: /* try real handler first in case of userland-side padding. * in case we are dealing with an 'ordinary' 32 bit binary * without 64bit compatibility padding, this will fail right * after copy_from_user when the *len argument is validated. * * the compat_ variant needs to do one pass over the kernel * data set to adjust for size differences before it the check. */ if (copy_everything_to_user(t, user, len, cmd) == 0) ret = 0; else ret = compat_copy_everything_to_user(t, user, len, cmd); break; default: ret = -EINVAL; } out: xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); mutex_unlock(&ebt_mutex); return ret; } #endif static struct nf_sockopt_ops ebt_sockopts = { .pf = PF_INET, .set_optmin = EBT_BASE_CTL, .set_optmax = EBT_SO_SET_MAX + 1, .set = do_ebt_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ebt_set_ctl, #endif .get_optmin = EBT_BASE_CTL, .get_optmax = EBT_SO_GET_MAX + 1, .get = do_ebt_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ebt_get_ctl, #endif .owner = THIS_MODULE, }; static int __init ebtables_init(void) { int ret; ret = xt_register_target(&ebt_standard_target); if (ret < 0) return ret; ret = nf_register_sockopt(&ebt_sockopts); if (ret < 0) { xt_unregister_target(&ebt_standard_target); return ret; } return 0; } static void __exit ebtables_fini(void) { nf_unregister_sockopt(&ebt_sockopts); xt_unregister_target(&ebt_standard_target); } EXPORT_SYMBOL(ebt_register_table); EXPORT_SYMBOL(ebt_unregister_table); EXPORT_SYMBOL(ebt_do_table); module_init(ebtables_init); module_exit(ebtables_fini); MODULE_LICENSE("GPL");
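/*
 * Illustrative sketch, not part of ebtables.c proper: a standalone userspace
 * program showing the per-cpu counter layout that SMP_ALIGN, COUNTER_OFFSET
 * and COUNTER_BASE implement above.  Each cpu owns a cache-aligned run of
 * 'nentries' counters, so cpu N's block starts at
 * base + N * COUNTER_OFFSET(nentries), and the user-visible totals are the
 * sum over all cpus (cf. get_counters()).  All demo_* names and the 64-byte
 * cache line size are assumptions made for this sketch only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define DEMO_CACHE_BYTES 64
#define DEMO_ALIGN(x) (((x) + DEMO_CACHE_BYTES - 1) & ~(size_t)(DEMO_CACHE_BYTES - 1))

struct demo_counter { uint64_t pcnt, bcnt; };	/* mirrors struct ebt_counter */

static struct demo_counter *demo_counter_base(void *c, unsigned int nentries,
					      unsigned int cpu)
{
	/* same arithmetic as COUNTER_BASE(c, n, cpu) */
	return (struct demo_counter *)((char *)c +
		DEMO_ALIGN(nentries * sizeof(struct demo_counter)) * cpu);
}

int main(void)
{
	unsigned int nentries = 5, ncpus = 4, cpu;
	size_t block = DEMO_ALIGN(nentries * sizeof(struct demo_counter));
	void *buf = calloc(ncpus, block);
	uint64_t pcnt_total = 0;

	if (!buf)
		return 1;

	/* pretend every cpu saw one packet hit entry 3 of its own block */
	for (cpu = 0; cpu < ncpus; cpu++)
		demo_counter_base(buf, nentries, cpu)[3].pcnt++;

	/* what userspace would read back after summing, as get_counters() does */
	for (cpu = 0; cpu < ncpus; cpu++)
		pcnt_total += demo_counter_base(buf, nentries, cpu)[3].pcnt;

	printf("per-cpu block: %zu bytes, entry 3 pcnt total: %llu\n",
	       block, (unsigned long long)pcnt_total);
	free(buf);
	return 0;
}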
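/*
 * Illustrative sketch, not part of ebtables.c proper: how a standard-target
 * verdict is interpreted by ebt_do_table() and check_chainloops().  Negative
 * values are the built-in verdicts (the uapi numbering is assumed below);
 * a verdict >= 0 is a byte offset from the start of the entries blob and
 * must land on the struct ebt_entries header of a user-defined chain, which
 * is why ebt_do_table() rejects the jump when chaininfo->distinguisher is
 * non-zero.  All DEMO_* names are stand-ins for this sketch only.
 */
#include <stdio.h>

#define DEMO_EBT_ACCEPT   (-1)	/* assumed to match EBT_ACCEPT */
#define DEMO_EBT_DROP     (-2)	/* assumed to match EBT_DROP */
#define DEMO_EBT_CONTINUE (-3)	/* assumed to match EBT_CONTINUE */
#define DEMO_EBT_RETURN   (-4)	/* assumed to match EBT_RETURN */
#define DEMO_NUM_STANDARD_TARGETS 4

static const char *demo_describe_verdict(int verdict)
{
	switch (verdict) {
	case DEMO_EBT_ACCEPT:	return "accept the frame";
	case DEMO_EBT_DROP:	return "drop the frame";
	case DEMO_EBT_CONTINUE:	return "fall through to the next rule";
	case DEMO_EBT_RETURN:	return "pop back to the calling chain";
	default:
		if (verdict < -DEMO_NUM_STANDARD_TARGETS)
			return "invalid (rejected by ebt_check_entry())";
		/* >= 0: offset of a user-defined chain inside the blob */
		return "jump to the ebt_entries header at base + verdict";
	}
}

int main(void)
{
	int samples[] = { -1, -2, -3, -4, -9, 0, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("verdict %5d: %s\n", samples[i],
		       demo_describe_verdict(samples[i]));
	return 0;
}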
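/*
 * Illustrative sketch, not part of ebtables.c proper: the shape of the
 * userspace entries blob walked by ebt_verify_pointers() and
 * ebt_check_entry_size_and_hooks().  A chain starts with a header whose
 * leading field (the distinguisher) is 0; a rule is a record whose bitmask
 * has EBT_ENTRY_OR_ENTRIES set and whose next_offset gives the distance to
 * the following record, and the walk must end exactly at entries_size.
 * The demo structs and sizes below are simplified stand-ins, not the real
 * uapi layout.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DEMO_ENTRY_OR_ENTRIES 0x01	/* stand-in for EBT_ENTRY_OR_ENTRIES */

struct demo_record {			/* common prefix of both record kinds */
	uint32_t bitmask;		/* 0 => chain header, else rule */
	uint32_t next_offset;		/* rule only: size of this rule */
};

int main(void)
{
	unsigned char blob[64];
	unsigned int offset = 0, limit;
	struct demo_record rec;

	/* build a toy blob: one chain header followed by two rules */
	memset(blob, 0, sizeof(blob));
	rec.bitmask = 0; rec.next_offset = 0;
	memcpy(blob + 0, &rec, sizeof(rec));		/* chain header */
	rec.bitmask = DEMO_ENTRY_OR_ENTRIES; rec.next_offset = 24;
	memcpy(blob + 8, &rec, sizeof(rec));		/* rule 1 */
	memcpy(blob + 32, &rec, sizeof(rec));		/* rule 2 */
	limit = 56;

	while (offset < limit) {
		memcpy(&rec, blob + offset, sizeof(rec));
		if (!(rec.bitmask & DEMO_ENTRY_OR_ENTRIES)) {
			printf("%2u: chain header\n", offset);
			offset += sizeof(rec);		/* demo header size */
		} else {
			printf("%2u: rule, next_offset %u\n",
			       offset, (unsigned)rec.next_offset);
			offset += rec.next_offset;
		}
	}
	/* mirrors the "offset != limit" / entries_size consistency check */
	return offset == limit ? 0 : 1;
}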
/* * ebtables * * Author: * Bart De Schuymer <bdschuym@pandora.be> * * ebtables.c,v 2.0, July, 2002 * * This code is strongly inspired by the iptables code which is * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kmod.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/audit.h> #include <net/sock.h> /* needed for logical [in,out]-dev filtering */ #include "../br_private.h" #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ "report to author: "format, ## args) /* #define BUGPRINT(format, args...) */ /* Each cpu has its own set of counters, so there is no need for write_lock in * the softirq * For reading or updating the counters, the user context needs to * get a write_lock */ /* The size of each set of counters is altered to get cache alignment */ #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter))) #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \ COUNTER_OFFSET(n) * cpu)) static DEFINE_MUTEX(ebt_mutex); #ifdef CONFIG_COMPAT static void ebt_standard_compat_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v >= 0) v += xt_compat_calc_jump(NFPROTO_BRIDGE, v); memcpy(dst, &v, sizeof(v)); } static int ebt_standard_compat_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv >= 0) cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; } #endif static struct xt_target ebt_standard_target = { .name = "standard", .revision = 0, .family = NFPROTO_BRIDGE, .targetsize = sizeof(int), #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = ebt_standard_compat_from_user, .compat_to_user = ebt_standard_compat_to_user, #endif }; static inline int ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb, struct xt_action_param *par) { par->target = w->u.watcher; par->targinfo = w->data; w->u.watcher->target(skb, par); /* watchers don't give a verdict */ return 0; } static inline int ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb, struct xt_action_param *par) { par->match = m->u.match; par->matchinfo = m->data; return m->u.match->match(skb, par) ? 
EBT_MATCH : EBT_NOMATCH; } static inline int ebt_dev_check(const char *entry, const struct net_device *device) { int i = 0; const char *devname; if (*entry == '\0') return 0; if (!device) return 1; devname = device->name; /* 1 is the wildcard token */ while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i]) i++; return devname[i] != entry[i] && entry[i] != 1; } /* process standard matches */ static inline int ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out) { const struct ethhdr *h = eth_hdr(skb); const struct net_bridge_port *p; __be16 ethproto; if (skb_vlan_tag_present(skb)) ethproto = htons(ETH_P_8021Q); else ethproto = h->h_proto; if (e->bitmask & EBT_802_3) { if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto))) return 1; } else if (!(e->bitmask & EBT_NOPROTO) && NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto)) return 1; if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in))) return 1; if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out))) return 1; /* rcu_read_lock()ed by nf_hook_thresh */ if (in && (p = br_port_get_rcu(in)) != NULL && NF_INVF(e, EBT_ILOGICALIN, ebt_dev_check(e->logical_in, p->br->dev))) return 1; if (out && (p = br_port_get_rcu(out)) != NULL && NF_INVF(e, EBT_ILOGICALOUT, ebt_dev_check(e->logical_out, p->br->dev))) return 1; if (e->bitmask & EBT_SOURCEMAC) { if (NF_INVF(e, EBT_ISOURCE, !ether_addr_equal_masked(h->h_source, e->sourcemac, e->sourcemsk))) return 1; } if (e->bitmask & EBT_DESTMAC) { if (NF_INVF(e, EBT_IDEST, !ether_addr_equal_masked(h->h_dest, e->destmac, e->destmsk))) return 1; } return 0; } static inline struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry) { return (void *)entry + entry->next_offset; } /* Do some firewalling */ unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table) { unsigned int hook = state->hook; int i, nentries; struct ebt_entry *point; struct ebt_counter *counter_base, *cb_base; const struct ebt_entry_target *t; int verdict, sp = 0; struct ebt_chainstack *cs; struct ebt_entries *chaininfo; const char *base; const struct ebt_table_info *private; struct xt_action_param acpar; acpar.state = state; acpar.hotdrop = false; read_lock_bh(&table->lock); private = table->private; cb_base = COUNTER_BASE(private->counters, private->nentries, smp_processor_id()); if (private->chainstack) cs = private->chainstack[smp_processor_id()]; else cs = NULL; chaininfo = private->hook_entry[hook]; nentries = private->hook_entry[hook]->nentries; point = (struct ebt_entry *)(private->hook_entry[hook]->data); counter_base = cb_base + private->hook_entry[hook]->counter_offset; /* base for chain jumps */ base = private->entries; i = 0; while (i < nentries) { if (ebt_basic_match(point, skb, state->in, state->out)) goto letscontinue; if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0) goto letscontinue; if (acpar.hotdrop) { read_unlock_bh(&table->lock); return NF_DROP; } /* increase counter */ (*(counter_base + i)).pcnt++; (*(counter_base + i)).bcnt += skb->len; /* these should only watch: not modify, nor tell us * what to do with the packet */ EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar); t = (struct ebt_entry_target *) (((char *)point) + point->target_offset); /* standard target */ if (!t->u.target->target) verdict = ((struct ebt_standard_target *)t)->verdict; else { acpar.target = t->u.target; acpar.targinfo = t->data; verdict = t->u.target->target(skb, &acpar); } if (verdict == 
EBT_ACCEPT) { read_unlock_bh(&table->lock); return NF_ACCEPT; } if (verdict == EBT_DROP) { read_unlock_bh(&table->lock); return NF_DROP; } if (verdict == EBT_RETURN) { letsreturn: if (WARN(sp == 0, "RETURN on base chain")) { /* act like this is EBT_CONTINUE */ goto letscontinue; } sp--; /* put all the local variables right */ i = cs[sp].n; chaininfo = cs[sp].chaininfo; nentries = chaininfo->nentries; point = cs[sp].e; counter_base = cb_base + chaininfo->counter_offset; continue; } if (verdict == EBT_CONTINUE) goto letscontinue; if (WARN(verdict < 0, "bogus standard verdict\n")) { read_unlock_bh(&table->lock); return NF_DROP; } /* jump to a udc */ cs[sp].n = i + 1; cs[sp].chaininfo = chaininfo; cs[sp].e = ebt_next_entry(point); i = 0; chaininfo = (struct ebt_entries *) (base + verdict); if (WARN(chaininfo->distinguisher, "jump to non-chain\n")) { read_unlock_bh(&table->lock); return NF_DROP; } nentries = chaininfo->nentries; point = (struct ebt_entry *)chaininfo->data; counter_base = cb_base + chaininfo->counter_offset; sp++; continue; letscontinue: point = ebt_next_entry(point); i++; } /* I actually like this :) */ if (chaininfo->policy == EBT_RETURN) goto letsreturn; if (chaininfo->policy == EBT_ACCEPT) { read_unlock_bh(&table->lock); return NF_ACCEPT; } read_unlock_bh(&table->lock); return NF_DROP; } /* If it succeeds, returns element and locks mutex */ static inline void * find_inlist_lock_noload(struct list_head *head, const char *name, int *error, struct mutex *mutex) { struct { struct list_head list; char name[EBT_FUNCTION_MAXNAMELEN]; } *e; mutex_lock(mutex); list_for_each_entry(e, head, list) { if (strcmp(e->name, name) == 0) return e; } *error = -ENOENT; mutex_unlock(mutex); return NULL; } static void * find_inlist_lock(struct list_head *head, const char *name, const char *prefix, int *error, struct mutex *mutex) { return try_then_request_module( find_inlist_lock_noload(head, name, error, mutex), "%s%s", prefix, name); } static inline struct ebt_table * find_table_lock(struct net *net, const char *name, int *error, struct mutex *mutex) { return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name, "ebtable_", error, mutex); } static inline int ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par, unsigned int *cnt) { const struct ebt_entry *e = par->entryinfo; struct xt_match *match; size_t left = ((char *)e + e->watchers_offset) - (char *)m; int ret; if (left < sizeof(struct ebt_entry_match) || left - sizeof(struct ebt_entry_match) < m->match_size) return -EINVAL; match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) { if (!IS_ERR(match)) module_put(match->me); request_module("ebt_%s", m->u.name); match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); } if (IS_ERR(match)) return PTR_ERR(match); m->u.match = match; par->match = match; par->matchinfo = m->data; ret = xt_check_match(par, m->match_size, e->ethproto, e->invflags & EBT_IPROTO); if (ret < 0) { module_put(match->me); return ret; } (*cnt)++; return 0; } static inline int ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, unsigned int *cnt) { const struct ebt_entry *e = par->entryinfo; struct xt_target *watcher; size_t left = ((char *)e + e->target_offset) - (char *)w; int ret; if (left < sizeof(struct ebt_entry_watcher) || left - sizeof(struct ebt_entry_watcher) < w->watcher_size) return -EINVAL; watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); if (IS_ERR(watcher)) return PTR_ERR(watcher); w->u.watcher = watcher; 
par->target = watcher; par->targinfo = w->data; ret = xt_check_target(par, w->watcher_size, e->ethproto, e->invflags & EBT_IPROTO); if (ret < 0) { module_put(watcher->me); return ret; } (*cnt)++; return 0; } static int ebt_verify_pointers(const struct ebt_replace *repl, struct ebt_table_info *newinfo) { unsigned int limit = repl->entries_size; unsigned int valid_hooks = repl->valid_hooks; unsigned int offset = 0; int i; for (i = 0; i < NF_BR_NUMHOOKS; i++) newinfo->hook_entry[i] = NULL; newinfo->entries_size = repl->entries_size; newinfo->nentries = repl->nentries; while (offset < limit) { size_t left = limit - offset; struct ebt_entry *e = (void *)newinfo->entries + offset; if (left < sizeof(unsigned int)) break; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if ((valid_hooks & (1 << i)) == 0) continue; if ((char __user *)repl->hook_entry[i] == repl->entries + offset) break; } if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) { if (e->bitmask != 0) { /* we make userspace set this right, * so there is no misunderstanding */ BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set " "in distinguisher\n"); return -EINVAL; } if (i != NF_BR_NUMHOOKS) newinfo->hook_entry[i] = (struct ebt_entries *)e; if (left < sizeof(struct ebt_entries)) break; offset += sizeof(struct ebt_entries); } else { if (left < sizeof(struct ebt_entry)) break; if (left < e->next_offset) break; if (e->next_offset < sizeof(struct ebt_entry)) return -EINVAL; offset += e->next_offset; } } if (offset != limit) { BUGPRINT("entries_size too small\n"); return -EINVAL; } /* check if all valid hooks have a chain */ for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (!newinfo->hook_entry[i] && (valid_hooks & (1 << i))) { BUGPRINT("Valid hook without chain\n"); return -EINVAL; } } return 0; } /* this one is very careful, as it is the first function * to parse the userspace data */ static inline int ebt_check_entry_size_and_hooks(const struct ebt_entry *e, const struct ebt_table_info *newinfo, unsigned int *n, unsigned int *cnt, unsigned int *totalcnt, unsigned int *udc_cnt) { int i; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if ((void *)e == (void *)newinfo->hook_entry[i]) break; } /* beginning of a new chain * if i == NF_BR_NUMHOOKS it must be a user defined chain */ if (i != NF_BR_NUMHOOKS || !e->bitmask) { /* this checks if the previous chain has as many entries * as it said it has */ if (*n != *cnt) { BUGPRINT("nentries does not equal the nr of entries " "in the chain\n"); return -EINVAL; } if (((struct ebt_entries *)e)->policy != EBT_DROP && ((struct ebt_entries *)e)->policy != EBT_ACCEPT) { /* only RETURN from udc */ if (i != NF_BR_NUMHOOKS || ((struct ebt_entries *)e)->policy != EBT_RETURN) { BUGPRINT("bad policy\n"); return -EINVAL; } } if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */ (*udc_cnt)++; if (((struct ebt_entries *)e)->counter_offset != *totalcnt) { BUGPRINT("counter_offset != totalcnt"); return -EINVAL; } *n = ((struct ebt_entries *)e)->nentries; *cnt = 0; return 0; } /* a plain old entry, heh */ if (sizeof(struct ebt_entry) > e->watchers_offset || e->watchers_offset > e->target_offset || e->target_offset >= e->next_offset) { BUGPRINT("entry offsets not in right order\n"); return -EINVAL; } /* this is not checked anywhere else */ if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) { BUGPRINT("target size too small\n"); return -EINVAL; } (*cnt)++; (*totalcnt)++; return 0; } struct ebt_cl_stack { struct ebt_chainstack cs; int from; unsigned int hookmask; }; /* We need these positions to check that the 
jumps to a different part of the * entries is a jump to the beginning of a new chain. */ static inline int ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, unsigned int *n, struct ebt_cl_stack *udc) { int i; /* we're only interested in chain starts */ if (e->bitmask) return 0; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (newinfo->hook_entry[i] == (struct ebt_entries *)e) break; } /* only care about udc */ if (i != NF_BR_NUMHOOKS) return 0; udc[*n].cs.chaininfo = (struct ebt_entries *)e; /* these initialisations are depended on later in check_chainloops() */ udc[*n].cs.n = 0; udc[*n].hookmask = 0; (*n)++; return 0; } static inline int ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i) { struct xt_mtdtor_param par; if (i && (*i)-- == 0) return 1; par.net = net; par.match = m->u.match; par.matchinfo = m->data; par.family = NFPROTO_BRIDGE; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); return 0; } static inline int ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i) { struct xt_tgdtor_param par; if (i && (*i)-- == 0) return 1; par.net = net; par.target = w->u.watcher; par.targinfo = w->data; par.family = NFPROTO_BRIDGE; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); return 0; } static inline int ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt) { struct xt_tgdtor_param par; struct ebt_entry_target *t; if (e->bitmask == 0) return 0; /* we're done */ if (cnt && (*cnt)-- == 0) return 1; EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL); EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL); t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); par.net = net; par.target = t->u.target; par.targinfo = t->data; par.family = NFPROTO_BRIDGE; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); return 0; } static inline int ebt_check_entry(struct ebt_entry *e, struct net *net, const struct ebt_table_info *newinfo, const char *name, unsigned int *cnt, struct ebt_cl_stack *cl_s, unsigned int udc_cnt) { struct ebt_entry_target *t; struct xt_target *target; unsigned int i, j, hook = 0, hookmask = 0; size_t gap; int ret; struct xt_mtchk_param mtpar; struct xt_tgchk_param tgpar; /* don't mess with the struct ebt_entries */ if (e->bitmask == 0) return 0; if (e->bitmask & ~EBT_F_MASK) { BUGPRINT("Unknown flag for bitmask\n"); return -EINVAL; } if (e->invflags & ~EBT_INV_MASK) { BUGPRINT("Unknown flag for inv bitmask\n"); return -EINVAL; } if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) { BUGPRINT("NOPROTO & 802_3 not allowed\n"); return -EINVAL; } /* what hook do we belong to? 
*/ for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (!newinfo->hook_entry[i]) continue; if ((char *)newinfo->hook_entry[i] < (char *)e) hook = i; else break; } /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on * a base chain */ if (i < NF_BR_NUMHOOKS) hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); else { for (i = 0; i < udc_cnt; i++) if ((char *)(cl_s[i].cs.chaininfo) > (char *)e) break; if (i == 0) hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); else hookmask = cl_s[i - 1].hookmask; } i = 0; mtpar.net = tgpar.net = net; mtpar.table = tgpar.table = name; mtpar.entryinfo = tgpar.entryinfo = e; mtpar.hook_mask = tgpar.hook_mask = hookmask; mtpar.family = tgpar.family = NFPROTO_BRIDGE; ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i); if (ret != 0) goto cleanup_matches; j = 0; ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j); if (ret != 0) goto cleanup_watchers; t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); gap = e->next_offset - e->target_offset; target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0); if (IS_ERR(target)) { ret = PTR_ERR(target); goto cleanup_watchers; } t->u.target = target; if (t->u.target == &ebt_standard_target) { if (gap < sizeof(struct ebt_standard_target)) { BUGPRINT("Standard target size too big\n"); ret = -EFAULT; goto cleanup_watchers; } if (((struct ebt_standard_target *)t)->verdict < -NUM_STANDARD_TARGETS) { BUGPRINT("Invalid standard target\n"); ret = -EFAULT; goto cleanup_watchers; } } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) { module_put(t->u.target->me); ret = -EFAULT; goto cleanup_watchers; } tgpar.target = target; tgpar.targinfo = t->data; ret = xt_check_target(&tgpar, t->target_size, e->ethproto, e->invflags & EBT_IPROTO); if (ret < 0) { module_put(target->me); goto cleanup_watchers; } (*cnt)++; return 0; cleanup_watchers: EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j); cleanup_matches: EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i); return ret; } /* checks for loops and sets the hook mask for udc * the hook mask for udc tells us from which base chains the udc can be * accessed. 
This mask is a parameter to the check() functions of the extensions */ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s, unsigned int udc_cnt, unsigned int hooknr, char *base) { int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; const struct ebt_entry *e = (struct ebt_entry *)chain->data; const struct ebt_entry_target *t; while (pos < nentries || chain_nr != -1) { /* end of udc, go back one 'recursion' step */ if (pos == nentries) { /* put back values of the time when this chain was called */ e = cl_s[chain_nr].cs.e; if (cl_s[chain_nr].from != -1) nentries = cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries; else nentries = chain->nentries; pos = cl_s[chain_nr].cs.n; /* make sure we won't see a loop that isn't one */ cl_s[chain_nr].cs.n = 0; chain_nr = cl_s[chain_nr].from; if (pos == nentries) continue; } t = (struct ebt_entry_target *) (((char *)e) + e->target_offset); if (strcmp(t->u.name, EBT_STANDARD_TARGET)) goto letscontinue; if (e->target_offset + sizeof(struct ebt_standard_target) > e->next_offset) { BUGPRINT("Standard target size too big\n"); return -1; } verdict = ((struct ebt_standard_target *)t)->verdict; if (verdict >= 0) { /* jump to another chain */ struct ebt_entries *hlp2 = (struct ebt_entries *)(base + verdict); for (i = 0; i < udc_cnt; i++) if (hlp2 == cl_s[i].cs.chaininfo) break; /* bad destination or loop */ if (i == udc_cnt) { BUGPRINT("bad destination\n"); return -1; } if (cl_s[i].cs.n) { BUGPRINT("loop\n"); return -1; } if (cl_s[i].hookmask & (1 << hooknr)) goto letscontinue; /* this can't be 0, so the loop test is correct */ cl_s[i].cs.n = pos + 1; pos = 0; cl_s[i].cs.e = ebt_next_entry(e); e = (struct ebt_entry *)(hlp2->data); nentries = hlp2->nentries; cl_s[i].from = chain_nr; chain_nr = i; /* this udc is accessible from the base chain for hooknr */ cl_s[i].hookmask |= (1 << hooknr); continue; } letscontinue: e = ebt_next_entry(e); pos++; } return 0; } /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ static int translate_table(struct net *net, const char *name, struct ebt_table_info *newinfo) { unsigned int i, j, k, udc_cnt; int ret; struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */ i = 0; while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i]) i++; if (i == NF_BR_NUMHOOKS) { BUGPRINT("No valid hooks specified\n"); return -EINVAL; } if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) { BUGPRINT("Chains don't start at beginning\n"); return -EINVAL; } /* make sure chains are ordered after each other in same order * as their corresponding hooks */ for (j = i + 1; j < NF_BR_NUMHOOKS; j++) { if (!newinfo->hook_entry[j]) continue; if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) { BUGPRINT("Hook order must be followed\n"); return -EINVAL; } i = j; } /* do some early checkings and initialize some things */ i = 0; /* holds the expected nr. of entries for the chain */ j = 0; /* holds the up to now counted entries for the chain */ k = 0; /* holds the total nr. of entries, should equal * newinfo->nentries afterwards */ udc_cnt = 0; /* will hold the nr. 
of user defined chains (udc) */ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_check_entry_size_and_hooks, newinfo, &i, &j, &k, &udc_cnt); if (ret != 0) return ret; if (i != j) { BUGPRINT("nentries does not equal the nr of entries in the " "(last) chain\n"); return -EINVAL; } if (k != newinfo->nentries) { BUGPRINT("Total nentries is wrong\n"); return -EINVAL; } /* get the location of the udc, put them in an array * while we're at it, allocate the chainstack */ if (udc_cnt) { /* this will get free'd in do_replace()/ebt_register_table() * if an error occurs */ newinfo->chainstack = vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack))); if (!newinfo->chainstack) return -ENOMEM; for_each_possible_cpu(i) { newinfo->chainstack[i] = vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0]))); if (!newinfo->chainstack[i]) { while (i) vfree(newinfo->chainstack[--i]); vfree(newinfo->chainstack); newinfo->chainstack = NULL; return -ENOMEM; } } cl_s = vmalloc(udc_cnt * sizeof(*cl_s)); if (!cl_s) return -ENOMEM; i = 0; /* the i'th udc */ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_get_udc_positions, newinfo, &i, cl_s); /* sanity check */ if (i != udc_cnt) { BUGPRINT("i != udc_cnt\n"); vfree(cl_s); return -EFAULT; } } /* Check for loops */ for (i = 0; i < NF_BR_NUMHOOKS; i++) if (newinfo->hook_entry[i]) if (check_chainloops(newinfo->hook_entry[i], cl_s, udc_cnt, i, newinfo->entries)) { vfree(cl_s); return -EINVAL; } /* we now know the following (along with E=mc²): * - the nr of entries in each chain is right * - the size of the allocated space is right * - all valid hooks have a corresponding chain * - there are no loops * - wrong data can still be on the level of a single entry * - could be there are jumps to places that are not the * beginning of a chain. This can only occur in chains that * are not accessible from any base chains, so we don't care. 
*/ /* used to know what we need to clean up if something goes wrong */ i = 0; ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt); if (ret != 0) { EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, net, &i); } vfree(cl_s); return ret; } /* called under write_lock */ static void get_counters(const struct ebt_counter *oldcounters, struct ebt_counter *counters, unsigned int nentries) { int i, cpu; struct ebt_counter *counter_base; /* counters of cpu 0 */ memcpy(counters, oldcounters, sizeof(struct ebt_counter) * nentries); /* add other counters to those of cpu 0 */ for_each_possible_cpu(cpu) { if (cpu == 0) continue; counter_base = COUNTER_BASE(oldcounters, nentries, cpu); for (i = 0; i < nentries; i++) { counters[i].pcnt += counter_base[i].pcnt; counters[i].bcnt += counter_base[i].bcnt; } } } static int do_replace_finish(struct net *net, struct ebt_replace *repl, struct ebt_table_info *newinfo) { int ret, i; struct ebt_counter *counterstmp = NULL; /* used to be able to unlock earlier */ struct ebt_table_info *table; struct ebt_table *t; /* the user wants counters back * the check on the size is done later, when we have the lock */ if (repl->num_counters) { unsigned long size = repl->num_counters * sizeof(*counterstmp); counterstmp = vmalloc(size); if (!counterstmp) return -ENOMEM; } newinfo->chainstack = NULL; ret = ebt_verify_pointers(repl, newinfo); if (ret != 0) goto free_counterstmp; ret = translate_table(net, repl->name, newinfo); if (ret != 0) goto free_counterstmp; t = find_table_lock(net, repl->name, &ret, &ebt_mutex); if (!t) { ret = -ENOENT; goto free_iterate; } /* the table doesn't like it */ if (t->check && (ret = t->check(newinfo, repl->valid_hooks))) goto free_unlock; if (repl->num_counters && repl->num_counters != t->private->nentries) { BUGPRINT("Wrong nr. of counters requested\n"); ret = -EINVAL; goto free_unlock; } /* we have the mutex lock, so no danger in reading this pointer */ table = t->private; /* make sure the table can only be rmmod'ed if it contains no rules */ if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) { ret = -ENOENT; goto free_unlock; } else if (table->nentries && !newinfo->nentries) module_put(t->me); /* we need an atomic snapshot of the counters */ write_lock_bh(&t->lock); if (repl->num_counters) get_counters(t->private->counters, counterstmp, t->private->nentries); t->private = newinfo; write_unlock_bh(&t->lock); mutex_unlock(&ebt_mutex); /* so, a user can change the chains while having messed up her counter * allocation. Only reason why this is done is because this way the lock * is held only once, while this doesn't bring the kernel into a * dangerous state. 
*/ if (repl->num_counters && copy_to_user(repl->counters, counterstmp, repl->num_counters * sizeof(struct ebt_counter))) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n"); } /* decrease module count and free resources */ EBT_ENTRY_ITERATE(table->entries, table->entries_size, ebt_cleanup_entry, net, NULL); vfree(table->entries); if (table->chainstack) { for_each_possible_cpu(i) vfree(table->chainstack[i]); vfree(table->chainstack); } vfree(table); vfree(counterstmp); #ifdef CONFIG_AUDIT if (audit_enabled) { audit_log(current->audit_context, GFP_KERNEL, AUDIT_NETFILTER_CFG, "table=%s family=%u entries=%u", repl->name, AF_BRIDGE, repl->nentries); } #endif return ret; free_unlock: mutex_unlock(&ebt_mutex); free_iterate: EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, net, NULL); free_counterstmp: vfree(counterstmp); /* can be initialized in translate_table() */ if (newinfo->chainstack) { for_each_possible_cpu(i) vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } return ret; } /* replace the table */ static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret, countersize; struct ebt_table_info *newinfo; struct ebt_replace tmp; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) { BUGPRINT("Wrong len argument\n"); return -EINVAL; } if (tmp.entries_size == 0) { BUGPRINT("Entries_size never zero\n"); return -EINVAL; } /* overflow check */ if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; tmp.name[sizeof(tmp.name) - 1] = 0; countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); if (!newinfo) return -ENOMEM; if (countersize) memset(newinfo->counters, 0, countersize); newinfo->entries = vmalloc(tmp.entries_size); if (!newinfo->entries) { ret = -ENOMEM; goto free_newinfo; } if (copy_from_user( newinfo->entries, tmp.entries, tmp.entries_size) != 0) { BUGPRINT("Couldn't copy entries from userspace\n"); ret = -EFAULT; goto free_entries; } ret = do_replace_finish(net, &tmp, newinfo); if (ret == 0) return ret; free_entries: vfree(newinfo->entries); free_newinfo: vfree(newinfo); return ret; } static void __ebt_unregister_table(struct net *net, struct ebt_table *table) { int i; mutex_lock(&ebt_mutex); list_del(&table->list); mutex_unlock(&ebt_mutex); EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, ebt_cleanup_entry, net, NULL); if (table->private->nentries) module_put(table->me); vfree(table->private->entries); if (table->private->chainstack) { for_each_possible_cpu(i) vfree(table->private->chainstack[i]); vfree(table->private->chainstack); } vfree(table->private); kfree(table); } int ebt_register_table(struct net *net, const struct ebt_table *input_table, const struct nf_hook_ops *ops, struct ebt_table **res) { struct ebt_table_info *newinfo; struct ebt_table *t, *table; struct ebt_replace_kernel *repl; int ret, i, countersize; void *p; if (input_table == NULL || (repl = input_table->table) == NULL || repl->entries == NULL || repl->entries_size == 0 || repl->counters != NULL || input_table->private != NULL) { BUGPRINT("Bad table data for ebt_register_table!!!\n"); return -EINVAL; } /* Don't add one table to multiple lists. 
*/ table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL); if (!table) { ret = -ENOMEM; goto out; } countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); ret = -ENOMEM; if (!newinfo) goto free_table; p = vmalloc(repl->entries_size); if (!p) goto free_newinfo; memcpy(p, repl->entries, repl->entries_size); newinfo->entries = p; newinfo->entries_size = repl->entries_size; newinfo->nentries = repl->nentries; if (countersize) memset(newinfo->counters, 0, countersize); /* fill in newinfo and parse the entries */ newinfo->chainstack = NULL; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if ((repl->valid_hooks & (1 << i)) == 0) newinfo->hook_entry[i] = NULL; else newinfo->hook_entry[i] = p + ((char *)repl->hook_entry[i] - repl->entries); } ret = translate_table(net, repl->name, newinfo); if (ret != 0) { BUGPRINT("Translate_table failed\n"); goto free_chainstack; } if (table->check && table->check(newinfo, table->valid_hooks)) { BUGPRINT("The table doesn't like its own initial data, lol\n"); ret = -EINVAL; goto free_chainstack; } table->private = newinfo; rwlock_init(&table->lock); mutex_lock(&ebt_mutex); list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) { if (strcmp(t->name, table->name) == 0) { ret = -EEXIST; BUGPRINT("Table name already exists\n"); goto free_unlock; } } /* Hold a reference count if the chains aren't empty */ if (newinfo->nentries && !try_module_get(table->me)) { ret = -ENOENT; goto free_unlock; } list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); mutex_unlock(&ebt_mutex); WRITE_ONCE(*res, table); if (!ops) return 0; ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret) { __ebt_unregister_table(net, table); *res = NULL; } return ret; free_unlock: mutex_unlock(&ebt_mutex); free_chainstack: if (newinfo->chainstack) { for_each_possible_cpu(i) vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } vfree(newinfo->entries); free_newinfo: vfree(newinfo); free_table: kfree(table); out: return ret; } void ebt_unregister_table(struct net *net, struct ebt_table *table, const struct nf_hook_ops *ops) { if (ops) nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ebt_unregister_table(net, table); } /* userspace just supplied us with counters */ static int do_update_counters(struct net *net, const char *name, struct ebt_counter __user *counters, unsigned int num_counters, const void __user *user, unsigned int len) { int i, ret; struct ebt_counter *tmp; struct ebt_table *t; if (num_counters == 0) return -EINVAL; tmp = vmalloc(num_counters * sizeof(*tmp)); if (!tmp) return -ENOMEM; t = find_table_lock(net, name, &ret, &ebt_mutex); if (!t) goto free_tmp; if (num_counters != t->private->nentries) { BUGPRINT("Wrong nr of counters\n"); ret = -EINVAL; goto unlock_mutex; } if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) { ret = -EFAULT; goto unlock_mutex; } /* we want an atomic add of the counters */ write_lock_bh(&t->lock); /* we add to the counters of the first cpu */ for (i = 0; i < num_counters; i++) { t->private->counters[i].pcnt += tmp[i].pcnt; t->private->counters[i].bcnt += tmp[i].bcnt; } write_unlock_bh(&t->lock); ret = 0; unlock_mutex: mutex_unlock(&ebt_mutex); free_tmp: vfree(tmp); return ret; } static int update_counters(struct net *net, const void __user *user, unsigned int len) { struct ebt_replace hlp; if (copy_from_user(&hlp, user, sizeof(hlp))) return -EFAULT; if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) return 
-EINVAL; return do_update_counters(net, hlp.name, hlp.counters, hlp.num_counters, user, len); } static inline int ebt_obj_to_user(char __user *um, const char *_name, const char *data, int entrysize, int usersize, int datasize) { char name[EBT_FUNCTION_MAXNAMELEN] = {0}; /* ebtables expects 32 bytes long names but xt_match names are 29 bytes * long. Copy 29 bytes and fill remaining bytes with zeroes. */ strlcpy(name, _name, sizeof(name)); if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || xt_data_to_user(um + entrysize, data, usersize, datasize, XT_ALIGN(datasize))) return -EFAULT; return 0; } static inline int ebt_match_to_user(const struct ebt_entry_match *m, const char *base, char __user *ubase) { return ebt_obj_to_user(ubase + ((char *)m - base), m->u.match->name, m->data, sizeof(*m), m->u.match->usersize, m->match_size); } static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w, const char *base, char __user *ubase) { return ebt_obj_to_user(ubase + ((char *)w - base), w->u.watcher->name, w->data, sizeof(*w), w->u.watcher->usersize, w->watcher_size); } static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base, char __user *ubase) { int ret; char __user *hlp; const struct ebt_entry_target *t; if (e->bitmask == 0) { /* special case !EBT_ENTRY_OR_ENTRIES */ if (copy_to_user(ubase + ((char *)e - base), e, sizeof(struct ebt_entries))) return -EFAULT; return 0; } if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e))) return -EFAULT; hlp = ubase + (((char *)e + e->target_offset) - base); t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase); if (ret != 0) return ret; ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase); if (ret != 0) return ret; ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t), t->u.target->usersize, t->target_size); if (ret != 0) return ret; return 0; } static int copy_counters_to_user(struct ebt_table *t, const struct ebt_counter *oldcounters, void __user *user, unsigned int num_counters, unsigned int nentries) { struct ebt_counter *counterstmp; int ret = 0; /* userspace might not need the counters */ if (num_counters == 0) return 0; if (num_counters != nentries) { BUGPRINT("Num_counters wrong\n"); return -EINVAL; } counterstmp = vmalloc(nentries * sizeof(*counterstmp)); if (!counterstmp) return -ENOMEM; write_lock_bh(&t->lock); get_counters(oldcounters, counterstmp, nentries); write_unlock_bh(&t->lock); if (copy_to_user(user, counterstmp, nentries * sizeof(struct ebt_counter))) ret = -EFAULT; vfree(counterstmp); return ret; } /* called with ebt_mutex locked */ static int copy_everything_to_user(struct ebt_table *t, void __user *user, const int *len, int cmd) { struct ebt_replace tmp; const struct ebt_counter *oldcounters; unsigned int entries_size, nentries; int ret; char *entries; if (cmd == EBT_SO_GET_ENTRIES) { entries_size = t->private->entries_size; nentries = t->private->nentries; entries = t->private->entries; oldcounters = t->private->counters; } else { entries_size = t->table->entries_size; nentries = t->table->nentries; entries = t->table->entries; oldcounters = t->table->counters; } if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; if (*len != sizeof(struct ebt_replace) + entries_size + (tmp.num_counters ? 
nentries * sizeof(struct ebt_counter) : 0)) return -EINVAL; if (tmp.nentries != nentries) { BUGPRINT("Nentries wrong\n"); return -EINVAL; } if (tmp.entries_size != entries_size) { BUGPRINT("Wrong size\n"); return -EINVAL; } ret = copy_counters_to_user(t, oldcounters, tmp.counters, tmp.num_counters, nentries); if (ret) return ret; /* set the match/watcher/target names right */ return EBT_ENTRY_ITERATE(entries, entries_size, ebt_entry_to_user, entries, tmp.entries); } static int do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; struct net *net = sock_net(sk); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case EBT_SO_SET_ENTRIES: ret = do_replace(net, user, len); break; case EBT_SO_SET_COUNTERS: ret = update_counters(net, user, len); break; default: ret = -EINVAL; } return ret; } static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; struct ebt_replace tmp; struct ebt_table *t; struct net *net = sock_net(sk); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; tmp.name[sizeof(tmp.name) - 1] = '\0'; t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); if (!t) return ret; switch (cmd) { case EBT_SO_GET_INFO: case EBT_SO_GET_INIT_INFO: if (*len != sizeof(struct ebt_replace)) { ret = -EINVAL; mutex_unlock(&ebt_mutex); break; } if (cmd == EBT_SO_GET_INFO) { tmp.nentries = t->private->nentries; tmp.entries_size = t->private->entries_size; tmp.valid_hooks = t->valid_hooks; } else { tmp.nentries = t->table->nentries; tmp.entries_size = t->table->entries_size; tmp.valid_hooks = t->table->valid_hooks; } mutex_unlock(&ebt_mutex); if (copy_to_user(user, &tmp, *len) != 0) { BUGPRINT("c2u Didn't work\n"); ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_ENTRIES: case EBT_SO_GET_INIT_ENTRIES: ret = copy_everything_to_user(t, user, len, cmd); mutex_unlock(&ebt_mutex); break; default: mutex_unlock(&ebt_mutex); ret = -EINVAL; } return ret; } #ifdef CONFIG_COMPAT /* 32 bit-userspace compatibility definitions. */ struct compat_ebt_replace { char name[EBT_TABLE_MAXNAMELEN]; compat_uint_t valid_hooks; compat_uint_t nentries; compat_uint_t entries_size; /* start of the chains */ compat_uptr_t hook_entry[NF_BR_NUMHOOKS]; /* nr of counters userspace expects back */ compat_uint_t num_counters; /* where the kernel will put the old counters. */ compat_uptr_t counters; compat_uptr_t entries; }; /* struct ebt_entry_match, _target and _watcher have same layout */ struct compat_ebt_entry_mwt { union { char name[EBT_FUNCTION_MAXNAMELEN]; compat_uptr_t ptr; } u; compat_uint_t match_size; compat_uint_t data[0]; }; /* account for possible padding between match_size and ->data */ static int ebt_compat_entry_padsize(void) { BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) < COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt))); return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) - COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)); } static int ebt_compat_match_offset(const struct xt_match *match, unsigned int userlen) { /* ebt_among needs special handling. The kernel .matchsize is * set to -1 at registration time; at runtime an EBT_ALIGN()ed * value is expected. * Example: userspace sends 4500, ebt_among.c wants 4504. 
*/ if (unlikely(match->matchsize == -1)) return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen); return xt_compat_match_offset(match); } static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, unsigned int *size) { const struct xt_match *match = m->u.match; struct compat_ebt_entry_mwt __user *cm = *dstptr; int off = ebt_compat_match_offset(match, m->match_size); compat_uint_t msize = m->match_size - off; if (WARN_ON(off >= m->match_size)) return -EINVAL; if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) || put_user(msize, &cm->match_size)) return -EFAULT; if (match->compat_to_user) { if (match->compat_to_user(cm->data, m->data)) return -EFAULT; } else { if (xt_data_to_user(cm->data, m->data, match->usersize, msize, COMPAT_XT_ALIGN(msize))) return -EFAULT; } *size -= ebt_compat_entry_padsize() + off; *dstptr = cm->data; *dstptr += msize; return 0; } static int compat_target_to_user(struct ebt_entry_target *t, void __user **dstptr, unsigned int *size) { const struct xt_target *target = t->u.target; struct compat_ebt_entry_mwt __user *cm = *dstptr; int off = xt_compat_target_offset(target); compat_uint_t tsize = t->target_size - off; if (WARN_ON(off >= t->target_size)) return -EINVAL; if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) return -EFAULT; if (target->compat_to_user) { if (target->compat_to_user(cm->data, t->data)) return -EFAULT; } else { if (xt_data_to_user(cm->data, t->data, target->usersize, tsize, COMPAT_XT_ALIGN(tsize))) return -EFAULT; } *size -= ebt_compat_entry_padsize() + off; *dstptr = cm->data; *dstptr += tsize; return 0; } static int compat_watcher_to_user(struct ebt_entry_watcher *w, void __user **dstptr, unsigned int *size) { return compat_target_to_user((struct ebt_entry_target *)w, dstptr, size); } static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr, unsigned int *size) { struct ebt_entry_target *t; struct ebt_entry __user *ce; u32 watchers_offset, target_offset, next_offset; compat_uint_t origsize; int ret; if (e->bitmask == 0) { if (*size < sizeof(struct ebt_entries)) return -EINVAL; if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries))) return -EFAULT; *dstptr += sizeof(struct ebt_entries); *size -= sizeof(struct ebt_entries); return 0; } if (*size < sizeof(*ce)) return -EINVAL; ce = *dstptr; if (copy_to_user(ce, e, sizeof(*ce))) return -EFAULT; origsize = *size; *dstptr += sizeof(*ce); ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size); if (ret) return ret; watchers_offset = e->watchers_offset - (origsize - *size); ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size); if (ret) return ret; target_offset = e->target_offset - (origsize - *size); t = (struct ebt_entry_target *) ((char *) e + e->target_offset); ret = compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(watchers_offset, &ce->watchers_offset) || put_user(target_offset, &ce->target_offset) || put_user(next_offset, &ce->next_offset)) return -EFAULT; *size -= sizeof(*ce); return 0; } static int compat_calc_match(struct ebt_entry_match *m, int *off) { *off += ebt_compat_match_offset(m->u.match, m->match_size); *off += ebt_compat_entry_padsize(); return 0; } static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off) { *off += xt_compat_target_offset(w->u.watcher); *off += ebt_compat_entry_padsize(); return 0; } static int compat_calc_entry(const struct ebt_entry *e, const 
struct ebt_table_info *info, const void *base, struct compat_ebt_replace *newinfo) { const struct ebt_entry_target *t; unsigned int entry_offset; int off, ret, i; if (e->bitmask == 0) return 0; off = 0; entry_offset = (void *)e - base; EBT_MATCH_ITERATE(e, compat_calc_match, &off); EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off); t = (const struct ebt_entry_target *) ((char *) e + e->target_offset); off += xt_compat_target_offset(t->u.target); off += ebt_compat_entry_padsize(); newinfo->entries_size -= off; ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_BR_NUMHOOKS; i++) { const void *hookptr = info->hook_entry[i]; if (info->hook_entry[i] && (e < (struct ebt_entry *)(base - hookptr))) { newinfo->hook_entry[i] -= off; pr_debug("0x%08X -> 0x%08X\n", newinfo->hook_entry[i] + off, newinfo->hook_entry[i]); } } return 0; } static int compat_table_info(const struct ebt_table_info *info, struct compat_ebt_replace *newinfo) { unsigned int size = info->entries_size; const void *entries = info->entries; newinfo->entries_size = size; xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); } static int compat_copy_everything_to_user(struct ebt_table *t, void __user *user, int *len, int cmd) { struct compat_ebt_replace repl, tmp; struct ebt_counter *oldcounters; struct ebt_table_info tinfo; int ret; void __user *pos; memset(&tinfo, 0, sizeof(tinfo)); if (cmd == EBT_SO_GET_ENTRIES) { tinfo.entries_size = t->private->entries_size; tinfo.nentries = t->private->nentries; tinfo.entries = t->private->entries; oldcounters = t->private->counters; } else { tinfo.entries_size = t->table->entries_size; tinfo.nentries = t->table->nentries; tinfo.entries = t->table->entries; oldcounters = t->table->counters; } if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; if (tmp.nentries != tinfo.nentries || (tmp.num_counters && tmp.num_counters != tinfo.nentries)) return -EINVAL; memcpy(&repl, &tmp, sizeof(repl)); if (cmd == EBT_SO_GET_ENTRIES) ret = compat_table_info(t->private, &repl); else ret = compat_table_info(&tinfo, &repl); if (ret) return ret; if (*len != sizeof(tmp) + repl.entries_size + (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) { pr_err("wrong size: *len %d, entries_size %u, replsz %d\n", *len, tinfo.entries_size, repl.entries_size); return -EINVAL; } /* userspace might not need the counters */ ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters), tmp.num_counters, tinfo.nentries); if (ret) return ret; pos = compat_ptr(tmp.entries); return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size, compat_copy_entry_to_user, &pos, &tmp.entries_size); } struct ebt_entries_buf_state { char *buf_kern_start; /* kernel buffer to copy (translated) data to */ u32 buf_kern_len; /* total size of kernel buffer */ u32 buf_kern_offset; /* amount of data copied so far */ u32 buf_user_offset; /* read position in userspace buffer */ }; static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz) { state->buf_kern_offset += sz; return state->buf_kern_offset >= sz ? 
0 : -EINVAL; } static int ebt_buf_add(struct ebt_entries_buf_state *state, void *data, unsigned int sz) { if (state->buf_kern_start == NULL) goto count_only; if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) return -EINVAL; memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); count_only: state->buf_user_offset += sz; return ebt_buf_count(state, sz); } static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) { char *b = state->buf_kern_start; if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) return -EINVAL; if (b != NULL && sz > 0) memset(b + state->buf_kern_offset, 0, sz); /* do not adjust ->buf_user_offset here, we added kernel-side padding */ return ebt_buf_count(state, sz); } enum compat_mwt { EBT_COMPAT_MATCH, EBT_COMPAT_WATCHER, EBT_COMPAT_TARGET, }; static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, enum compat_mwt compat_mwt, struct ebt_entries_buf_state *state, const unsigned char *base) { char name[EBT_FUNCTION_MAXNAMELEN]; struct xt_match *match; struct xt_target *wt; void *dst = NULL; int off, pad = 0; unsigned int size_kern, match_size = mwt->match_size; strlcpy(name, mwt->u.name, sizeof(name)); if (state->buf_kern_start) dst = state->buf_kern_start + state->buf_kern_offset; switch (compat_mwt) { case EBT_COMPAT_MATCH: match = xt_request_find_match(NFPROTO_BRIDGE, name, 0); if (IS_ERR(match)) return PTR_ERR(match); off = ebt_compat_match_offset(match, match_size); if (dst) { if (match->compat_from_user) match->compat_from_user(dst, mwt->data); else memcpy(dst, mwt->data, match_size); } size_kern = match->matchsize; if (unlikely(size_kern == -1)) size_kern = match_size; module_put(match->me); break; case EBT_COMPAT_WATCHER: /* fallthrough */ case EBT_COMPAT_TARGET: wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0); if (IS_ERR(wt)) return PTR_ERR(wt); off = xt_compat_target_offset(wt); if (dst) { if (wt->compat_from_user) wt->compat_from_user(dst, mwt->data); else memcpy(dst, mwt->data, match_size); } size_kern = wt->targetsize; module_put(wt->me); break; default: return -EINVAL; } state->buf_kern_offset += match_size + off; state->buf_user_offset += match_size; pad = XT_ALIGN(size_kern) - size_kern; if (pad > 0 && dst) { if (WARN_ON(state->buf_kern_len <= pad)) return -EINVAL; if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) return -EINVAL; memset(dst + size_kern, 0, pad); } return off + match_size; } /* return size of all matches, watchers or target, including necessary * alignment and padding. 
*/ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, unsigned int size_left, enum compat_mwt type, struct ebt_entries_buf_state *state, const void *base) { int growth = 0; char *buf; if (size_left == 0) return 0; buf = (char *) match32; while (size_left >= sizeof(*match32)) { struct ebt_entry_match *match_kern; int ret; match_kern = (struct ebt_entry_match *) state->buf_kern_start; if (match_kern) { char *tmp; tmp = state->buf_kern_start + state->buf_kern_offset; match_kern = (struct ebt_entry_match *) tmp; } ret = ebt_buf_add(state, buf, sizeof(*match32)); if (ret < 0) return ret; size_left -= sizeof(*match32); /* add padding before match->data (if any) */ ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize()); if (ret < 0) return ret; if (match32->match_size > size_left) return -EINVAL; size_left -= match32->match_size; ret = compat_mtw_from_user(match32, type, state, base); if (ret < 0) return ret; if (WARN_ON(ret < match32->match_size)) return -EINVAL; growth += ret - match32->match_size; growth += ebt_compat_entry_padsize(); buf += sizeof(*match32); buf += match32->match_size; if (match_kern) match_kern->match_size = ret; if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) return -EINVAL; match32 = (struct compat_ebt_entry_mwt *) buf; } return growth; } /* called for all ebt_entry structures. */ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, unsigned int *total, struct ebt_entries_buf_state *state) { unsigned int i, j, startoff, new_offset = 0; /* stores match/watchers/targets & offset of next struct ebt_entry: */ unsigned int offsets[4]; unsigned int *offsets_update = NULL; int ret; char *buf_start; if (*total < sizeof(struct ebt_entries)) return -EINVAL; if (!entry->bitmask) { *total -= sizeof(struct ebt_entries); return ebt_buf_add(state, entry, sizeof(struct ebt_entries)); } if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry)) return -EINVAL; startoff = state->buf_user_offset; /* pull in most part of ebt_entry, it does not need to be changed. */ ret = ebt_buf_add(state, entry, offsetof(struct ebt_entry, watchers_offset)); if (ret < 0) return ret; offsets[0] = sizeof(struct ebt_entry); /* matches come first */ memcpy(&offsets[1], &entry->watchers_offset, sizeof(offsets) - sizeof(offsets[0])); if (state->buf_kern_start) { buf_start = state->buf_kern_start + state->buf_kern_offset; offsets_update = (unsigned int *) buf_start; } ret = ebt_buf_add(state, &offsets[1], sizeof(offsets) - sizeof(offsets[0])); if (ret < 0) return ret; buf_start = (char *) entry; /* 0: matches offset, always follows ebt_entry. * 1: watchers offset, from ebt_entry structure * 2: target offset, from ebt_entry structure * 3: next ebt_entry offset, from ebt_entry structure * * offsets are relative to beginning of struct ebt_entry (i.e., 0). 
*/ for (i = 0; i < 4 ; ++i) { if (offsets[i] >= *total) return -EINVAL; if (i == 0) continue; if (offsets[i-1] > offsets[i]) return -EINVAL; } for (i = 0, j = 1 ; j < 4 ; j++, i++) { struct compat_ebt_entry_mwt *match32; unsigned int size; char *buf = buf_start + offsets[i]; if (offsets[i] > offsets[j]) return -EINVAL; match32 = (struct compat_ebt_entry_mwt *) buf; size = offsets[j] - offsets[i]; ret = ebt_size_mwt(match32, size, i, state, base); if (ret < 0) return ret; new_offset += ret; if (offsets_update && new_offset) { pr_debug("change offset %d to %d\n", offsets_update[i], offsets[j] + new_offset); offsets_update[i] = offsets[j] + new_offset; } } if (state->buf_kern_start == NULL) { unsigned int offset = buf_start - (char *) base; ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); if (ret < 0) return ret; } startoff = state->buf_user_offset - startoff; if (WARN_ON(*total < startoff)) return -EINVAL; *total -= startoff; return 0; } /* repl->entries_size is the size of the ebt_entry blob in userspace. * It might need more memory when copied to a 64 bit kernel in case * userspace is 32-bit. So, first task: find out how much memory is needed. * * Called before validation is performed. */ static int compat_copy_entries(unsigned char *data, unsigned int size_user, struct ebt_entries_buf_state *state) { unsigned int size_remaining = size_user; int ret; ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data, &size_remaining, state); if (ret < 0) return ret; WARN_ON(size_remaining); return state->buf_kern_offset; } static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, void __user *user, unsigned int len) { struct compat_ebt_replace tmp; int i; if (len < sizeof(tmp)) return -EINVAL; if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) return -EINVAL; if (tmp.entries_size == 0) return -EINVAL; if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry)); /* starting with hook_entry, 32 vs. 
64 bit structures are different */ for (i = 0; i < NF_BR_NUMHOOKS; i++) repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]); repl->num_counters = tmp.num_counters; repl->counters = compat_ptr(tmp.counters); repl->entries = compat_ptr(tmp.entries); return 0; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret, i, countersize, size64; struct ebt_table_info *newinfo; struct ebt_replace tmp; struct ebt_entries_buf_state state; void *entries_tmp; ret = compat_copy_ebt_replace_from_user(&tmp, user, len); if (ret) { /* try real handler in case userland supplied needed padding */ if (ret == -EINVAL && do_replace(net, user, len) == 0) ret = 0; return ret; } countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); if (!newinfo) return -ENOMEM; if (countersize) memset(newinfo->counters, 0, countersize); memset(&state, 0, sizeof(state)); newinfo->entries = vmalloc(tmp.entries_size); if (!newinfo->entries) { ret = -ENOMEM; goto free_newinfo; } if (copy_from_user( newinfo->entries, tmp.entries, tmp.entries_size) != 0) { ret = -EFAULT; goto free_entries; } entries_tmp = newinfo->entries; xt_compat_lock(NFPROTO_BRIDGE); xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (ret < 0) goto out_unlock; pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n", tmp.entries_size, state.buf_kern_offset, state.buf_user_offset, xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size)); size64 = ret; newinfo->entries = vmalloc(size64); if (!newinfo->entries) { vfree(entries_tmp); ret = -ENOMEM; goto out_unlock; } memset(&state, 0, sizeof(state)); state.buf_kern_start = newinfo->entries; state.buf_kern_len = size64; ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (WARN_ON(ret < 0)) goto out_unlock; vfree(entries_tmp); tmp.entries_size = size64; for (i = 0; i < NF_BR_NUMHOOKS; i++) { char __user *usrptr; if (tmp.hook_entry[i]) { unsigned int delta; usrptr = (char __user *) tmp.hook_entry[i]; delta = usrptr - tmp.entries; usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta); tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr; } } xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); ret = do_replace_finish(net, &tmp, newinfo); if (ret == 0) return ret; free_entries: vfree(newinfo->entries); free_newinfo: vfree(newinfo); return ret; out_unlock: xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); goto free_entries; } static int compat_update_counters(struct net *net, void __user *user, unsigned int len) { struct compat_ebt_replace hlp; if (copy_from_user(&hlp, user, sizeof(hlp))) return -EFAULT; /* try real handler in case userland supplied needed padding */ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) return update_counters(net, user, len); return do_update_counters(net, hlp.name, compat_ptr(hlp.counters), hlp.num_counters, user, len); } static int compat_do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; struct net *net = sock_net(sk); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case EBT_SO_SET_ENTRIES: ret = compat_do_replace(net, user, len); break; case EBT_SO_SET_COUNTERS: ret = compat_update_counters(net, user, len); break; default: ret = -EINVAL; } return ret; } static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; struct 
compat_ebt_replace tmp; struct ebt_table *t; struct net *net = sock_net(sk); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; /* try real handler in case userland supplied needed padding */ if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp)) return do_ebt_get_ctl(sk, cmd, user, len); if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; tmp.name[sizeof(tmp.name) - 1] = '\0'; t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); if (!t) return ret; xt_compat_lock(NFPROTO_BRIDGE); switch (cmd) { case EBT_SO_GET_INFO: tmp.nentries = t->private->nentries; ret = compat_table_info(t->private, &tmp); if (ret) goto out; tmp.valid_hooks = t->valid_hooks; if (copy_to_user(user, &tmp, *len) != 0) { ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_INIT_INFO: tmp.nentries = t->table->nentries; tmp.entries_size = t->table->entries_size; tmp.valid_hooks = t->table->valid_hooks; if (copy_to_user(user, &tmp, *len) != 0) { ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_ENTRIES: case EBT_SO_GET_INIT_ENTRIES: /* try real handler first in case of userland-side padding. * in case we are dealing with an 'ordinary' 32 bit binary * without 64bit compatibility padding, this will fail right * after copy_from_user when the *len argument is validated. * * the compat_ variant needs to do one pass over the kernel * data set to adjust for size differences before it the check. */ if (copy_everything_to_user(t, user, len, cmd) == 0) ret = 0; else ret = compat_copy_everything_to_user(t, user, len, cmd); break; default: ret = -EINVAL; } out: xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); mutex_unlock(&ebt_mutex); return ret; } #endif static struct nf_sockopt_ops ebt_sockopts = { .pf = PF_INET, .set_optmin = EBT_BASE_CTL, .set_optmax = EBT_SO_SET_MAX + 1, .set = do_ebt_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ebt_set_ctl, #endif .get_optmin = EBT_BASE_CTL, .get_optmax = EBT_SO_GET_MAX + 1, .get = do_ebt_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ebt_get_ctl, #endif .owner = THIS_MODULE, }; static int __init ebtables_init(void) { int ret; ret = xt_register_target(&ebt_standard_target); if (ret < 0) return ret; ret = nf_register_sockopt(&ebt_sockopts); if (ret < 0) { xt_unregister_target(&ebt_standard_target); return ret; } return 0; } static void __exit ebtables_fini(void) { nf_unregister_sockopt(&ebt_sockopts); xt_unregister_target(&ebt_standard_target); } EXPORT_SYMBOL(ebt_register_table); EXPORT_SYMBOL(ebt_unregister_table); EXPORT_SYMBOL(ebt_do_table); module_init(ebtables_init); module_exit(ebtables_fini); MODULE_LICENSE("GPL");
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, unsigned int size_left, enum compat_mwt type, struct ebt_entries_buf_state *state, const void *base) { int growth = 0; char *buf; if (size_left == 0) return 0; buf = (char *) match32; while (size_left >= sizeof(*match32)) { struct ebt_entry_match *match_kern; int ret; match_kern = (struct ebt_entry_match *) state->buf_kern_start; if (match_kern) { char *tmp; tmp = state->buf_kern_start + state->buf_kern_offset; match_kern = (struct ebt_entry_match *) tmp; } ret = ebt_buf_add(state, buf, sizeof(*match32)); if (ret < 0) return ret; size_left -= sizeof(*match32); /* add padding before match->data (if any) */ ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize()); if (ret < 0) return ret; if (match32->match_size > size_left) return -EINVAL; size_left -= match32->match_size; ret = compat_mtw_from_user(match32, type, state, base); if (ret < 0) return ret; if (WARN_ON(ret < match32->match_size)) return -EINVAL; growth += ret - match32->match_size; growth += ebt_compat_entry_padsize(); buf += sizeof(*match32); buf += match32->match_size; if (match_kern) match_kern->match_size = ret; WARN_ON(type == EBT_COMPAT_TARGET && size_left); match32 = (struct compat_ebt_entry_mwt *) buf; } return growth; }
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, unsigned int size_left, enum compat_mwt type, struct ebt_entries_buf_state *state, const void *base) { int growth = 0; char *buf; if (size_left == 0) return 0; buf = (char *) match32; while (size_left >= sizeof(*match32)) { struct ebt_entry_match *match_kern; int ret; match_kern = (struct ebt_entry_match *) state->buf_kern_start; if (match_kern) { char *tmp; tmp = state->buf_kern_start + state->buf_kern_offset; match_kern = (struct ebt_entry_match *) tmp; } ret = ebt_buf_add(state, buf, sizeof(*match32)); if (ret < 0) return ret; size_left -= sizeof(*match32); /* add padding before match->data (if any) */ ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize()); if (ret < 0) return ret; if (match32->match_size > size_left) return -EINVAL; size_left -= match32->match_size; ret = compat_mtw_from_user(match32, type, state, base); if (ret < 0) return ret; if (WARN_ON(ret < match32->match_size)) return -EINVAL; growth += ret - match32->match_size; growth += ebt_compat_entry_padsize(); buf += sizeof(*match32); buf += match32->match_size; if (match_kern) match_kern->match_size = ret; if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) return -EINVAL; match32 = (struct compat_ebt_entry_mwt *) buf; } return growth; }
{'added': [(2063, '\t\tif (WARN_ON(type == EBT_COMPAT_TARGET && size_left))'), (2064, '\t\t\treturn -EINVAL;'), (2065, ''), (2121, '\tfor (i = 0; i < 4 ; ++i) {'), (2122, '\t\tif (offsets[i] >= *total)'), (2123, '\t\t\treturn -EINVAL;'), (2124, '\t\tif (i == 0)'), (2125, '\t\t\tcontinue;'), (2126, '\t\tif (offsets[i-1] > offsets[i])'), (2127, '\t\t\treturn -EINVAL;'), (2128, '\t}'), (2129, '')], 'deleted': [(2063, '\t\tWARN_ON(type == EBT_COMPAT_TARGET && size_left);')]}
12
1
1,939
12,955
https://github.com/torvalds/linux
CVE-2018-1068
['CWE-787']
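Editor's illustration (not part of the dataset row above): the record for CVE-2018-1068 (CWE-787) hardens the ebtables compat path in two ways visible in its diff field — the WARN_ON on leftover target bytes now returns -EINVAL, and a loop is added that rejects user-supplied offsets that point past the total buffer size or run backwards. The standalone sketch below shows only that offset-validation pattern; the function and variable names are mine, not the kernel's.

#include <stdio.h>

/* Reject any offset table whose entries point past the buffer or decrease --
 * the same invariant the added loop in the diff above enforces. */
static int validate_offsets(const unsigned int *offsets, unsigned int count,
                            unsigned int total)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (offsets[i] >= total)
			return -1;                      /* points past the buffer */
		if (i > 0 && offsets[i - 1] > offsets[i])
			return -1;                      /* offsets must not go backwards */
	}
	return 0;
}

int main(void)
{
	unsigned int good[4] = { 0, 16, 32, 48 };
	unsigned int bad[4]  = { 0, 64, 32, 48 };       /* out of order */

	printf("good: %d\n", validate_offsets(good, 4, 128));   /* prints 0  */
	printf("bad:  %d\n", validate_offsets(bad, 4, 128));    /* prints -1 */
	return 0;
}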
d1_lib.c
dtls1_clear_queues
/* ssl/d1_lib.c */ /* * DTLS implementation written by Nagendra Modadugu * (nagendra@cs.stanford.edu) for the OpenSSL project 2005. */ /* ==================================================================== * Copyright (c) 1999-2005 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). 
* */ #include <stdio.h> #define USE_SOCKETS #include <openssl/objects.h> #include "ssl_locl.h" #if defined(OPENSSL_SYS_VMS) #include <sys/timeb.h> #endif static void get_current_time(struct timeval *t); static void dtls1_set_handshake_header(SSL *s, int type, unsigned long len); static int dtls1_handshake_write(SSL *s); const char dtls1_version_str[]="DTLSv1" OPENSSL_VERSION_PTEXT; int dtls1_listen(SSL *s, struct sockaddr *client); SSL3_ENC_METHOD DTLSv1_enc_data={ tls1_enc, tls1_mac, tls1_setup_key_block, tls1_generate_master_secret, tls1_change_cipher_state, tls1_final_finish_mac, TLS1_FINISH_MAC_LENGTH, tls1_cert_verify_mac, TLS_MD_CLIENT_FINISH_CONST,TLS_MD_CLIENT_FINISH_CONST_SIZE, TLS_MD_SERVER_FINISH_CONST,TLS_MD_SERVER_FINISH_CONST_SIZE, tls1_alert_code, tls1_export_keying_material, SSL_ENC_FLAG_DTLS|SSL_ENC_FLAG_EXPLICIT_IV, DTLS1_HM_HEADER_LENGTH, dtls1_set_handshake_header, dtls1_handshake_write }; SSL3_ENC_METHOD DTLSv1_2_enc_data={ tls1_enc, tls1_mac, tls1_setup_key_block, tls1_generate_master_secret, tls1_change_cipher_state, tls1_final_finish_mac, TLS1_FINISH_MAC_LENGTH, tls1_cert_verify_mac, TLS_MD_CLIENT_FINISH_CONST,TLS_MD_CLIENT_FINISH_CONST_SIZE, TLS_MD_SERVER_FINISH_CONST,TLS_MD_SERVER_FINISH_CONST_SIZE, tls1_alert_code, tls1_export_keying_material, SSL_ENC_FLAG_DTLS|SSL_ENC_FLAG_EXPLICIT_IV|SSL_ENC_FLAG_SIGALGS |SSL_ENC_FLAG_SHA256_PRF|SSL_ENC_FLAG_TLS1_2_CIPHERS, DTLS1_HM_HEADER_LENGTH, dtls1_set_handshake_header, dtls1_handshake_write }; long dtls1_default_timeout(void) { /* 2 hours, the 24 hours mentioned in the DTLSv1 spec * is way too long for http, the cache would over fill */ return(60*60*2); } int dtls1_new(SSL *s) { DTLS1_STATE *d1; if (!ssl3_new(s)) return(0); if ((d1=OPENSSL_malloc(sizeof *d1)) == NULL) return (0); memset(d1,0, sizeof *d1); /* d1->handshake_epoch=0; */ d1->unprocessed_rcds.q=pqueue_new(); d1->processed_rcds.q=pqueue_new(); d1->buffered_messages = pqueue_new(); d1->sent_messages=pqueue_new(); d1->buffered_app_data.q=pqueue_new(); if ( s->server) { d1->cookie_len = sizeof(s->d1->cookie); } if( ! d1->unprocessed_rcds.q || ! d1->processed_rcds.q || ! d1->buffered_messages || ! d1->sent_messages || ! 
d1->buffered_app_data.q) { if ( d1->unprocessed_rcds.q) pqueue_free(d1->unprocessed_rcds.q); if ( d1->processed_rcds.q) pqueue_free(d1->processed_rcds.q); if ( d1->buffered_messages) pqueue_free(d1->buffered_messages); if ( d1->sent_messages) pqueue_free(d1->sent_messages); if ( d1->buffered_app_data.q) pqueue_free(d1->buffered_app_data.q); OPENSSL_free(d1); return (0); } s->d1=d1; s->method->ssl_clear(s); return(1); } static void dtls1_clear_queues(SSL *s) { pitem *item = NULL; hm_fragment *frag = NULL; DTLS1_RECORD_DATA *rdata; while( (item = pqueue_pop(s->d1->unprocessed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->processed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->buffered_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->sent_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } } void dtls1_free(SSL *s) { ssl3_free(s); dtls1_clear_queues(s); pqueue_free(s->d1->unprocessed_rcds.q); pqueue_free(s->d1->processed_rcds.q); pqueue_free(s->d1->buffered_messages); pqueue_free(s->d1->sent_messages); pqueue_free(s->d1->buffered_app_data.q); OPENSSL_free(s->d1); s->d1 = NULL; } void dtls1_clear(SSL *s) { pqueue unprocessed_rcds; pqueue processed_rcds; pqueue buffered_messages; pqueue sent_messages; pqueue buffered_app_data; unsigned int mtu; if (s->d1) { unprocessed_rcds = s->d1->unprocessed_rcds.q; processed_rcds = s->d1->processed_rcds.q; buffered_messages = s->d1->buffered_messages; sent_messages = s->d1->sent_messages; buffered_app_data = s->d1->buffered_app_data.q; mtu = s->d1->mtu; dtls1_clear_queues(s); memset(s->d1, 0, sizeof(*(s->d1))); if (s->server) { s->d1->cookie_len = sizeof(s->d1->cookie); } if (SSL_get_options(s) & SSL_OP_NO_QUERY_MTU) { s->d1->mtu = mtu; } s->d1->unprocessed_rcds.q = unprocessed_rcds; s->d1->processed_rcds.q = processed_rcds; s->d1->buffered_messages = buffered_messages; s->d1->sent_messages = sent_messages; s->d1->buffered_app_data.q = buffered_app_data; } ssl3_clear(s); if (s->options & SSL_OP_CISCO_ANYCONNECT) s->version=DTLS1_BAD_VER; else if (s->method->version == DTLS_ANY_VERSION) s->version=DTLS1_2_VERSION; else s->version=s->method->version; } long dtls1_ctrl(SSL *s, int cmd, long larg, void *parg) { int ret=0; switch (cmd) { case DTLS_CTRL_GET_TIMEOUT: if (dtls1_get_timeout(s, (struct timeval*) parg) != NULL) { ret = 1; } break; case DTLS_CTRL_HANDLE_TIMEOUT: ret = dtls1_handle_timeout(s); break; case DTLS_CTRL_LISTEN: ret = dtls1_listen(s, parg); break; default: ret = ssl3_ctrl(s, cmd, larg, parg); break; } return(ret); } /* * As it's impossible to use stream ciphers in "datagram" mode, this * simple filter is designed to disengage them in DTLS. Unfortunately * there is no universal way to identify stream SSL_CIPHER, so we have * to explicitly list their SSL_* codes. Currently RC4 is the only one * available, but if new ones emerge, they will have to be added... 
*/ const SSL_CIPHER *dtls1_get_cipher(unsigned int u) { const SSL_CIPHER *ciph = ssl3_get_cipher(u); if (ciph != NULL) { if (ciph->algorithm_enc == SSL_RC4) return NULL; } return ciph; } void dtls1_start_timer(SSL *s) { #ifndef OPENSSL_NO_SCTP /* Disable timer for SCTP */ if (BIO_dgram_is_sctp(SSL_get_wbio(s))) { memset(&(s->d1->next_timeout), 0, sizeof(struct timeval)); return; } #endif /* If timer is not set, initialize duration with 1 second */ if (s->d1->next_timeout.tv_sec == 0 && s->d1->next_timeout.tv_usec == 0) { s->d1->timeout_duration = 1; } /* Set timeout to current time */ get_current_time(&(s->d1->next_timeout)); /* Add duration to current time */ s->d1->next_timeout.tv_sec += s->d1->timeout_duration; BIO_ctrl(SSL_get_rbio(s), BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT, 0, &(s->d1->next_timeout)); } struct timeval* dtls1_get_timeout(SSL *s, struct timeval* timeleft) { struct timeval timenow; /* If no timeout is set, just return NULL */ if (s->d1->next_timeout.tv_sec == 0 && s->d1->next_timeout.tv_usec == 0) { return NULL; } /* Get current time */ get_current_time(&timenow); /* If timer already expired, set remaining time to 0 */ if (s->d1->next_timeout.tv_sec < timenow.tv_sec || (s->d1->next_timeout.tv_sec == timenow.tv_sec && s->d1->next_timeout.tv_usec <= timenow.tv_usec)) { memset(timeleft, 0, sizeof(struct timeval)); return timeleft; } /* Calculate time left until timer expires */ memcpy(timeleft, &(s->d1->next_timeout), sizeof(struct timeval)); timeleft->tv_sec -= timenow.tv_sec; timeleft->tv_usec -= timenow.tv_usec; if (timeleft->tv_usec < 0) { timeleft->tv_sec--; timeleft->tv_usec += 1000000; } /* If remaining time is less than 15 ms, set it to 0 * to prevent issues because of small devergences with * socket timeouts. */ if (timeleft->tv_sec == 0 && timeleft->tv_usec < 15000) { memset(timeleft, 0, sizeof(struct timeval)); } return timeleft; } int dtls1_is_timer_expired(SSL *s) { struct timeval timeleft; /* Get time left until timeout, return false if no timer running */ if (dtls1_get_timeout(s, &timeleft) == NULL) { return 0; } /* Return false if timer is not expired yet */ if (timeleft.tv_sec > 0 || timeleft.tv_usec > 0) { return 0; } /* Timer expired, so return true */ return 1; } void dtls1_double_timeout(SSL *s) { s->d1->timeout_duration *= 2; if (s->d1->timeout_duration > 60) s->d1->timeout_duration = 60; dtls1_start_timer(s); } void dtls1_stop_timer(SSL *s) { /* Reset everything */ memset(&(s->d1->timeout), 0, sizeof(struct dtls1_timeout_st)); memset(&(s->d1->next_timeout), 0, sizeof(struct timeval)); s->d1->timeout_duration = 1; BIO_ctrl(SSL_get_rbio(s), BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT, 0, &(s->d1->next_timeout)); /* Clear retransmission buffer */ dtls1_clear_record_buffer(s); } int dtls1_check_timeout_num(SSL *s) { s->d1->timeout.num_alerts++; /* Reduce MTU after 2 unsuccessful retransmissions */ if (s->d1->timeout.num_alerts > 2) { s->d1->mtu = BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_GET_FALLBACK_MTU, 0, NULL); } if (s->d1->timeout.num_alerts > DTLS1_TMO_ALERT_COUNT) { /* fail the connection, enough alerts have been sent */ SSLerr(SSL_F_DTLS1_CHECK_TIMEOUT_NUM,SSL_R_READ_TIMEOUT_EXPIRED); return -1; } return 0; } int dtls1_handle_timeout(SSL *s) { /* if no timer is expired, don't do anything */ if (!dtls1_is_timer_expired(s)) { return 0; } dtls1_double_timeout(s); if (dtls1_check_timeout_num(s) < 0) return -1; s->d1->timeout.read_timeouts++; if (s->d1->timeout.read_timeouts > DTLS1_TMO_READ_COUNT) { s->d1->timeout.read_timeouts = 1; } #ifndef OPENSSL_NO_HEARTBEATS if 
(s->tlsext_hb_pending) { s->tlsext_hb_pending = 0; return dtls1_heartbeat(s); } #endif dtls1_start_timer(s); return dtls1_retransmit_buffered_messages(s); } static void get_current_time(struct timeval *t) { #if defined(_WIN32) SYSTEMTIME st; union { unsigned __int64 ul; FILETIME ft; } now; GetSystemTime(&st); SystemTimeToFileTime(&st,&now.ft); #ifdef __MINGW32__ now.ul -= 116444736000000000ULL; #else now.ul -= 116444736000000000UI64; /* re-bias to 1/1/1970 */ #endif t->tv_sec = (long)(now.ul/10000000); t->tv_usec = ((int)(now.ul%10000000))/10; #elif defined(OPENSSL_SYS_VMS) struct timeb tb; ftime(&tb); t->tv_sec = (long)tb.time; t->tv_usec = (long)tb.millitm * 1000; #else gettimeofday(t, NULL); #endif } int dtls1_listen(SSL *s, struct sockaddr *client) { int ret; SSL_set_options(s, SSL_OP_COOKIE_EXCHANGE); s->d1->listen = 1; ret = SSL_accept(s); if (ret <= 0) return ret; (void) BIO_dgram_get_peer(SSL_get_rbio(s), client); return 1; } static void dtls1_set_handshake_header(SSL *s, int htype, unsigned long len) { unsigned char *p = (unsigned char *)s->init_buf->data; dtls1_set_message_header(s, p, htype, len, 0, len); s->init_num = (int)len + DTLS1_HM_HEADER_LENGTH; s->init_off = 0; /* Buffer the message to handle re-xmits */ dtls1_buffer_message(s, 0); } static int dtls1_handshake_write(SSL *s) { return dtls1_do_write(s, SSL3_RT_HANDSHAKE); }
/* ssl/d1_lib.c */ /* * DTLS implementation written by Nagendra Modadugu * (nagendra@cs.stanford.edu) for the OpenSSL project 2005. */ /* ==================================================================== * Copyright (c) 1999-2005 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). 
* */ #include <stdio.h> #define USE_SOCKETS #include <openssl/objects.h> #include "ssl_locl.h" #if defined(OPENSSL_SYS_VMS) #include <sys/timeb.h> #endif static void get_current_time(struct timeval *t); static void dtls1_set_handshake_header(SSL *s, int type, unsigned long len); static int dtls1_handshake_write(SSL *s); const char dtls1_version_str[]="DTLSv1" OPENSSL_VERSION_PTEXT; int dtls1_listen(SSL *s, struct sockaddr *client); SSL3_ENC_METHOD DTLSv1_enc_data={ tls1_enc, tls1_mac, tls1_setup_key_block, tls1_generate_master_secret, tls1_change_cipher_state, tls1_final_finish_mac, TLS1_FINISH_MAC_LENGTH, tls1_cert_verify_mac, TLS_MD_CLIENT_FINISH_CONST,TLS_MD_CLIENT_FINISH_CONST_SIZE, TLS_MD_SERVER_FINISH_CONST,TLS_MD_SERVER_FINISH_CONST_SIZE, tls1_alert_code, tls1_export_keying_material, SSL_ENC_FLAG_DTLS|SSL_ENC_FLAG_EXPLICIT_IV, DTLS1_HM_HEADER_LENGTH, dtls1_set_handshake_header, dtls1_handshake_write }; SSL3_ENC_METHOD DTLSv1_2_enc_data={ tls1_enc, tls1_mac, tls1_setup_key_block, tls1_generate_master_secret, tls1_change_cipher_state, tls1_final_finish_mac, TLS1_FINISH_MAC_LENGTH, tls1_cert_verify_mac, TLS_MD_CLIENT_FINISH_CONST,TLS_MD_CLIENT_FINISH_CONST_SIZE, TLS_MD_SERVER_FINISH_CONST,TLS_MD_SERVER_FINISH_CONST_SIZE, tls1_alert_code, tls1_export_keying_material, SSL_ENC_FLAG_DTLS|SSL_ENC_FLAG_EXPLICIT_IV|SSL_ENC_FLAG_SIGALGS |SSL_ENC_FLAG_SHA256_PRF|SSL_ENC_FLAG_TLS1_2_CIPHERS, DTLS1_HM_HEADER_LENGTH, dtls1_set_handshake_header, dtls1_handshake_write }; long dtls1_default_timeout(void) { /* 2 hours, the 24 hours mentioned in the DTLSv1 spec * is way too long for http, the cache would over fill */ return(60*60*2); } int dtls1_new(SSL *s) { DTLS1_STATE *d1; if (!ssl3_new(s)) return(0); if ((d1=OPENSSL_malloc(sizeof *d1)) == NULL) return (0); memset(d1,0, sizeof *d1); /* d1->handshake_epoch=0; */ d1->unprocessed_rcds.q=pqueue_new(); d1->processed_rcds.q=pqueue_new(); d1->buffered_messages = pqueue_new(); d1->sent_messages=pqueue_new(); d1->buffered_app_data.q=pqueue_new(); if ( s->server) { d1->cookie_len = sizeof(s->d1->cookie); } if( ! d1->unprocessed_rcds.q || ! d1->processed_rcds.q || ! d1->buffered_messages || ! d1->sent_messages || ! 
d1->buffered_app_data.q) { if ( d1->unprocessed_rcds.q) pqueue_free(d1->unprocessed_rcds.q); if ( d1->processed_rcds.q) pqueue_free(d1->processed_rcds.q); if ( d1->buffered_messages) pqueue_free(d1->buffered_messages); if ( d1->sent_messages) pqueue_free(d1->sent_messages); if ( d1->buffered_app_data.q) pqueue_free(d1->buffered_app_data.q); OPENSSL_free(d1); return (0); } s->d1=d1; s->method->ssl_clear(s); return(1); } static void dtls1_clear_queues(SSL *s) { pitem *item = NULL; hm_fragment *frag = NULL; DTLS1_RECORD_DATA *rdata; while( (item = pqueue_pop(s->d1->unprocessed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->processed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->buffered_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->sent_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } } void dtls1_free(SSL *s) { ssl3_free(s); dtls1_clear_queues(s); pqueue_free(s->d1->unprocessed_rcds.q); pqueue_free(s->d1->processed_rcds.q); pqueue_free(s->d1->buffered_messages); pqueue_free(s->d1->sent_messages); pqueue_free(s->d1->buffered_app_data.q); OPENSSL_free(s->d1); s->d1 = NULL; } void dtls1_clear(SSL *s) { pqueue unprocessed_rcds; pqueue processed_rcds; pqueue buffered_messages; pqueue sent_messages; pqueue buffered_app_data; unsigned int mtu; if (s->d1) { unprocessed_rcds = s->d1->unprocessed_rcds.q; processed_rcds = s->d1->processed_rcds.q; buffered_messages = s->d1->buffered_messages; sent_messages = s->d1->sent_messages; buffered_app_data = s->d1->buffered_app_data.q; mtu = s->d1->mtu; dtls1_clear_queues(s); memset(s->d1, 0, sizeof(*(s->d1))); if (s->server) { s->d1->cookie_len = sizeof(s->d1->cookie); } if (SSL_get_options(s) & SSL_OP_NO_QUERY_MTU) { s->d1->mtu = mtu; } s->d1->unprocessed_rcds.q = unprocessed_rcds; s->d1->processed_rcds.q = processed_rcds; s->d1->buffered_messages = buffered_messages; s->d1->sent_messages = sent_messages; s->d1->buffered_app_data.q = buffered_app_data; } ssl3_clear(s); if (s->options & SSL_OP_CISCO_ANYCONNECT) s->version=DTLS1_BAD_VER; else if (s->method->version == DTLS_ANY_VERSION) s->version=DTLS1_2_VERSION; else s->version=s->method->version; } long dtls1_ctrl(SSL *s, int cmd, long larg, void *parg) { int ret=0; switch (cmd) { case DTLS_CTRL_GET_TIMEOUT: if (dtls1_get_timeout(s, (struct timeval*) parg) != NULL) { ret = 1; } break; case DTLS_CTRL_HANDLE_TIMEOUT: ret = dtls1_handle_timeout(s); break; case DTLS_CTRL_LISTEN: ret = dtls1_listen(s, parg); break; default: ret = ssl3_ctrl(s, cmd, larg, parg); break; } return(ret); } /* * As it's impossible to use stream ciphers in "datagram" mode, this * simple filter is designed to disengage them in DTLS. Unfortunately * there is no universal way to identify stream SSL_CIPHER, so we have * to explicitly list their SSL_* codes. Currently RC4 is the only one * available, but if new ones emerge, they will have to be added... 
*/ const SSL_CIPHER *dtls1_get_cipher(unsigned int u) { const SSL_CIPHER *ciph = ssl3_get_cipher(u); if (ciph != NULL) { if (ciph->algorithm_enc == SSL_RC4) return NULL; } return ciph; } void dtls1_start_timer(SSL *s) { #ifndef OPENSSL_NO_SCTP /* Disable timer for SCTP */ if (BIO_dgram_is_sctp(SSL_get_wbio(s))) { memset(&(s->d1->next_timeout), 0, sizeof(struct timeval)); return; } #endif /* If timer is not set, initialize duration with 1 second */ if (s->d1->next_timeout.tv_sec == 0 && s->d1->next_timeout.tv_usec == 0) { s->d1->timeout_duration = 1; } /* Set timeout to current time */ get_current_time(&(s->d1->next_timeout)); /* Add duration to current time */ s->d1->next_timeout.tv_sec += s->d1->timeout_duration; BIO_ctrl(SSL_get_rbio(s), BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT, 0, &(s->d1->next_timeout)); } struct timeval* dtls1_get_timeout(SSL *s, struct timeval* timeleft) { struct timeval timenow; /* If no timeout is set, just return NULL */ if (s->d1->next_timeout.tv_sec == 0 && s->d1->next_timeout.tv_usec == 0) { return NULL; } /* Get current time */ get_current_time(&timenow); /* If timer already expired, set remaining time to 0 */ if (s->d1->next_timeout.tv_sec < timenow.tv_sec || (s->d1->next_timeout.tv_sec == timenow.tv_sec && s->d1->next_timeout.tv_usec <= timenow.tv_usec)) { memset(timeleft, 0, sizeof(struct timeval)); return timeleft; } /* Calculate time left until timer expires */ memcpy(timeleft, &(s->d1->next_timeout), sizeof(struct timeval)); timeleft->tv_sec -= timenow.tv_sec; timeleft->tv_usec -= timenow.tv_usec; if (timeleft->tv_usec < 0) { timeleft->tv_sec--; timeleft->tv_usec += 1000000; } /* If remaining time is less than 15 ms, set it to 0 * to prevent issues because of small devergences with * socket timeouts. */ if (timeleft->tv_sec == 0 && timeleft->tv_usec < 15000) { memset(timeleft, 0, sizeof(struct timeval)); } return timeleft; } int dtls1_is_timer_expired(SSL *s) { struct timeval timeleft; /* Get time left until timeout, return false if no timer running */ if (dtls1_get_timeout(s, &timeleft) == NULL) { return 0; } /* Return false if timer is not expired yet */ if (timeleft.tv_sec > 0 || timeleft.tv_usec > 0) { return 0; } /* Timer expired, so return true */ return 1; } void dtls1_double_timeout(SSL *s) { s->d1->timeout_duration *= 2; if (s->d1->timeout_duration > 60) s->d1->timeout_duration = 60; dtls1_start_timer(s); } void dtls1_stop_timer(SSL *s) { /* Reset everything */ memset(&(s->d1->timeout), 0, sizeof(struct dtls1_timeout_st)); memset(&(s->d1->next_timeout), 0, sizeof(struct timeval)); s->d1->timeout_duration = 1; BIO_ctrl(SSL_get_rbio(s), BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT, 0, &(s->d1->next_timeout)); /* Clear retransmission buffer */ dtls1_clear_record_buffer(s); } int dtls1_check_timeout_num(SSL *s) { s->d1->timeout.num_alerts++; /* Reduce MTU after 2 unsuccessful retransmissions */ if (s->d1->timeout.num_alerts > 2) { s->d1->mtu = BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_GET_FALLBACK_MTU, 0, NULL); } if (s->d1->timeout.num_alerts > DTLS1_TMO_ALERT_COUNT) { /* fail the connection, enough alerts have been sent */ SSLerr(SSL_F_DTLS1_CHECK_TIMEOUT_NUM,SSL_R_READ_TIMEOUT_EXPIRED); return -1; } return 0; } int dtls1_handle_timeout(SSL *s) { /* if no timer is expired, don't do anything */ if (!dtls1_is_timer_expired(s)) { return 0; } dtls1_double_timeout(s); if (dtls1_check_timeout_num(s) < 0) return -1; s->d1->timeout.read_timeouts++; if (s->d1->timeout.read_timeouts > DTLS1_TMO_READ_COUNT) { s->d1->timeout.read_timeouts = 1; } #ifndef OPENSSL_NO_HEARTBEATS if 
(s->tlsext_hb_pending) { s->tlsext_hb_pending = 0; return dtls1_heartbeat(s); } #endif dtls1_start_timer(s); return dtls1_retransmit_buffered_messages(s); } static void get_current_time(struct timeval *t) { #if defined(_WIN32) SYSTEMTIME st; union { unsigned __int64 ul; FILETIME ft; } now; GetSystemTime(&st); SystemTimeToFileTime(&st,&now.ft); #ifdef __MINGW32__ now.ul -= 116444736000000000ULL; #else now.ul -= 116444736000000000UI64; /* re-bias to 1/1/1970 */ #endif t->tv_sec = (long)(now.ul/10000000); t->tv_usec = ((int)(now.ul%10000000))/10; #elif defined(OPENSSL_SYS_VMS) struct timeb tb; ftime(&tb); t->tv_sec = (long)tb.time; t->tv_usec = (long)tb.millitm * 1000; #else gettimeofday(t, NULL); #endif } int dtls1_listen(SSL *s, struct sockaddr *client) { int ret; SSL_set_options(s, SSL_OP_COOKIE_EXCHANGE); s->d1->listen = 1; ret = SSL_accept(s); if (ret <= 0) return ret; (void) BIO_dgram_get_peer(SSL_get_rbio(s), client); return 1; } static void dtls1_set_handshake_header(SSL *s, int htype, unsigned long len) { unsigned char *p = (unsigned char *)s->init_buf->data; dtls1_set_message_header(s, p, htype, len, 0, len); s->init_num = (int)len + DTLS1_HM_HEADER_LENGTH; s->init_off = 0; /* Buffer the message to handle re-xmits */ dtls1_buffer_message(s, 0); } static int dtls1_handshake_write(SSL *s) { return dtls1_do_write(s, SSL3_RT_HANDSHAKE); }
static void dtls1_clear_queues(SSL *s) { pitem *item = NULL; hm_fragment *frag = NULL; DTLS1_RECORD_DATA *rdata; while( (item = pqueue_pop(s->d1->unprocessed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->processed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->buffered_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->sent_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } }
static void dtls1_clear_queues(SSL *s) { pitem *item = NULL; hm_fragment *frag = NULL; DTLS1_RECORD_DATA *rdata; while( (item = pqueue_pop(s->d1->unprocessed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->processed_rcds.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } while( (item = pqueue_pop(s->d1->buffered_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->sent_messages)) != NULL) { frag = (hm_fragment *)item->data; OPENSSL_free(frag->fragment); OPENSSL_free(frag); pitem_free(item); } while ( (item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL) { rdata = (DTLS1_RECORD_DATA *) item->data; if (rdata->rbuf.buf) { OPENSSL_free(rdata->rbuf.buf); } OPENSSL_free(item->data); pitem_free(item); } }
{'added': [(205, '\t\trdata = (DTLS1_RECORD_DATA *) item->data;'), (206, '\t\tif (rdata->rbuf.buf)'), (207, '\t\t\t{'), (208, '\t\t\tOPENSSL_free(rdata->rbuf.buf);'), (209, '\t\t\t}'), (210, '\t\tOPENSSL_free(item->data);')], 'deleted': [(205, '\t\tfrag = (hm_fragment *)item->data;'), (206, '\t\tOPENSSL_free(frag->fragment);'), (207, '\t\tOPENSSL_free(frag);')]}
6
3
361
2,189
https://github.com/openssl/openssl
CVE-2014-8176
['CWE-119']
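Editor's illustration (not part of the dataset row above): in the CVE-2014-8176 record, dtls1_clear_queues() drained the buffered_app_data queue as if it held hm_fragment entries, while that queue actually stores DTLS1_RECORD_DATA; the func_after field frees the entries with the layout they really have. The sketch below only illustrates that rule — drain each queue with the free routine matching the element type it stores — using made-up types, not OpenSSL's.

#include <stdlib.h>

struct record_data { unsigned char *buf; size_t len; };       /* app-data style element */
struct hm_fragment { unsigned char *fragment; size_t flen; };  /* handshake style element */

static void free_record(void *data)
{
	struct record_data *r = data;
	free(r->buf);          /* free the payload this type really owns */
	free(r);
}

static void free_fragment(void *data)
{
	struct hm_fragment *f = data;
	free(f->fragment);
	free(f);
}

/* Drain a queue with the free routine that matches its element type.
 * Passing free_fragment for a queue of record_data is the kind of type
 * confusion the patch in the record above removes. */
static void drain(void **items, size_t n, void (*free_item)(void *))
{
	size_t i;

	for (i = 0; i < n; i++)
		free_item(items[i]);
}

int main(void)
{
	struct record_data *r = calloc(1, sizeof(*r));
	struct hm_fragment *f = calloc(1, sizeof(*f));
	void *records[1], *fragments[1];

	r->buf = malloc(16);
	f->fragment = malloc(16);
	records[0] = r;
	fragments[0] = f;

	drain(records, 1, free_record);      /* correct pairing */
	drain(fragments, 1, free_fragment);  /* correct pairing */
	return 0;
}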
edge_proxy_common.c
edge_sparse_csr_reader_double
/****************************************************************************** ** Copyright (c) 2017-2018, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include "edge_proxy_common.h" void edge_sparse_csr_reader_double( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } } void edge_sparse_csr_reader_float( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, float** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { double* l_values; unsigned int i; /* read using double */ 
edge_sparse_csr_reader_double( i_csr_file_in, o_row_idx, o_column_idx, &l_values, o_row_count, o_column_count, o_element_count ); /* converting double values into float */ *o_values = (float*) malloc((*o_element_count)*sizeof(float)); for ( i = 0; i < (*o_element_count); ++i ) { (*o_values)[i] = (float)l_values[i]; } free(l_values); }
/****************************************************************************** ** Copyright (c) 2017-2018, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include "edge_proxy_common.h" void edge_sparse_csr_reader_double( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) && 0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } } void edge_sparse_csr_reader_float( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, float** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* 
o_element_count ) { double* l_values; unsigned int i; /* read using double */ edge_sparse_csr_reader_double( i_csr_file_in, o_row_idx, o_column_idx, &l_values, o_row_count, o_column_count, o_element_count ); /* converting double values into float */ *o_values = (float*) malloc((*o_element_count)*sizeof(float)); for ( i = 0; i < (*o_element_count); ++i ) { (*o_values)[i] = (float)l_values[i]; } free(l_values); }
void edge_sparse_csr_reader_double( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } }
void edge_sparse_csr_reader_double( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) && 0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } }
{'added': [(65, ' if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&'), (66, ' 0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count)'), (67, ' {')], 'deleted': [(65, ' if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) {')]}
3
1
103
762
https://github.com/hfp/libxsmm
CVE-2018-20541
['CWE-787']
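Editor's illustration (not part of the dataset row above): the CVE-2018-20541 record tightens edge_sparse_csr_reader_double() so the CSR header line is accepted only when sscanf() parses all three counts and none of them is zero, preventing the later allocations and index writes from being sized from degenerate values. A minimal parse-then-validate sketch follows; the function name and return convention are illustrative, not libxsmm's.

#include <stdio.h>

/* Accept a "rows cols nnz" header only if all three fields parse and are
 * non-zero -- the same guard the diff above adds before any allocation. */
static int parse_csr_header(const char *line, unsigned int *rows,
                            unsigned int *cols, unsigned int *nnz)
{
	if (sscanf(line, "%u %u %u", rows, cols, nnz) != 3)
		return -1;                          /* malformed header line */
	if (*rows == 0 || *cols == 0 || *nnz == 0)
		return -1;                          /* zero-sized dimensions rejected */
	return 0;
}

int main(void)
{
	unsigned int r, c, n;

	printf("%d\n", parse_csr_header("8 8 20", &r, &c, &n));  /* 0: accepted */
	printf("%d\n", parse_csr_header("8 0 20", &r, &c, &n));  /* -1: rejected */
	return 0;
}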
debug.c
dump_mm
// SPDX-License-Identifier: GPL-2.0 /* * mm/debug.c * * mm/ specific debug routines. * */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/trace_events.h> #include <linux/memcontrol.h> #include <trace/events/mmflags.h> #include <linux/migrate.h> #include <linux/page_owner.h> #include "internal.h" char *migrate_reason_names[MR_TYPES] = { "compaction", "memory_failure", "memory_hotplug", "syscall_or_cpuset", "mempolicy_mbind", "numa_misplaced", "cma", }; const struct trace_print_flags pageflag_names[] = { __def_pageflag_names, {0, NULL} }; const struct trace_print_flags gfpflag_names[] = { __def_gfpflag_names, {0, NULL} }; const struct trace_print_flags vmaflag_names[] = { __def_vmaflag_names, {0, NULL} }; void __dump_page(struct page *page, const char *reason) { bool page_poisoned = PagePoisoned(page); int mapcount; /* * If struct page is poisoned don't access Page*() functions as that * leads to recursive loop. Page*() check for poisoned pages, and calls * dump_page() when detected. */ if (page_poisoned) { pr_emerg("page:%px is uninitialized and poisoned", page); goto hex_only; } /* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to * encode own info. */ mapcount = PageSlab(page) ? 0 : page_mapcount(page); pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", page, page_ref_count(page), mapcount, page->mapping, page_to_pgoff(page)); if (PageCompound(page)) pr_cont(" compound_mapcount: %d", compound_mapcount(page)); pr_cont("\n"); BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); hex_only: print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, sizeof(struct page), false); if (reason) pr_alert("page dumped because: %s\n", reason); #ifdef CONFIG_MEMCG if (!page_poisoned && page->mem_cgroup) pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); #endif } void dump_page(struct page *page, const char *reason) { __dump_page(page, reason); dump_page_owner(page); } EXPORT_SYMBOL(dump_page); #ifdef CONFIG_DEBUG_VM void dump_vma(const struct vm_area_struct *vma) { pr_emerg("vma %px start %px end %px\n" "next %px prev %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" "flags: %#lx(%pGv)\n", vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, vma->vm_prev, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, vma->vm_flags, &vma->vm_flags); } EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG "owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->mmap, mm->vmacache_seqnum, 
mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); } #endif /* CONFIG_DEBUG_VM */
// SPDX-License-Identifier: GPL-2.0 /* * mm/debug.c * * mm/ specific debug routines. * */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/trace_events.h> #include <linux/memcontrol.h> #include <trace/events/mmflags.h> #include <linux/migrate.h> #include <linux/page_owner.h> #include "internal.h" char *migrate_reason_names[MR_TYPES] = { "compaction", "memory_failure", "memory_hotplug", "syscall_or_cpuset", "mempolicy_mbind", "numa_misplaced", "cma", }; const struct trace_print_flags pageflag_names[] = { __def_pageflag_names, {0, NULL} }; const struct trace_print_flags gfpflag_names[] = { __def_gfpflag_names, {0, NULL} }; const struct trace_print_flags vmaflag_names[] = { __def_vmaflag_names, {0, NULL} }; void __dump_page(struct page *page, const char *reason) { bool page_poisoned = PagePoisoned(page); int mapcount; /* * If struct page is poisoned don't access Page*() functions as that * leads to recursive loop. Page*() check for poisoned pages, and calls * dump_page() when detected. */ if (page_poisoned) { pr_emerg("page:%px is uninitialized and poisoned", page); goto hex_only; } /* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to * encode own info. */ mapcount = PageSlab(page) ? 0 : page_mapcount(page); pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", page, page_ref_count(page), mapcount, page->mapping, page_to_pgoff(page)); if (PageCompound(page)) pr_cont(" compound_mapcount: %d", compound_mapcount(page)); pr_cont("\n"); BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); hex_only: print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, sizeof(struct page), false); if (reason) pr_alert("page dumped because: %s\n", reason); #ifdef CONFIG_MEMCG if (!page_poisoned && page->mem_cgroup) pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); #endif } void dump_page(struct page *page, const char *reason) { __dump_page(page, reason); dump_page_owner(page); } EXPORT_SYMBOL(dump_page); #ifdef CONFIG_DEBUG_VM void dump_vma(const struct vm_area_struct *vma) { pr_emerg("vma %px start %px end %px\n" "next %px prev %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" "flags: %#lx(%pGv)\n", vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, vma->vm_prev, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, vma->vm_flags, &vma->vm_flags); } EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG "owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->mmap, (long long) 
mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); } #endif /* CONFIG_DEBUG_VM */
void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG "owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); }
void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG "owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); }
{'added': [(117, '\tpr_emerg("mm %px mmap %px seqnum %llu task_size %lu\\n"'), (145, '\t\tmm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,')], 'deleted': [(117, '\tpr_emerg("mm %px mmap %px seqnum %d task_size %lu\\n"'), (145, '\t\tmm, mm->mmap, mm->vmacache_seqnum, mm->task_size,')]}
2
2
117
631
https://github.com/torvalds/linux
CVE-2018-17182
['CWE-416']
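The dump_mm() diff recorded above for CVE-2018-17182 only touches how vmacache_seqnum is printed: %d becomes %llu and the argument gains a (long long) cast, keeping the printf conversion in step with a 64-bit counter. A minimal, self-contained C sketch of why the specifier and the argument width must agree; the variable name and value here are hypothetical, not taken from the kernel:

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch only: printing a 64-bit sequence counter.
 * If the counter is 64-bit but the format string still says %d,
 * printf reads the wrong argument width from the vararg list,
 * which is undefined behavior; casting to unsigned long long and
 * using %llu keeps the format and the argument consistent. */
int main(void)
{
    uint64_t vmacache_seqnum = 0x100000001ULL;   /* hypothetical value */

    printf("seqnum %llu\n", (unsigned long long) vmacache_seqnum);
    return 0;
}

The patched pr_emerg() call above applies the same idea, widening the format and casting the argument together.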
smart_pkt.c
git_pkt_parse_line
/* * Copyright (C) the libgit2 contributors. All rights reserved. * * This file is part of libgit2, distributed under the GNU GPL v2 with * a Linking Exception. For full terms see the included COPYING file. */ #include "common.h" #include "git2/types.h" #include "git2/errors.h" #include "git2/refs.h" #include "git2/revwalk.h" #include "smart.h" #include "util.h" #include "netops.h" #include "posix.h" #include "buffer.h" #include <ctype.h> #define PKT_LEN_SIZE 4 static const char pkt_done_str[] = "0009done\n"; static const char pkt_flush_str[] = "0000"; static const char pkt_have_prefix[] = "0032have "; static const char pkt_want_prefix[] = "0032want "; static int flush_pkt(git_pkt **out) { git_pkt *pkt; pkt = git__malloc(sizeof(git_pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_FLUSH; *out = pkt; return 0; } /* the rest of the line will be useful for multi_ack and multi_ack_detailed */ static int ack_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_ack *pkt; GIT_UNUSED(line); GIT_UNUSED(len); pkt = git__calloc(1, sizeof(git_pkt_ack)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_ACK; line += 3; len -= 3; if (len >= GIT_OID_HEXSZ) { git_oid_fromstr(&pkt->oid, line + 1); line += GIT_OID_HEXSZ + 1; len -= GIT_OID_HEXSZ + 1; } if (len >= 7) { if (!git__prefixcmp(line + 1, "continue")) pkt->status = GIT_ACK_CONTINUE; if (!git__prefixcmp(line + 1, "common")) pkt->status = GIT_ACK_COMMON; if (!git__prefixcmp(line + 1, "ready")) pkt->status = GIT_ACK_READY; } *out = (git_pkt *) pkt; return 0; } static int nak_pkt(git_pkt **out) { git_pkt *pkt; pkt = git__malloc(sizeof(git_pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_NAK; *out = pkt; return 0; } static int pack_pkt(git_pkt **out) { git_pkt *pkt; pkt = git__malloc(sizeof(git_pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_PACK; *out = pkt; return 0; } static int comment_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_comment *pkt; size_t alloclen; GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_comment), len); GITERR_CHECK_ALLOC_ADD(&alloclen, alloclen, 1); pkt = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_COMMENT; memcpy(pkt->comment, line, len); pkt->comment[len] = '\0'; *out = (git_pkt *) pkt; return 0; } static int err_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_err *pkt; size_t alloclen; /* Remove "ERR " from the line */ line += 4; len -= 4; GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len); GITERR_CHECK_ALLOC_ADD(&alloclen, alloclen, 1); pkt = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_ERR; pkt->len = (int)len; memcpy(pkt->error, line, len); pkt->error[len] = '\0'; *out = (git_pkt *) pkt; return 0; } static int data_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_data *pkt; size_t alloclen; line++; len--; GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len); pkt = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_DATA; pkt->len = (int) len; memcpy(pkt->data, line, len); *out = (git_pkt *) pkt; return 0; } static int sideband_progress_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_progress *pkt; size_t alloclen; line++; len--; GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len); pkt = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_PROGRESS; pkt->len = (int) len; memcpy(pkt->data, line, len); *out = (git_pkt *) pkt; return 0; } static int sideband_error_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_err *pkt; size_t alloc_len; 
line++; len--; GITERR_CHECK_ALLOC_ADD(&alloc_len, sizeof(git_pkt_err), len); GITERR_CHECK_ALLOC_ADD(&alloc_len, alloc_len, 1); pkt = git__malloc(alloc_len); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_ERR; pkt->len = (int)len; memcpy(pkt->error, line, len); pkt->error[len] = '\0'; *out = (git_pkt *)pkt; return 0; } /* * Parse an other-ref line. */ static int ref_pkt(git_pkt **out, const char *line, size_t len) { int error; git_pkt_ref *pkt; size_t alloclen; pkt = git__malloc(sizeof(git_pkt_ref)); GITERR_CHECK_ALLOC(pkt); memset(pkt, 0x0, sizeof(git_pkt_ref)); pkt->type = GIT_PKT_REF; if ((error = git_oid_fromstr(&pkt->head.oid, line)) < 0) goto error_out; /* Check for a bit of consistency */ if (line[GIT_OID_HEXSZ] != ' ') { giterr_set(GITERR_NET, "Error parsing pkt-line"); error = -1; goto error_out; } /* Jump from the name */ line += GIT_OID_HEXSZ + 1; len -= (GIT_OID_HEXSZ + 1); if (line[len - 1] == '\n') --len; GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1); pkt->head.name = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt->head.name); memcpy(pkt->head.name, line, len); pkt->head.name[len] = '\0'; if (strlen(pkt->head.name) < len) { pkt->capabilities = strchr(pkt->head.name, '\0') + 1; } *out = (git_pkt *)pkt; return 0; error_out: git__free(pkt); return error; } static int ok_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_ok *pkt; const char *ptr; size_t alloc_len; pkt = git__malloc(sizeof(*pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_OK; line += 3; /* skip "ok " */ if (!(ptr = strchr(line, '\n'))) { giterr_set(GITERR_NET, "Invalid packet line"); git__free(pkt); return -1; } len = ptr - line; GITERR_CHECK_ALLOC_ADD(&alloc_len, len, 1); pkt->ref = git__malloc(alloc_len); GITERR_CHECK_ALLOC(pkt->ref); memcpy(pkt->ref, line, len); pkt->ref[len] = '\0'; *out = (git_pkt *)pkt; return 0; } static int ng_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_ng *pkt; const char *ptr; size_t alloclen; pkt = git__malloc(sizeof(*pkt)); GITERR_CHECK_ALLOC(pkt); pkt->ref = NULL; pkt->type = GIT_PKT_NG; line += 3; /* skip "ng " */ if (!(ptr = strchr(line, ' '))) goto out_err; len = ptr - line; GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1); pkt->ref = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt->ref); memcpy(pkt->ref, line, len); pkt->ref[len] = '\0'; line = ptr + 1; if (!(ptr = strchr(line, '\n'))) goto out_err; len = ptr - line; GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1); pkt->msg = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt->msg); memcpy(pkt->msg, line, len); pkt->msg[len] = '\0'; *out = (git_pkt *)pkt; return 0; out_err: giterr_set(GITERR_NET, "Invalid packet line"); git__free(pkt->ref); git__free(pkt); return -1; } static int unpack_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_unpack *pkt; GIT_UNUSED(len); pkt = git__malloc(sizeof(*pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_UNPACK; if (!git__prefixcmp(line, "unpack ok")) pkt->unpack_ok = 1; else pkt->unpack_ok = 0; *out = (git_pkt *)pkt; return 0; } static int32_t parse_len(const char *line) { char num[PKT_LEN_SIZE + 1]; int i, k, error; int32_t len; const char *num_end; memcpy(num, line, PKT_LEN_SIZE); num[PKT_LEN_SIZE] = '\0'; for (i = 0; i < PKT_LEN_SIZE; ++i) { if (!isxdigit(num[i])) { /* Make sure there are no special characters before passing to error message */ for (k = 0; k < PKT_LEN_SIZE; ++k) { if(!isprint(num[k])) { num[k] = '.'; } } giterr_set(GITERR_NET, "invalid hex digit in length: '%s'", num); return -1; } } if ((error = git__strtol32(&len, num, &num_end, 16)) < 0) return error; return len; 
} /* * As per the documentation, the syntax is: * * pkt-line = data-pkt / flush-pkt * data-pkt = pkt-len pkt-payload * pkt-len = 4*(HEXDIG) * pkt-payload = (pkt-len -4)*(OCTET) * flush-pkt = "0000" * * Which means that the first four bytes are the length of the line, * in ASCII hexadecimal (including itself) */ int git_pkt_parse_line( git_pkt **head, const char *line, const char **out, size_t bufflen) { int ret; int32_t len; /* Not even enough for the length */ if (bufflen > 0 && bufflen < PKT_LEN_SIZE) return GIT_EBUFS; len = parse_len(line); if (len < 0) { /* * If we fail to parse the length, it might be because the * server is trying to send us the packfile already. */ if (bufflen >= 4 && !git__prefixcmp(line, "PACK")) { giterr_clear(); *out = line; return pack_pkt(head); } return (int)len; } /* * If we were given a buffer length, then make sure there is * enough in the buffer to satisfy this line */ if (bufflen > 0 && bufflen < (size_t)len) return GIT_EBUFS; /* * The length has to be exactly 0 in case of a flush * packet or greater than PKT_LEN_SIZE, as the decoded * length includes its own encoded length of four bytes. */ if (len != 0 && len < PKT_LEN_SIZE) return GIT_ERROR; line += PKT_LEN_SIZE; /* * TODO: How do we deal with empty lines? Try again? with the next * line? */ if (len == PKT_LEN_SIZE) { *head = NULL; *out = line; return 0; } if (len == 0) { /* Flush pkt */ *out = line; return flush_pkt(head); } len -= PKT_LEN_SIZE; /* the encoded length includes its own size */ if (*line == GIT_SIDE_BAND_DATA) ret = data_pkt(head, line, len); else if (*line == GIT_SIDE_BAND_PROGRESS) ret = sideband_progress_pkt(head, line, len); else if (*line == GIT_SIDE_BAND_ERROR) ret = sideband_error_pkt(head, line, len); else if (!git__prefixcmp(line, "ACK")) ret = ack_pkt(head, line, len); else if (!git__prefixcmp(line, "NAK")) ret = nak_pkt(head); else if (!git__prefixcmp(line, "ERR ")) ret = err_pkt(head, line, len); else if (*line == '#') ret = comment_pkt(head, line, len); else if (!git__prefixcmp(line, "ok")) ret = ok_pkt(head, line, len); else if (!git__prefixcmp(line, "ng")) ret = ng_pkt(head, line, len); else if (!git__prefixcmp(line, "unpack")) ret = unpack_pkt(head, line, len); else ret = ref_pkt(head, line, len); *out = line + len; return ret; } void git_pkt_free(git_pkt *pkt) { if (pkt->type == GIT_PKT_REF) { git_pkt_ref *p = (git_pkt_ref *) pkt; git__free(p->head.name); git__free(p->head.symref_target); } if (pkt->type == GIT_PKT_OK) { git_pkt_ok *p = (git_pkt_ok *) pkt; git__free(p->ref); } if (pkt->type == GIT_PKT_NG) { git_pkt_ng *p = (git_pkt_ng *) pkt; git__free(p->ref); git__free(p->msg); } git__free(pkt); } int git_pkt_buffer_flush(git_buf *buf) { return git_buf_put(buf, pkt_flush_str, strlen(pkt_flush_str)); } static int buffer_want_with_caps(const git_remote_head *head, transport_smart_caps *caps, git_buf *buf) { git_buf str = GIT_BUF_INIT; char oid[GIT_OID_HEXSZ +1] = {0}; size_t len; /* Prefer multi_ack_detailed */ if (caps->multi_ack_detailed) git_buf_puts(&str, GIT_CAP_MULTI_ACK_DETAILED " "); else if (caps->multi_ack) git_buf_puts(&str, GIT_CAP_MULTI_ACK " "); /* Prefer side-band-64k if the server supports both */ if (caps->side_band_64k) git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND_64K); else if (caps->side_band) git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND); if (caps->include_tag) git_buf_puts(&str, GIT_CAP_INCLUDE_TAG " "); if (caps->thin_pack) git_buf_puts(&str, GIT_CAP_THIN_PACK " "); if (caps->ofs_delta) git_buf_puts(&str, GIT_CAP_OFS_DELTA " "); if 
(git_buf_oom(&str)) return -1; len = strlen("XXXXwant ") + GIT_OID_HEXSZ + 1 /* NUL */ + git_buf_len(&str) + 1 /* LF */; if (len > 0xffff) { giterr_set(GITERR_NET, "Tried to produce packet with invalid length %" PRIuZ, len); return -1; } git_buf_grow_by(buf, len); git_oid_fmt(oid, &head->oid); git_buf_printf(buf, "%04xwant %s %s\n", (unsigned int)len, oid, git_buf_cstr(&str)); git_buf_free(&str); GITERR_CHECK_ALLOC_BUF(buf); return 0; } /* * All "want" packets have the same length and format, so what we do * is overwrite the OID each time. */ int git_pkt_buffer_wants( const git_remote_head * const *refs, size_t count, transport_smart_caps *caps, git_buf *buf) { size_t i = 0; const git_remote_head *head; if (caps->common) { for (; i < count; ++i) { head = refs[i]; if (!head->local) break; } if (buffer_want_with_caps(refs[i], caps, buf) < 0) return -1; i++; } for (; i < count; ++i) { char oid[GIT_OID_HEXSZ]; head = refs[i]; if (head->local) continue; git_oid_fmt(oid, &head->oid); git_buf_put(buf, pkt_want_prefix, strlen(pkt_want_prefix)); git_buf_put(buf, oid, GIT_OID_HEXSZ); git_buf_putc(buf, '\n'); if (git_buf_oom(buf)) return -1; } return git_pkt_buffer_flush(buf); } int git_pkt_buffer_have(git_oid *oid, git_buf *buf) { char oidhex[GIT_OID_HEXSZ + 1]; memset(oidhex, 0x0, sizeof(oidhex)); git_oid_fmt(oidhex, oid); return git_buf_printf(buf, "%s%s\n", pkt_have_prefix, oidhex); } int git_pkt_buffer_done(git_buf *buf) { return git_buf_puts(buf, pkt_done_str); }
/* * Copyright (C) the libgit2 contributors. All rights reserved. * * This file is part of libgit2, distributed under the GNU GPL v2 with * a Linking Exception. For full terms see the included COPYING file. */ #include "common.h" #include "git2/types.h" #include "git2/errors.h" #include "git2/refs.h" #include "git2/revwalk.h" #include "smart.h" #include "util.h" #include "netops.h" #include "posix.h" #include "buffer.h" #include <ctype.h> #define PKT_LEN_SIZE 4 static const char pkt_done_str[] = "0009done\n"; static const char pkt_flush_str[] = "0000"; static const char pkt_have_prefix[] = "0032have "; static const char pkt_want_prefix[] = "0032want "; static int flush_pkt(git_pkt **out) { git_pkt *pkt; pkt = git__malloc(sizeof(git_pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_FLUSH; *out = pkt; return 0; } /* the rest of the line will be useful for multi_ack and multi_ack_detailed */ static int ack_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_ack *pkt; GIT_UNUSED(line); GIT_UNUSED(len); pkt = git__calloc(1, sizeof(git_pkt_ack)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_ACK; line += 3; len -= 3; if (len >= GIT_OID_HEXSZ) { git_oid_fromstr(&pkt->oid, line + 1); line += GIT_OID_HEXSZ + 1; len -= GIT_OID_HEXSZ + 1; } if (len >= 7) { if (!git__prefixcmp(line + 1, "continue")) pkt->status = GIT_ACK_CONTINUE; if (!git__prefixcmp(line + 1, "common")) pkt->status = GIT_ACK_COMMON; if (!git__prefixcmp(line + 1, "ready")) pkt->status = GIT_ACK_READY; } *out = (git_pkt *) pkt; return 0; } static int nak_pkt(git_pkt **out) { git_pkt *pkt; pkt = git__malloc(sizeof(git_pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_NAK; *out = pkt; return 0; } static int pack_pkt(git_pkt **out) { git_pkt *pkt; pkt = git__malloc(sizeof(git_pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_PACK; *out = pkt; return 0; } static int comment_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_comment *pkt; size_t alloclen; GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_comment), len); GITERR_CHECK_ALLOC_ADD(&alloclen, alloclen, 1); pkt = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_COMMENT; memcpy(pkt->comment, line, len); pkt->comment[len] = '\0'; *out = (git_pkt *) pkt; return 0; } static int err_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_err *pkt; size_t alloclen; /* Remove "ERR " from the line */ line += 4; len -= 4; GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len); GITERR_CHECK_ALLOC_ADD(&alloclen, alloclen, 1); pkt = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_ERR; pkt->len = (int)len; memcpy(pkt->error, line, len); pkt->error[len] = '\0'; *out = (git_pkt *) pkt; return 0; } static int data_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_data *pkt; size_t alloclen; line++; len--; GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len); pkt = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_DATA; pkt->len = (int) len; memcpy(pkt->data, line, len); *out = (git_pkt *) pkt; return 0; } static int sideband_progress_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_progress *pkt; size_t alloclen; line++; len--; GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len); pkt = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_PROGRESS; pkt->len = (int) len; memcpy(pkt->data, line, len); *out = (git_pkt *) pkt; return 0; } static int sideband_error_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_err *pkt; size_t alloc_len; 
line++; len--; GITERR_CHECK_ALLOC_ADD(&alloc_len, sizeof(git_pkt_err), len); GITERR_CHECK_ALLOC_ADD(&alloc_len, alloc_len, 1); pkt = git__malloc(alloc_len); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_ERR; pkt->len = (int)len; memcpy(pkt->error, line, len); pkt->error[len] = '\0'; *out = (git_pkt *)pkt; return 0; } /* * Parse an other-ref line. */ static int ref_pkt(git_pkt **out, const char *line, size_t len) { int error; git_pkt_ref *pkt; size_t alloclen; pkt = git__malloc(sizeof(git_pkt_ref)); GITERR_CHECK_ALLOC(pkt); memset(pkt, 0x0, sizeof(git_pkt_ref)); pkt->type = GIT_PKT_REF; if ((error = git_oid_fromstr(&pkt->head.oid, line)) < 0) goto error_out; /* Check for a bit of consistency */ if (line[GIT_OID_HEXSZ] != ' ') { giterr_set(GITERR_NET, "Error parsing pkt-line"); error = -1; goto error_out; } /* Jump from the name */ line += GIT_OID_HEXSZ + 1; len -= (GIT_OID_HEXSZ + 1); if (line[len - 1] == '\n') --len; GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1); pkt->head.name = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt->head.name); memcpy(pkt->head.name, line, len); pkt->head.name[len] = '\0'; if (strlen(pkt->head.name) < len) { pkt->capabilities = strchr(pkt->head.name, '\0') + 1; } *out = (git_pkt *)pkt; return 0; error_out: git__free(pkt); return error; } static int ok_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_ok *pkt; const char *ptr; size_t alloc_len; pkt = git__malloc(sizeof(*pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_OK; line += 3; /* skip "ok " */ if (!(ptr = strchr(line, '\n'))) { giterr_set(GITERR_NET, "Invalid packet line"); git__free(pkt); return -1; } len = ptr - line; GITERR_CHECK_ALLOC_ADD(&alloc_len, len, 1); pkt->ref = git__malloc(alloc_len); GITERR_CHECK_ALLOC(pkt->ref); memcpy(pkt->ref, line, len); pkt->ref[len] = '\0'; *out = (git_pkt *)pkt; return 0; } static int ng_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_ng *pkt; const char *ptr; size_t alloclen; pkt = git__malloc(sizeof(*pkt)); GITERR_CHECK_ALLOC(pkt); pkt->ref = NULL; pkt->type = GIT_PKT_NG; line += 3; /* skip "ng " */ if (!(ptr = strchr(line, ' '))) goto out_err; len = ptr - line; GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1); pkt->ref = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt->ref); memcpy(pkt->ref, line, len); pkt->ref[len] = '\0'; line = ptr + 1; if (!(ptr = strchr(line, '\n'))) goto out_err; len = ptr - line; GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1); pkt->msg = git__malloc(alloclen); GITERR_CHECK_ALLOC(pkt->msg); memcpy(pkt->msg, line, len); pkt->msg[len] = '\0'; *out = (git_pkt *)pkt; return 0; out_err: giterr_set(GITERR_NET, "Invalid packet line"); git__free(pkt->ref); git__free(pkt); return -1; } static int unpack_pkt(git_pkt **out, const char *line, size_t len) { git_pkt_unpack *pkt; GIT_UNUSED(len); pkt = git__malloc(sizeof(*pkt)); GITERR_CHECK_ALLOC(pkt); pkt->type = GIT_PKT_UNPACK; if (!git__prefixcmp(line, "unpack ok")) pkt->unpack_ok = 1; else pkt->unpack_ok = 0; *out = (git_pkt *)pkt; return 0; } static int32_t parse_len(const char *line) { char num[PKT_LEN_SIZE + 1]; int i, k, error; int32_t len; const char *num_end; memcpy(num, line, PKT_LEN_SIZE); num[PKT_LEN_SIZE] = '\0'; for (i = 0; i < PKT_LEN_SIZE; ++i) { if (!isxdigit(num[i])) { /* Make sure there are no special characters before passing to error message */ for (k = 0; k < PKT_LEN_SIZE; ++k) { if(!isprint(num[k])) { num[k] = '.'; } } giterr_set(GITERR_NET, "invalid hex digit in length: '%s'", num); return -1; } } if ((error = git__strtol32(&len, num, &num_end, 16)) < 0) return error; return len; 
} /* * As per the documentation, the syntax is: * * pkt-line = data-pkt / flush-pkt * data-pkt = pkt-len pkt-payload * pkt-len = 4*(HEXDIG) * pkt-payload = (pkt-len -4)*(OCTET) * flush-pkt = "0000" * * Which means that the first four bytes are the length of the line, * in ASCII hexadecimal (including itself) */ int git_pkt_parse_line( git_pkt **head, const char *line, const char **out, size_t bufflen) { int ret; int32_t len; /* Not even enough for the length */ if (bufflen > 0 && bufflen < PKT_LEN_SIZE) return GIT_EBUFS; len = parse_len(line); if (len < 0) { /* * If we fail to parse the length, it might be because the * server is trying to send us the packfile already. */ if (bufflen >= 4 && !git__prefixcmp(line, "PACK")) { giterr_clear(); *out = line; return pack_pkt(head); } return (int)len; } /* * If we were given a buffer length, then make sure there is * enough in the buffer to satisfy this line */ if (bufflen > 0 && bufflen < (size_t)len) return GIT_EBUFS; /* * The length has to be exactly 0 in case of a flush * packet or greater than PKT_LEN_SIZE, as the decoded * length includes its own encoded length of four bytes. */ if (len != 0 && len < PKT_LEN_SIZE) return GIT_ERROR; line += PKT_LEN_SIZE; /* * The Git protocol does not specify empty lines as part * of the protocol. Not knowing what to do with an empty * line, we should return an error upon hitting one. */ if (len == PKT_LEN_SIZE) { giterr_set_str(GITERR_NET, "Invalid empty packet"); return GIT_ERROR; } if (len == 0) { /* Flush pkt */ *out = line; return flush_pkt(head); } len -= PKT_LEN_SIZE; /* the encoded length includes its own size */ if (*line == GIT_SIDE_BAND_DATA) ret = data_pkt(head, line, len); else if (*line == GIT_SIDE_BAND_PROGRESS) ret = sideband_progress_pkt(head, line, len); else if (*line == GIT_SIDE_BAND_ERROR) ret = sideband_error_pkt(head, line, len); else if (!git__prefixcmp(line, "ACK")) ret = ack_pkt(head, line, len); else if (!git__prefixcmp(line, "NAK")) ret = nak_pkt(head); else if (!git__prefixcmp(line, "ERR ")) ret = err_pkt(head, line, len); else if (*line == '#') ret = comment_pkt(head, line, len); else if (!git__prefixcmp(line, "ok")) ret = ok_pkt(head, line, len); else if (!git__prefixcmp(line, "ng")) ret = ng_pkt(head, line, len); else if (!git__prefixcmp(line, "unpack")) ret = unpack_pkt(head, line, len); else ret = ref_pkt(head, line, len); *out = line + len; return ret; } void git_pkt_free(git_pkt *pkt) { if (pkt->type == GIT_PKT_REF) { git_pkt_ref *p = (git_pkt_ref *) pkt; git__free(p->head.name); git__free(p->head.symref_target); } if (pkt->type == GIT_PKT_OK) { git_pkt_ok *p = (git_pkt_ok *) pkt; git__free(p->ref); } if (pkt->type == GIT_PKT_NG) { git_pkt_ng *p = (git_pkt_ng *) pkt; git__free(p->ref); git__free(p->msg); } git__free(pkt); } int git_pkt_buffer_flush(git_buf *buf) { return git_buf_put(buf, pkt_flush_str, strlen(pkt_flush_str)); } static int buffer_want_with_caps(const git_remote_head *head, transport_smart_caps *caps, git_buf *buf) { git_buf str = GIT_BUF_INIT; char oid[GIT_OID_HEXSZ +1] = {0}; size_t len; /* Prefer multi_ack_detailed */ if (caps->multi_ack_detailed) git_buf_puts(&str, GIT_CAP_MULTI_ACK_DETAILED " "); else if (caps->multi_ack) git_buf_puts(&str, GIT_CAP_MULTI_ACK " "); /* Prefer side-band-64k if the server supports both */ if (caps->side_band_64k) git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND_64K); else if (caps->side_band) git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND); if (caps->include_tag) git_buf_puts(&str, GIT_CAP_INCLUDE_TAG " "); if (caps->thin_pack) 
git_buf_puts(&str, GIT_CAP_THIN_PACK " "); if (caps->ofs_delta) git_buf_puts(&str, GIT_CAP_OFS_DELTA " "); if (git_buf_oom(&str)) return -1; len = strlen("XXXXwant ") + GIT_OID_HEXSZ + 1 /* NUL */ + git_buf_len(&str) + 1 /* LF */; if (len > 0xffff) { giterr_set(GITERR_NET, "Tried to produce packet with invalid length %" PRIuZ, len); return -1; } git_buf_grow_by(buf, len); git_oid_fmt(oid, &head->oid); git_buf_printf(buf, "%04xwant %s %s\n", (unsigned int)len, oid, git_buf_cstr(&str)); git_buf_free(&str); GITERR_CHECK_ALLOC_BUF(buf); return 0; } /* * All "want" packets have the same length and format, so what we do * is overwrite the OID each time. */ int git_pkt_buffer_wants( const git_remote_head * const *refs, size_t count, transport_smart_caps *caps, git_buf *buf) { size_t i = 0; const git_remote_head *head; if (caps->common) { for (; i < count; ++i) { head = refs[i]; if (!head->local) break; } if (buffer_want_with_caps(refs[i], caps, buf) < 0) return -1; i++; } for (; i < count; ++i) { char oid[GIT_OID_HEXSZ]; head = refs[i]; if (head->local) continue; git_oid_fmt(oid, &head->oid); git_buf_put(buf, pkt_want_prefix, strlen(pkt_want_prefix)); git_buf_put(buf, oid, GIT_OID_HEXSZ); git_buf_putc(buf, '\n'); if (git_buf_oom(buf)) return -1; } return git_pkt_buffer_flush(buf); } int git_pkt_buffer_have(git_oid *oid, git_buf *buf) { char oidhex[GIT_OID_HEXSZ + 1]; memset(oidhex, 0x0, sizeof(oidhex)); git_oid_fmt(oidhex, oid); return git_buf_printf(buf, "%s%s\n", pkt_have_prefix, oidhex); } int git_pkt_buffer_done(git_buf *buf) { return git_buf_puts(buf, pkt_done_str); }
int git_pkt_parse_line( git_pkt **head, const char *line, const char **out, size_t bufflen) { int ret; int32_t len; /* Not even enough for the length */ if (bufflen > 0 && bufflen < PKT_LEN_SIZE) return GIT_EBUFS; len = parse_len(line); if (len < 0) { /* * If we fail to parse the length, it might be because the * server is trying to send us the packfile already. */ if (bufflen >= 4 && !git__prefixcmp(line, "PACK")) { giterr_clear(); *out = line; return pack_pkt(head); } return (int)len; } /* * If we were given a buffer length, then make sure there is * enough in the buffer to satisfy this line */ if (bufflen > 0 && bufflen < (size_t)len) return GIT_EBUFS; /* * The length has to be exactly 0 in case of a flush * packet or greater than PKT_LEN_SIZE, as the decoded * length includes its own encoded length of four bytes. */ if (len != 0 && len < PKT_LEN_SIZE) return GIT_ERROR; line += PKT_LEN_SIZE; /* * TODO: How do we deal with empty lines? Try again? with the next * line? */ if (len == PKT_LEN_SIZE) { *head = NULL; *out = line; return 0; } if (len == 0) { /* Flush pkt */ *out = line; return flush_pkt(head); } len -= PKT_LEN_SIZE; /* the encoded length includes its own size */ if (*line == GIT_SIDE_BAND_DATA) ret = data_pkt(head, line, len); else if (*line == GIT_SIDE_BAND_PROGRESS) ret = sideband_progress_pkt(head, line, len); else if (*line == GIT_SIDE_BAND_ERROR) ret = sideband_error_pkt(head, line, len); else if (!git__prefixcmp(line, "ACK")) ret = ack_pkt(head, line, len); else if (!git__prefixcmp(line, "NAK")) ret = nak_pkt(head); else if (!git__prefixcmp(line, "ERR ")) ret = err_pkt(head, line, len); else if (*line == '#') ret = comment_pkt(head, line, len); else if (!git__prefixcmp(line, "ok")) ret = ok_pkt(head, line, len); else if (!git__prefixcmp(line, "ng")) ret = ng_pkt(head, line, len); else if (!git__prefixcmp(line, "unpack")) ret = unpack_pkt(head, line, len); else ret = ref_pkt(head, line, len); *out = line + len; return ret; }
int git_pkt_parse_line( git_pkt **head, const char *line, const char **out, size_t bufflen) { int ret; int32_t len; /* Not even enough for the length */ if (bufflen > 0 && bufflen < PKT_LEN_SIZE) return GIT_EBUFS; len = parse_len(line); if (len < 0) { /* * If we fail to parse the length, it might be because the * server is trying to send us the packfile already. */ if (bufflen >= 4 && !git__prefixcmp(line, "PACK")) { giterr_clear(); *out = line; return pack_pkt(head); } return (int)len; } /* * If we were given a buffer length, then make sure there is * enough in the buffer to satisfy this line */ if (bufflen > 0 && bufflen < (size_t)len) return GIT_EBUFS; /* * The length has to be exactly 0 in case of a flush * packet or greater than PKT_LEN_SIZE, as the decoded * length includes its own encoded length of four bytes. */ if (len != 0 && len < PKT_LEN_SIZE) return GIT_ERROR; line += PKT_LEN_SIZE; /* * The Git protocol does not specify empty lines as part * of the protocol. Not knowing what to do with an empty * line, we should return an error upon hitting one. */ if (len == PKT_LEN_SIZE) { giterr_set_str(GITERR_NET, "Invalid empty packet"); return GIT_ERROR; } if (len == 0) { /* Flush pkt */ *out = line; return flush_pkt(head); } len -= PKT_LEN_SIZE; /* the encoded length includes its own size */ if (*line == GIT_SIDE_BAND_DATA) ret = data_pkt(head, line, len); else if (*line == GIT_SIDE_BAND_PROGRESS) ret = sideband_progress_pkt(head, line, len); else if (*line == GIT_SIDE_BAND_ERROR) ret = sideband_error_pkt(head, line, len); else if (!git__prefixcmp(line, "ACK")) ret = ack_pkt(head, line, len); else if (!git__prefixcmp(line, "NAK")) ret = nak_pkt(head); else if (!git__prefixcmp(line, "ERR ")) ret = err_pkt(head, line, len); else if (*line == '#') ret = comment_pkt(head, line, len); else if (!git__prefixcmp(line, "ok")) ret = ok_pkt(head, line, len); else if (!git__prefixcmp(line, "ng")) ret = ng_pkt(head, line, len); else if (!git__prefixcmp(line, "unpack")) ret = unpack_pkt(head, line, len); else ret = ref_pkt(head, line, len); *out = line + len; return ret; }
{'added': [(440, '\t * The Git protocol does not specify empty lines as part'), (441, '\t * of the protocol. Not knowing what to do with an empty'), (442, '\t * line, we should return an error upon hitting one.'), (445, '\t\tgiterr_set_str(GITERR_NET, "Invalid empty packet");'), (446, '\t\treturn GIT_ERROR;')], 'deleted': [(440, '\t * TODO: How do we deal with empty lines? Try again? with the next'), (441, '\t * line?'), (444, '\t\t*head = NULL;'), (445, '\t\t*out = line;'), (446, '\t\treturn 0;')]}
5
5
431
2777
https://github.com/libgit2/libgit2
CVE-2016-10129
['CWE-476']
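The git_pkt_parse_line() diff recorded above for CVE-2016-10129 replaces the old empty-packet handling, which set *head to NULL and returned 0, with an explicit GIT_ERROR. A minimal caller sketch, using hypothetical stand-in types and function names rather than libgit2's API, of why a success return paired with a NULL output pointer is dangerous:

#include <stdio.h>

/* Hypothetical stand-ins for the parser's packet type and old behavior. */
typedef struct { int type; } pkt_t;

/* Old behavior on an empty pkt-line: report success but produce no packet. */
static int parse_line_old(pkt_t **head) { *head = NULL; return 0; }

int main(void)
{
    pkt_t *pkt;

    if (parse_line_old(&pkt) == 0 && pkt == NULL) {
        /* A caller that trusted the zero return code would read pkt->type
         * next and dereference NULL; the patched parser instead returns
         * GIT_ERROR for an empty "0004" packet, so this state is never
         * handed back to callers. */
        fprintf(stderr, "parser reported success but produced no packet\n");
        return 1;
    }
    return 0;
}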
riff.c
ParseRiffHeaderConfig
//////////////////////////////////////////////////////////////////////////// // **** WAVPACK **** // // Hybrid Lossless Wavefile Compressor // // Copyright (c) 1998 - 2016 David Bryant. // // All Rights Reserved. // // Distributed under the BSD Software License (see license.txt) // //////////////////////////////////////////////////////////////////////////// // riff.c // This module is a helper to the WavPack command-line programs to support WAV files // (both MS standard and rf64 varients). #include <string.h> #include <stdlib.h> #include <fcntl.h> #include <math.h> #include <stdio.h> #include <ctype.h> #include "wavpack.h" #include "utils.h" #include "md5.h" #pragma pack(push,4) typedef struct { char ckID [4]; uint64_t chunkSize64; } CS64Chunk; typedef struct { uint64_t riffSize64, dataSize64, sampleCount64; uint32_t tableLength; } DS64Chunk; typedef struct { char ckID [4]; uint32_t ckSize; char junk [28]; } JunkChunk; #pragma pack(pop) #define CS64ChunkFormat "4D" #define DS64ChunkFormat "DDDL" #define WAVPACK_NO_ERROR 0 #define WAVPACK_SOFT_ERROR 1 #define WAVPACK_HARD_ERROR 2 extern int debug_logging_mode; int ParseRiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int is_rf64 = !strncmp (fourcc, "RF64", 4), got_ds64 = 0; int64_t total_samples = 0, infilesize; RiffChunkHeader riff_chunk_header; ChunkHeader chunk_header; WaveHeader WaveHeader; DS64Chunk ds64_chunk; uint32_t bcount; CLEAR (WaveHeader); CLEAR (ds64_chunk); infilesize = DoGetFileSize (infile); if (!is_rf64 && infilesize >= 4294967296LL && !(config->qmode & QMODE_IGNORE_LENGTH)) { error_line ("can't handle .WAV files larger than 4 GB (non-standard)!"); return WAVPACK_SOFT_ERROR; } memcpy (&riff_chunk_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &riff_chunk_header) + 4, sizeof (RiffChunkHeader) - 4, &bcount) || bcount != sizeof (RiffChunkHeader) - 4 || strncmp (riff_chunk_header.formType, "WAVE", 4))) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &riff_chunk_header, sizeof (RiffChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } // loop through all elements of the RIFF wav header // (until the data chuck) and copy them to the output file while (1) { if (!DoReadFile (infile, &chunk_header, sizeof (ChunkHeader), &bcount) || bcount != sizeof (ChunkHeader)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &chunk_header, sizeof (ChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&chunk_header, ChunkHeaderFormat); if (!strncmp (chunk_header.ckID, "ds64", 4)) { if (chunk_header.ckSize < sizeof (DS64Chunk) || !DoReadFile (infile, &ds64_chunk, sizeof (DS64Chunk), &bcount) || bcount != sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &ds64_chunk, sizeof (DS64Chunk))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } got_ds64 = 1; WavpackLittleEndianToNative (&ds64_chunk, DS64ChunkFormat); if (debug_logging_mode) error_line ("DS64: riffSize = %lld, dataSize = %lld, sampleCount = %lld, table_length = %d", (long long) ds64_chunk.riffSize64, (long long) 
ds64_chunk.dataSize64, (long long) ds64_chunk.sampleCount64, ds64_chunk.tableLength); if (ds64_chunk.tableLength * sizeof (CS64Chunk) != chunk_header.ckSize - sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } while (ds64_chunk.tableLength--) { CS64Chunk cs64_chunk; if (!DoReadFile (infile, &cs64_chunk, sizeof (CS64Chunk), &bcount) || bcount != sizeof (CS64Chunk) || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &cs64_chunk, sizeof (CS64Chunk)))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } } } else if (!strncmp (chunk_header.ckID, "fmt ", 4)) { // if it's the format chunk, we want to get some info out of there and int supported = TRUE, format; // make sure it's a .wav file we can handle if (chunk_header.ckSize < 16 || chunk_header.ckSize > sizeof (WaveHeader) || !DoReadFile (infile, &WaveHeader, chunk_header.ckSize, &bcount) || bcount != chunk_header.ckSize) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &WaveHeader, chunk_header.ckSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&WaveHeader, WaveHeaderFormat); if (debug_logging_mode) { error_line ("format tag size = %d", chunk_header.ckSize); error_line ("FormatTag = %x, NumChannels = %d, BitsPerSample = %d", WaveHeader.FormatTag, WaveHeader.NumChannels, WaveHeader.BitsPerSample); error_line ("BlockAlign = %d, SampleRate = %d, BytesPerSecond = %d", WaveHeader.BlockAlign, WaveHeader.SampleRate, WaveHeader.BytesPerSecond); if (chunk_header.ckSize > 16) error_line ("cbSize = %d, ValidBitsPerSample = %d", WaveHeader.cbSize, WaveHeader.ValidBitsPerSample); if (chunk_header.ckSize > 20) error_line ("ChannelMask = %x, SubFormat = %d", WaveHeader.ChannelMask, WaveHeader.SubFormat); } if (chunk_header.ckSize > 16 && WaveHeader.cbSize == 2) config->qmode |= QMODE_ADOBE_MODE; format = (WaveHeader.FormatTag == 0xfffe && chunk_header.ckSize == 40) ? WaveHeader.SubFormat : WaveHeader.FormatTag; config->bits_per_sample = (chunk_header.ckSize == 40 && WaveHeader.ValidBitsPerSample) ? 
WaveHeader.ValidBitsPerSample : WaveHeader.BitsPerSample; if (format != 1 && format != 3) supported = FALSE; if (format == 3 && config->bits_per_sample != 32) supported = FALSE; if (!WaveHeader.NumChannels || WaveHeader.NumChannels > 256 || WaveHeader.BlockAlign / WaveHeader.NumChannels < (config->bits_per_sample + 7) / 8 || WaveHeader.BlockAlign / WaveHeader.NumChannels > 4 || WaveHeader.BlockAlign % WaveHeader.NumChannels) supported = FALSE; if (config->bits_per_sample < 1 || config->bits_per_sample > 32) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .WAV format!", infilename); return WAVPACK_SOFT_ERROR; } if (chunk_header.ckSize < 40) { if (!config->channel_mask && !(config->qmode & QMODE_CHANS_UNASSIGNED)) { if (WaveHeader.NumChannels <= 2) config->channel_mask = 0x5 - WaveHeader.NumChannels; else if (WaveHeader.NumChannels <= 18) config->channel_mask = (1 << WaveHeader.NumChannels) - 1; else config->channel_mask = 0x3ffff; } } else if (WaveHeader.ChannelMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this WAV file already has channel order information!"); return WAVPACK_SOFT_ERROR; } else if (WaveHeader.ChannelMask) config->channel_mask = WaveHeader.ChannelMask; if (format == 3) config->float_norm_exp = 127; else if ((config->qmode & QMODE_ADOBE_MODE) && WaveHeader.BlockAlign / WaveHeader.NumChannels == 4) { if (WaveHeader.BitsPerSample == 24) config->float_norm_exp = 127 + 23; else if (WaveHeader.BitsPerSample == 32) config->float_norm_exp = 127 + 15; } if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: normalized 32-bit floating point"); else if (config->float_norm_exp) error_line ("data format: 32-bit floating point (Audition %d:%d float type 1)", config->float_norm_exp - 126, 150 - config->float_norm_exp); else error_line ("data format: %d-bit integers stored in %d byte(s)", config->bits_per_sample, WaveHeader.BlockAlign / WaveHeader.NumChannels); } } else if (!strncmp (chunk_header.ckID, "data", 4)) { // on the data chunk, get size and exit loop int64_t data_chunk_size = (got_ds64 && chunk_header.ckSize == (uint32_t) -1) ? 
ds64_chunk.dataSize64 : chunk_header.ckSize; if (!WaveHeader.NumChannels || (is_rf64 && !got_ds64)) { // make sure we saw "fmt" and "ds64" chunks (if required) error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && infilesize - data_chunk_size > 16777216) { error_line ("this .WAV file has over 16 MB of extra RIFF data, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (config->qmode & QMODE_IGNORE_LENGTH) { if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / WaveHeader.BlockAlign; else total_samples = -1; } else { total_samples = data_chunk_size / WaveHeader.BlockAlign; if (got_ds64 && total_samples != ds64_chunk.sampleCount64) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (!total_samples) { error_line ("this .WAV file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } config->bytes_per_sample = WaveHeader.BlockAlign / WaveHeader.NumChannels; config->num_channels = WaveHeader.NumChannels; config->sample_rate = WaveHeader.SampleRate; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (chunk_header.ckSize + 1) & ~1L; char *buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", chunk_header.ckID [0], chunk_header.ckID [1], chunk_header.ckID [2], chunk_header.ckID [3], chunk_header.ckSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; } int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode) { int do_rf64 = 0, write_junk = 1, table_length = 0; ChunkHeader ds64hdr, datahdr, fmthdr; RiffChunkHeader riffhdr; DS64Chunk ds64_chunk; CS64Chunk cs64_chunk; JunkChunk junkchunk; WaveHeader wavhdr; uint32_t bcount; int64_t total_data_bytes, total_riff_bytes; int num_channels = WavpackGetNumChannels (wpc); int32_t channel_mask = WavpackGetChannelMask (wpc); int32_t sample_rate = WavpackGetSampleRate (wpc); int bytes_per_sample = WavpackGetBytesPerSample (wpc); int bits_per_sample = WavpackGetBitsPerSample (wpc); int format = WavpackGetFloatNormExp (wpc) ? 
3 : 1; int wavhdrsize = 16; if (format == 3 && WavpackGetFloatNormExp (wpc) != 127) { error_line ("can't create valid RIFF wav header for non-normalized floating data!"); return FALSE; } if (total_samples == -1) total_samples = 0x7ffff000 / (bytes_per_sample * num_channels); total_data_bytes = total_samples * bytes_per_sample * num_channels; if (total_data_bytes > 0xff000000) { if (debug_logging_mode) error_line ("total_data_bytes = %lld, so rf64", total_data_bytes); write_junk = 0; do_rf64 = 1; } else if (debug_logging_mode) error_line ("total_data_bytes = %lld, so riff", total_data_bytes); CLEAR (wavhdr); wavhdr.FormatTag = format; wavhdr.NumChannels = num_channels; wavhdr.SampleRate = sample_rate; wavhdr.BytesPerSecond = sample_rate * num_channels * bytes_per_sample; wavhdr.BlockAlign = bytes_per_sample * num_channels; wavhdr.BitsPerSample = bits_per_sample; if (num_channels > 2 || channel_mask != 0x5 - num_channels) { wavhdrsize = sizeof (wavhdr); wavhdr.cbSize = 22; wavhdr.ValidBitsPerSample = bits_per_sample; wavhdr.SubFormat = format; wavhdr.ChannelMask = channel_mask; wavhdr.FormatTag = 0xfffe; wavhdr.BitsPerSample = bytes_per_sample * 8; wavhdr.GUID [4] = 0x10; wavhdr.GUID [6] = 0x80; wavhdr.GUID [9] = 0xaa; wavhdr.GUID [11] = 0x38; wavhdr.GUID [12] = 0x9b; wavhdr.GUID [13] = 0x71; } strncpy (riffhdr.ckID, do_rf64 ? "RF64" : "RIFF", sizeof (riffhdr.ckID)); strncpy (riffhdr.formType, "WAVE", sizeof (riffhdr.formType)); total_riff_bytes = sizeof (riffhdr) + wavhdrsize + sizeof (datahdr) + ((total_data_bytes + 1) & ~(int64_t)1); if (do_rf64) total_riff_bytes += sizeof (ds64hdr) + sizeof (ds64_chunk); total_riff_bytes += table_length * sizeof (CS64Chunk); if (write_junk) total_riff_bytes += sizeof (junkchunk); strncpy (fmthdr.ckID, "fmt ", sizeof (fmthdr.ckID)); strncpy (datahdr.ckID, "data", sizeof (datahdr.ckID)); fmthdr.ckSize = wavhdrsize; if (write_junk) { CLEAR (junkchunk); strncpy (junkchunk.ckID, "junk", sizeof (junkchunk.ckID)); junkchunk.ckSize = sizeof (junkchunk) - 8; WavpackNativeToLittleEndian (&junkchunk, ChunkHeaderFormat); } if (do_rf64) { strncpy (ds64hdr.ckID, "ds64", sizeof (ds64hdr.ckID)); ds64hdr.ckSize = sizeof (ds64_chunk) + (table_length * sizeof (CS64Chunk)); CLEAR (ds64_chunk); ds64_chunk.riffSize64 = total_riff_bytes; ds64_chunk.dataSize64 = total_data_bytes; ds64_chunk.sampleCount64 = total_samples; ds64_chunk.tableLength = table_length; riffhdr.ckSize = (uint32_t) -1; datahdr.ckSize = (uint32_t) -1; WavpackNativeToLittleEndian (&ds64hdr, ChunkHeaderFormat); WavpackNativeToLittleEndian (&ds64_chunk, DS64ChunkFormat); } else { riffhdr.ckSize = (uint32_t) total_riff_bytes; datahdr.ckSize = (uint32_t) total_data_bytes; } // this "table" is just a dummy placeholder for testing (normally not written) if (table_length) { strncpy (cs64_chunk.ckID, "dmmy", sizeof (cs64_chunk.ckID)); cs64_chunk.chunkSize64 = 12345678; WavpackNativeToLittleEndian (&cs64_chunk, CS64ChunkFormat); } // write the RIFF chunks up to just before the data starts WavpackNativeToLittleEndian (&riffhdr, ChunkHeaderFormat); WavpackNativeToLittleEndian (&fmthdr, ChunkHeaderFormat); WavpackNativeToLittleEndian (&wavhdr, WaveHeaderFormat); WavpackNativeToLittleEndian (&datahdr, ChunkHeaderFormat); if (!DoWriteFile (outfile, &riffhdr, sizeof (riffhdr), &bcount) || bcount != sizeof (riffhdr) || (do_rf64 && (!DoWriteFile (outfile, &ds64hdr, sizeof (ds64hdr), &bcount) || bcount != sizeof (ds64hdr))) || (do_rf64 && (!DoWriteFile (outfile, &ds64_chunk, sizeof (ds64_chunk), &bcount) || bcount != sizeof 
(ds64_chunk)))) { error_line ("can't write .WAV data, disk probably full!"); return FALSE; } // again, this is normally not written except for testing while (table_length--) if (!DoWriteFile (outfile, &cs64_chunk, sizeof (cs64_chunk), &bcount) || bcount != sizeof (cs64_chunk)) { error_line ("can't write .WAV data, disk probably full!"); return FALSE; } if ((write_junk && (!DoWriteFile (outfile, &junkchunk, sizeof (junkchunk), &bcount) || bcount != sizeof (junkchunk))) || !DoWriteFile (outfile, &fmthdr, sizeof (fmthdr), &bcount) || bcount != sizeof (fmthdr) || !DoWriteFile (outfile, &wavhdr, wavhdrsize, &bcount) || bcount != wavhdrsize || !DoWriteFile (outfile, &datahdr, sizeof (datahdr), &bcount) || bcount != sizeof (datahdr)) { error_line ("can't write .WAV data, disk probably full!"); return FALSE; } return TRUE; }
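In the patched ParseRiffHeaderConfig() that follows, the unknown-chunk path validates bytes_to_copy (rejecting negative values and anything over 4194304 bytes) before calling malloc, where the original code allocated whatever size the file declared. A standalone sketch of that validate-then-allocate pattern; the helper name and the reuse of the 4 MB cap are illustrative assumptions, not WavPack API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_UNKNOWN_CHUNK 4194304   /* 4 MB cap, mirroring the patched check */

/* Pad an on-disk chunk size to an even byte count and allocate a copy
 * buffer, refusing sizes that a corrupt or hostile header could declare. */
static char *alloc_chunk_copy(uint32_t ck_size)
{
    int64_t bytes_to_copy = ((int64_t) ck_size + 1) & ~(int64_t) 1;

    if (bytes_to_copy < 0 || bytes_to_copy > MAX_UNKNOWN_CHUNK)
        return NULL;            /* reject instead of trusting the file */

    return malloc((size_t) bytes_to_copy);
}

int main(void)
{
    char *buff = alloc_chunk_copy(0xfffffffeU);   /* hostile declared size */

    if (buff == NULL) {
        fprintf(stderr, "chunk size rejected\n");
        return 1;
    }
    free(buff);
    return 0;
}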
//////////////////////////////////////////////////////////////////////////// // **** WAVPACK **** // // Hybrid Lossless Wavefile Compressor // // Copyright (c) 1998 - 2016 David Bryant. // // All Rights Reserved. // // Distributed under the BSD Software License (see license.txt) // //////////////////////////////////////////////////////////////////////////// // riff.c // This module is a helper to the WavPack command-line programs to support WAV files // (both MS standard and rf64 varients). #include <string.h> #include <stdlib.h> #include <fcntl.h> #include <math.h> #include <stdio.h> #include <ctype.h> #include "wavpack.h" #include "utils.h" #include "md5.h" #pragma pack(push,4) typedef struct { char ckID [4]; uint64_t chunkSize64; } CS64Chunk; typedef struct { uint64_t riffSize64, dataSize64, sampleCount64; uint32_t tableLength; } DS64Chunk; typedef struct { char ckID [4]; uint32_t ckSize; char junk [28]; } JunkChunk; #pragma pack(pop) #define CS64ChunkFormat "4D" #define DS64ChunkFormat "DDDL" #define WAVPACK_NO_ERROR 0 #define WAVPACK_SOFT_ERROR 1 #define WAVPACK_HARD_ERROR 2 extern int debug_logging_mode; int ParseRiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int is_rf64 = !strncmp (fourcc, "RF64", 4), got_ds64 = 0; int64_t total_samples = 0, infilesize; RiffChunkHeader riff_chunk_header; ChunkHeader chunk_header; WaveHeader WaveHeader; DS64Chunk ds64_chunk; uint32_t bcount; CLEAR (WaveHeader); CLEAR (ds64_chunk); infilesize = DoGetFileSize (infile); if (!is_rf64 && infilesize >= 4294967296LL && !(config->qmode & QMODE_IGNORE_LENGTH)) { error_line ("can't handle .WAV files larger than 4 GB (non-standard)!"); return WAVPACK_SOFT_ERROR; } memcpy (&riff_chunk_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &riff_chunk_header) + 4, sizeof (RiffChunkHeader) - 4, &bcount) || bcount != sizeof (RiffChunkHeader) - 4 || strncmp (riff_chunk_header.formType, "WAVE", 4))) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &riff_chunk_header, sizeof (RiffChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } // loop through all elements of the RIFF wav header // (until the data chuck) and copy them to the output file while (1) { if (!DoReadFile (infile, &chunk_header, sizeof (ChunkHeader), &bcount) || bcount != sizeof (ChunkHeader)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &chunk_header, sizeof (ChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&chunk_header, ChunkHeaderFormat); if (!strncmp (chunk_header.ckID, "ds64", 4)) { if (chunk_header.ckSize < sizeof (DS64Chunk) || !DoReadFile (infile, &ds64_chunk, sizeof (DS64Chunk), &bcount) || bcount != sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &ds64_chunk, sizeof (DS64Chunk))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } got_ds64 = 1; WavpackLittleEndianToNative (&ds64_chunk, DS64ChunkFormat); if (debug_logging_mode) error_line ("DS64: riffSize = %lld, dataSize = %lld, sampleCount = %lld, table_length = %d", (long long) ds64_chunk.riffSize64, (long long) 
ds64_chunk.dataSize64, (long long) ds64_chunk.sampleCount64, ds64_chunk.tableLength); if (ds64_chunk.tableLength * sizeof (CS64Chunk) != chunk_header.ckSize - sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } while (ds64_chunk.tableLength--) { CS64Chunk cs64_chunk; if (!DoReadFile (infile, &cs64_chunk, sizeof (CS64Chunk), &bcount) || bcount != sizeof (CS64Chunk) || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &cs64_chunk, sizeof (CS64Chunk)))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } } } else if (!strncmp (chunk_header.ckID, "fmt ", 4)) { // if it's the format chunk, we want to get some info out of there and int supported = TRUE, format; // make sure it's a .wav file we can handle if (chunk_header.ckSize < 16 || chunk_header.ckSize > sizeof (WaveHeader) || !DoReadFile (infile, &WaveHeader, chunk_header.ckSize, &bcount) || bcount != chunk_header.ckSize) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &WaveHeader, chunk_header.ckSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&WaveHeader, WaveHeaderFormat); if (debug_logging_mode) { error_line ("format tag size = %d", chunk_header.ckSize); error_line ("FormatTag = %x, NumChannels = %d, BitsPerSample = %d", WaveHeader.FormatTag, WaveHeader.NumChannels, WaveHeader.BitsPerSample); error_line ("BlockAlign = %d, SampleRate = %d, BytesPerSecond = %d", WaveHeader.BlockAlign, WaveHeader.SampleRate, WaveHeader.BytesPerSecond); if (chunk_header.ckSize > 16) error_line ("cbSize = %d, ValidBitsPerSample = %d", WaveHeader.cbSize, WaveHeader.ValidBitsPerSample); if (chunk_header.ckSize > 20) error_line ("ChannelMask = %x, SubFormat = %d", WaveHeader.ChannelMask, WaveHeader.SubFormat); } if (chunk_header.ckSize > 16 && WaveHeader.cbSize == 2) config->qmode |= QMODE_ADOBE_MODE; format = (WaveHeader.FormatTag == 0xfffe && chunk_header.ckSize == 40) ? WaveHeader.SubFormat : WaveHeader.FormatTag; config->bits_per_sample = (chunk_header.ckSize == 40 && WaveHeader.ValidBitsPerSample) ? 
WaveHeader.ValidBitsPerSample : WaveHeader.BitsPerSample; if (format != 1 && format != 3) supported = FALSE; if (format == 3 && config->bits_per_sample != 32) supported = FALSE; if (!WaveHeader.NumChannels || WaveHeader.NumChannels > 256 || WaveHeader.BlockAlign / WaveHeader.NumChannels < (config->bits_per_sample + 7) / 8 || WaveHeader.BlockAlign / WaveHeader.NumChannels > 4 || WaveHeader.BlockAlign % WaveHeader.NumChannels) supported = FALSE; if (config->bits_per_sample < 1 || config->bits_per_sample > 32) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .WAV format!", infilename); return WAVPACK_SOFT_ERROR; } if (chunk_header.ckSize < 40) { if (!config->channel_mask && !(config->qmode & QMODE_CHANS_UNASSIGNED)) { if (WaveHeader.NumChannels <= 2) config->channel_mask = 0x5 - WaveHeader.NumChannels; else if (WaveHeader.NumChannels <= 18) config->channel_mask = (1 << WaveHeader.NumChannels) - 1; else config->channel_mask = 0x3ffff; } } else if (WaveHeader.ChannelMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this WAV file already has channel order information!"); return WAVPACK_SOFT_ERROR; } else if (WaveHeader.ChannelMask) config->channel_mask = WaveHeader.ChannelMask; if (format == 3) config->float_norm_exp = 127; else if ((config->qmode & QMODE_ADOBE_MODE) && WaveHeader.BlockAlign / WaveHeader.NumChannels == 4) { if (WaveHeader.BitsPerSample == 24) config->float_norm_exp = 127 + 23; else if (WaveHeader.BitsPerSample == 32) config->float_norm_exp = 127 + 15; } if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: normalized 32-bit floating point"); else if (config->float_norm_exp) error_line ("data format: 32-bit floating point (Audition %d:%d float type 1)", config->float_norm_exp - 126, 150 - config->float_norm_exp); else error_line ("data format: %d-bit integers stored in %d byte(s)", config->bits_per_sample, WaveHeader.BlockAlign / WaveHeader.NumChannels); } } else if (!strncmp (chunk_header.ckID, "data", 4)) { // on the data chunk, get size and exit loop int64_t data_chunk_size = (got_ds64 && chunk_header.ckSize == (uint32_t) -1) ? 
ds64_chunk.dataSize64 : chunk_header.ckSize; if (!WaveHeader.NumChannels || (is_rf64 && !got_ds64)) { // make sure we saw "fmt" and "ds64" chunks (if required) error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && infilesize - data_chunk_size > 16777216) { error_line ("this .WAV file has over 16 MB of extra RIFF data, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (config->qmode & QMODE_IGNORE_LENGTH) { if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / WaveHeader.BlockAlign; else total_samples = -1; } else { total_samples = data_chunk_size / WaveHeader.BlockAlign; if (got_ds64 && total_samples != ds64_chunk.sampleCount64) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (!total_samples) { error_line ("this .WAV file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } config->bytes_per_sample = WaveHeader.BlockAlign / WaveHeader.NumChannels; config->num_channels = WaveHeader.NumChannels; config->sample_rate = WaveHeader.SampleRate; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (chunk_header.ckSize + 1) & ~1L; char *buff; if (bytes_to_copy < 0 || bytes_to_copy > 4194304) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", chunk_header.ckID [0], chunk_header.ckID [1], chunk_header.ckID [2], chunk_header.ckID [3], chunk_header.ckSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; } int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode) { int do_rf64 = 0, write_junk = 1, table_length = 0; ChunkHeader ds64hdr, datahdr, fmthdr; RiffChunkHeader riffhdr; DS64Chunk ds64_chunk; CS64Chunk cs64_chunk; JunkChunk junkchunk; WaveHeader wavhdr; uint32_t bcount; int64_t total_data_bytes, total_riff_bytes; int num_channels = WavpackGetNumChannels (wpc); int32_t channel_mask = WavpackGetChannelMask (wpc); int32_t sample_rate = WavpackGetSampleRate (wpc); int bytes_per_sample = WavpackGetBytesPerSample (wpc); int bits_per_sample = WavpackGetBitsPerSample (wpc); int format = WavpackGetFloatNormExp (wpc) ? 
3 : 1; int wavhdrsize = 16; if (format == 3 && WavpackGetFloatNormExp (wpc) != 127) { error_line ("can't create valid RIFF wav header for non-normalized floating data!"); return FALSE; } if (total_samples == -1) total_samples = 0x7ffff000 / (bytes_per_sample * num_channels); total_data_bytes = total_samples * bytes_per_sample * num_channels; if (total_data_bytes > 0xff000000) { if (debug_logging_mode) error_line ("total_data_bytes = %lld, so rf64", total_data_bytes); write_junk = 0; do_rf64 = 1; } else if (debug_logging_mode) error_line ("total_data_bytes = %lld, so riff", total_data_bytes); CLEAR (wavhdr); wavhdr.FormatTag = format; wavhdr.NumChannels = num_channels; wavhdr.SampleRate = sample_rate; wavhdr.BytesPerSecond = sample_rate * num_channels * bytes_per_sample; wavhdr.BlockAlign = bytes_per_sample * num_channels; wavhdr.BitsPerSample = bits_per_sample; if (num_channels > 2 || channel_mask != 0x5 - num_channels) { wavhdrsize = sizeof (wavhdr); wavhdr.cbSize = 22; wavhdr.ValidBitsPerSample = bits_per_sample; wavhdr.SubFormat = format; wavhdr.ChannelMask = channel_mask; wavhdr.FormatTag = 0xfffe; wavhdr.BitsPerSample = bytes_per_sample * 8; wavhdr.GUID [4] = 0x10; wavhdr.GUID [6] = 0x80; wavhdr.GUID [9] = 0xaa; wavhdr.GUID [11] = 0x38; wavhdr.GUID [12] = 0x9b; wavhdr.GUID [13] = 0x71; } strncpy (riffhdr.ckID, do_rf64 ? "RF64" : "RIFF", sizeof (riffhdr.ckID)); strncpy (riffhdr.formType, "WAVE", sizeof (riffhdr.formType)); total_riff_bytes = sizeof (riffhdr) + wavhdrsize + sizeof (datahdr) + ((total_data_bytes + 1) & ~(int64_t)1); if (do_rf64) total_riff_bytes += sizeof (ds64hdr) + sizeof (ds64_chunk); total_riff_bytes += table_length * sizeof (CS64Chunk); if (write_junk) total_riff_bytes += sizeof (junkchunk); strncpy (fmthdr.ckID, "fmt ", sizeof (fmthdr.ckID)); strncpy (datahdr.ckID, "data", sizeof (datahdr.ckID)); fmthdr.ckSize = wavhdrsize; if (write_junk) { CLEAR (junkchunk); strncpy (junkchunk.ckID, "junk", sizeof (junkchunk.ckID)); junkchunk.ckSize = sizeof (junkchunk) - 8; WavpackNativeToLittleEndian (&junkchunk, ChunkHeaderFormat); } if (do_rf64) { strncpy (ds64hdr.ckID, "ds64", sizeof (ds64hdr.ckID)); ds64hdr.ckSize = sizeof (ds64_chunk) + (table_length * sizeof (CS64Chunk)); CLEAR (ds64_chunk); ds64_chunk.riffSize64 = total_riff_bytes; ds64_chunk.dataSize64 = total_data_bytes; ds64_chunk.sampleCount64 = total_samples; ds64_chunk.tableLength = table_length; riffhdr.ckSize = (uint32_t) -1; datahdr.ckSize = (uint32_t) -1; WavpackNativeToLittleEndian (&ds64hdr, ChunkHeaderFormat); WavpackNativeToLittleEndian (&ds64_chunk, DS64ChunkFormat); } else { riffhdr.ckSize = (uint32_t) total_riff_bytes; datahdr.ckSize = (uint32_t) total_data_bytes; } // this "table" is just a dummy placeholder for testing (normally not written) if (table_length) { strncpy (cs64_chunk.ckID, "dmmy", sizeof (cs64_chunk.ckID)); cs64_chunk.chunkSize64 = 12345678; WavpackNativeToLittleEndian (&cs64_chunk, CS64ChunkFormat); } // write the RIFF chunks up to just before the data starts WavpackNativeToLittleEndian (&riffhdr, ChunkHeaderFormat); WavpackNativeToLittleEndian (&fmthdr, ChunkHeaderFormat); WavpackNativeToLittleEndian (&wavhdr, WaveHeaderFormat); WavpackNativeToLittleEndian (&datahdr, ChunkHeaderFormat); if (!DoWriteFile (outfile, &riffhdr, sizeof (riffhdr), &bcount) || bcount != sizeof (riffhdr) || (do_rf64 && (!DoWriteFile (outfile, &ds64hdr, sizeof (ds64hdr), &bcount) || bcount != sizeof (ds64hdr))) || (do_rf64 && (!DoWriteFile (outfile, &ds64_chunk, sizeof (ds64_chunk), &bcount) || bcount != sizeof 
(ds64_chunk)))) { error_line ("can't write .WAV data, disk probably full!"); return FALSE; } // again, this is normally not written except for testing while (table_length--) if (!DoWriteFile (outfile, &cs64_chunk, sizeof (cs64_chunk), &bcount) || bcount != sizeof (cs64_chunk)) { error_line ("can't write .WAV data, disk probably full!"); return FALSE; } if ((write_junk && (!DoWriteFile (outfile, &junkchunk, sizeof (junkchunk), &bcount) || bcount != sizeof (junkchunk))) || !DoWriteFile (outfile, &fmthdr, sizeof (fmthdr), &bcount) || bcount != sizeof (fmthdr) || !DoWriteFile (outfile, &wavhdr, wavhdrsize, &bcount) || bcount != wavhdrsize || !DoWriteFile (outfile, &datahdr, sizeof (datahdr), &bcount) || bcount != sizeof (datahdr)) { error_line ("can't write .WAV data, disk probably full!"); return FALSE; } return TRUE; }
int ParseRiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int is_rf64 = !strncmp (fourcc, "RF64", 4), got_ds64 = 0; int64_t total_samples = 0, infilesize; RiffChunkHeader riff_chunk_header; ChunkHeader chunk_header; WaveHeader WaveHeader; DS64Chunk ds64_chunk; uint32_t bcount; CLEAR (WaveHeader); CLEAR (ds64_chunk); infilesize = DoGetFileSize (infile); if (!is_rf64 && infilesize >= 4294967296LL && !(config->qmode & QMODE_IGNORE_LENGTH)) { error_line ("can't handle .WAV files larger than 4 GB (non-standard)!"); return WAVPACK_SOFT_ERROR; } memcpy (&riff_chunk_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &riff_chunk_header) + 4, sizeof (RiffChunkHeader) - 4, &bcount) || bcount != sizeof (RiffChunkHeader) - 4 || strncmp (riff_chunk_header.formType, "WAVE", 4))) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &riff_chunk_header, sizeof (RiffChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } // loop through all elements of the RIFF wav header // (until the data chuck) and copy them to the output file while (1) { if (!DoReadFile (infile, &chunk_header, sizeof (ChunkHeader), &bcount) || bcount != sizeof (ChunkHeader)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &chunk_header, sizeof (ChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&chunk_header, ChunkHeaderFormat); if (!strncmp (chunk_header.ckID, "ds64", 4)) { if (chunk_header.ckSize < sizeof (DS64Chunk) || !DoReadFile (infile, &ds64_chunk, sizeof (DS64Chunk), &bcount) || bcount != sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &ds64_chunk, sizeof (DS64Chunk))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } got_ds64 = 1; WavpackLittleEndianToNative (&ds64_chunk, DS64ChunkFormat); if (debug_logging_mode) error_line ("DS64: riffSize = %lld, dataSize = %lld, sampleCount = %lld, table_length = %d", (long long) ds64_chunk.riffSize64, (long long) ds64_chunk.dataSize64, (long long) ds64_chunk.sampleCount64, ds64_chunk.tableLength); if (ds64_chunk.tableLength * sizeof (CS64Chunk) != chunk_header.ckSize - sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } while (ds64_chunk.tableLength--) { CS64Chunk cs64_chunk; if (!DoReadFile (infile, &cs64_chunk, sizeof (CS64Chunk), &bcount) || bcount != sizeof (CS64Chunk) || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &cs64_chunk, sizeof (CS64Chunk)))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } } } else if (!strncmp (chunk_header.ckID, "fmt ", 4)) { // if it's the format chunk, we want to get some info out of there and int supported = TRUE, format; // make sure it's a .wav file we can handle if (chunk_header.ckSize < 16 || chunk_header.ckSize > sizeof (WaveHeader) || !DoReadFile (infile, &WaveHeader, chunk_header.ckSize, &bcount) || bcount != chunk_header.ckSize) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && 
!WavpackAddWrapper (wpc, &WaveHeader, chunk_header.ckSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&WaveHeader, WaveHeaderFormat); if (debug_logging_mode) { error_line ("format tag size = %d", chunk_header.ckSize); error_line ("FormatTag = %x, NumChannels = %d, BitsPerSample = %d", WaveHeader.FormatTag, WaveHeader.NumChannels, WaveHeader.BitsPerSample); error_line ("BlockAlign = %d, SampleRate = %d, BytesPerSecond = %d", WaveHeader.BlockAlign, WaveHeader.SampleRate, WaveHeader.BytesPerSecond); if (chunk_header.ckSize > 16) error_line ("cbSize = %d, ValidBitsPerSample = %d", WaveHeader.cbSize, WaveHeader.ValidBitsPerSample); if (chunk_header.ckSize > 20) error_line ("ChannelMask = %x, SubFormat = %d", WaveHeader.ChannelMask, WaveHeader.SubFormat); } if (chunk_header.ckSize > 16 && WaveHeader.cbSize == 2) config->qmode |= QMODE_ADOBE_MODE; format = (WaveHeader.FormatTag == 0xfffe && chunk_header.ckSize == 40) ? WaveHeader.SubFormat : WaveHeader.FormatTag; config->bits_per_sample = (chunk_header.ckSize == 40 && WaveHeader.ValidBitsPerSample) ? WaveHeader.ValidBitsPerSample : WaveHeader.BitsPerSample; if (format != 1 && format != 3) supported = FALSE; if (format == 3 && config->bits_per_sample != 32) supported = FALSE; if (!WaveHeader.NumChannels || WaveHeader.NumChannels > 256 || WaveHeader.BlockAlign / WaveHeader.NumChannels < (config->bits_per_sample + 7) / 8 || WaveHeader.BlockAlign / WaveHeader.NumChannels > 4 || WaveHeader.BlockAlign % WaveHeader.NumChannels) supported = FALSE; if (config->bits_per_sample < 1 || config->bits_per_sample > 32) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .WAV format!", infilename); return WAVPACK_SOFT_ERROR; } if (chunk_header.ckSize < 40) { if (!config->channel_mask && !(config->qmode & QMODE_CHANS_UNASSIGNED)) { if (WaveHeader.NumChannels <= 2) config->channel_mask = 0x5 - WaveHeader.NumChannels; else if (WaveHeader.NumChannels <= 18) config->channel_mask = (1 << WaveHeader.NumChannels) - 1; else config->channel_mask = 0x3ffff; } } else if (WaveHeader.ChannelMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this WAV file already has channel order information!"); return WAVPACK_SOFT_ERROR; } else if (WaveHeader.ChannelMask) config->channel_mask = WaveHeader.ChannelMask; if (format == 3) config->float_norm_exp = 127; else if ((config->qmode & QMODE_ADOBE_MODE) && WaveHeader.BlockAlign / WaveHeader.NumChannels == 4) { if (WaveHeader.BitsPerSample == 24) config->float_norm_exp = 127 + 23; else if (WaveHeader.BitsPerSample == 32) config->float_norm_exp = 127 + 15; } if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: normalized 32-bit floating point"); else if (config->float_norm_exp) error_line ("data format: 32-bit floating point (Audition %d:%d float type 1)", config->float_norm_exp - 126, 150 - config->float_norm_exp); else error_line ("data format: %d-bit integers stored in %d byte(s)", config->bits_per_sample, WaveHeader.BlockAlign / WaveHeader.NumChannels); } } else if (!strncmp (chunk_header.ckID, "data", 4)) { // on the data chunk, get size and exit loop int64_t data_chunk_size = (got_ds64 && chunk_header.ckSize == (uint32_t) -1) ? 
ds64_chunk.dataSize64 : chunk_header.ckSize; if (!WaveHeader.NumChannels || (is_rf64 && !got_ds64)) { // make sure we saw "fmt" and "ds64" chunks (if required) error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && infilesize - data_chunk_size > 16777216) { error_line ("this .WAV file has over 16 MB of extra RIFF data, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (config->qmode & QMODE_IGNORE_LENGTH) { if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / WaveHeader.BlockAlign; else total_samples = -1; } else { total_samples = data_chunk_size / WaveHeader.BlockAlign; if (got_ds64 && total_samples != ds64_chunk.sampleCount64) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (!total_samples) { error_line ("this .WAV file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } config->bytes_per_sample = WaveHeader.BlockAlign / WaveHeader.NumChannels; config->num_channels = WaveHeader.NumChannels; config->sample_rate = WaveHeader.SampleRate; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (chunk_header.ckSize + 1) & ~1L; char *buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", chunk_header.ckID [0], chunk_header.ckID [1], chunk_header.ckID [2], chunk_header.ckID [3], chunk_header.ckSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; }
int ParseRiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { int is_rf64 = !strncmp (fourcc, "RF64", 4), got_ds64 = 0; int64_t total_samples = 0, infilesize; RiffChunkHeader riff_chunk_header; ChunkHeader chunk_header; WaveHeader WaveHeader; DS64Chunk ds64_chunk; uint32_t bcount; CLEAR (WaveHeader); CLEAR (ds64_chunk); infilesize = DoGetFileSize (infile); if (!is_rf64 && infilesize >= 4294967296LL && !(config->qmode & QMODE_IGNORE_LENGTH)) { error_line ("can't handle .WAV files larger than 4 GB (non-standard)!"); return WAVPACK_SOFT_ERROR; } memcpy (&riff_chunk_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &riff_chunk_header) + 4, sizeof (RiffChunkHeader) - 4, &bcount) || bcount != sizeof (RiffChunkHeader) - 4 || strncmp (riff_chunk_header.formType, "WAVE", 4))) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &riff_chunk_header, sizeof (RiffChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } // loop through all elements of the RIFF wav header // (until the data chuck) and copy them to the output file while (1) { if (!DoReadFile (infile, &chunk_header, sizeof (ChunkHeader), &bcount) || bcount != sizeof (ChunkHeader)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &chunk_header, sizeof (ChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&chunk_header, ChunkHeaderFormat); if (!strncmp (chunk_header.ckID, "ds64", 4)) { if (chunk_header.ckSize < sizeof (DS64Chunk) || !DoReadFile (infile, &ds64_chunk, sizeof (DS64Chunk), &bcount) || bcount != sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &ds64_chunk, sizeof (DS64Chunk))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } got_ds64 = 1; WavpackLittleEndianToNative (&ds64_chunk, DS64ChunkFormat); if (debug_logging_mode) error_line ("DS64: riffSize = %lld, dataSize = %lld, sampleCount = %lld, table_length = %d", (long long) ds64_chunk.riffSize64, (long long) ds64_chunk.dataSize64, (long long) ds64_chunk.sampleCount64, ds64_chunk.tableLength); if (ds64_chunk.tableLength * sizeof (CS64Chunk) != chunk_header.ckSize - sizeof (DS64Chunk)) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } while (ds64_chunk.tableLength--) { CS64Chunk cs64_chunk; if (!DoReadFile (infile, &cs64_chunk, sizeof (CS64Chunk), &bcount) || bcount != sizeof (CS64Chunk) || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &cs64_chunk, sizeof (CS64Chunk)))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } } } else if (!strncmp (chunk_header.ckID, "fmt ", 4)) { // if it's the format chunk, we want to get some info out of there and int supported = TRUE, format; // make sure it's a .wav file we can handle if (chunk_header.ckSize < 16 || chunk_header.ckSize > sizeof (WaveHeader) || !DoReadFile (infile, &WaveHeader, chunk_header.ckSize, &bcount) || bcount != chunk_header.ckSize) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && 
!WavpackAddWrapper (wpc, &WaveHeader, chunk_header.ckSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackLittleEndianToNative (&WaveHeader, WaveHeaderFormat); if (debug_logging_mode) { error_line ("format tag size = %d", chunk_header.ckSize); error_line ("FormatTag = %x, NumChannels = %d, BitsPerSample = %d", WaveHeader.FormatTag, WaveHeader.NumChannels, WaveHeader.BitsPerSample); error_line ("BlockAlign = %d, SampleRate = %d, BytesPerSecond = %d", WaveHeader.BlockAlign, WaveHeader.SampleRate, WaveHeader.BytesPerSecond); if (chunk_header.ckSize > 16) error_line ("cbSize = %d, ValidBitsPerSample = %d", WaveHeader.cbSize, WaveHeader.ValidBitsPerSample); if (chunk_header.ckSize > 20) error_line ("ChannelMask = %x, SubFormat = %d", WaveHeader.ChannelMask, WaveHeader.SubFormat); } if (chunk_header.ckSize > 16 && WaveHeader.cbSize == 2) config->qmode |= QMODE_ADOBE_MODE; format = (WaveHeader.FormatTag == 0xfffe && chunk_header.ckSize == 40) ? WaveHeader.SubFormat : WaveHeader.FormatTag; config->bits_per_sample = (chunk_header.ckSize == 40 && WaveHeader.ValidBitsPerSample) ? WaveHeader.ValidBitsPerSample : WaveHeader.BitsPerSample; if (format != 1 && format != 3) supported = FALSE; if (format == 3 && config->bits_per_sample != 32) supported = FALSE; if (!WaveHeader.NumChannels || WaveHeader.NumChannels > 256 || WaveHeader.BlockAlign / WaveHeader.NumChannels < (config->bits_per_sample + 7) / 8 || WaveHeader.BlockAlign / WaveHeader.NumChannels > 4 || WaveHeader.BlockAlign % WaveHeader.NumChannels) supported = FALSE; if (config->bits_per_sample < 1 || config->bits_per_sample > 32) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .WAV format!", infilename); return WAVPACK_SOFT_ERROR; } if (chunk_header.ckSize < 40) { if (!config->channel_mask && !(config->qmode & QMODE_CHANS_UNASSIGNED)) { if (WaveHeader.NumChannels <= 2) config->channel_mask = 0x5 - WaveHeader.NumChannels; else if (WaveHeader.NumChannels <= 18) config->channel_mask = (1 << WaveHeader.NumChannels) - 1; else config->channel_mask = 0x3ffff; } } else if (WaveHeader.ChannelMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) { error_line ("this WAV file already has channel order information!"); return WAVPACK_SOFT_ERROR; } else if (WaveHeader.ChannelMask) config->channel_mask = WaveHeader.ChannelMask; if (format == 3) config->float_norm_exp = 127; else if ((config->qmode & QMODE_ADOBE_MODE) && WaveHeader.BlockAlign / WaveHeader.NumChannels == 4) { if (WaveHeader.BitsPerSample == 24) config->float_norm_exp = 127 + 23; else if (WaveHeader.BitsPerSample == 32) config->float_norm_exp = 127 + 15; } if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: normalized 32-bit floating point"); else if (config->float_norm_exp) error_line ("data format: 32-bit floating point (Audition %d:%d float type 1)", config->float_norm_exp - 126, 150 - config->float_norm_exp); else error_line ("data format: %d-bit integers stored in %d byte(s)", config->bits_per_sample, WaveHeader.BlockAlign / WaveHeader.NumChannels); } } else if (!strncmp (chunk_header.ckID, "data", 4)) { // on the data chunk, get size and exit loop int64_t data_chunk_size = (got_ds64 && chunk_header.ckSize == (uint32_t) -1) ? 
ds64_chunk.dataSize64 : chunk_header.ckSize; if (!WaveHeader.NumChannels || (is_rf64 && !got_ds64)) { // make sure we saw "fmt" and "ds64" chunks (if required) error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) && infilesize - data_chunk_size > 16777216) { error_line ("this .WAV file has over 16 MB of extra RIFF data, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (config->qmode & QMODE_IGNORE_LENGTH) { if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / WaveHeader.BlockAlign; else total_samples = -1; } else { total_samples = data_chunk_size / WaveHeader.BlockAlign; if (got_ds64 && total_samples != ds64_chunk.sampleCount64) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } if (!total_samples) { error_line ("this .WAV file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } config->bytes_per_sample = WaveHeader.BlockAlign / WaveHeader.NumChannels; config->num_channels = WaveHeader.NumChannels; config->sample_rate = WaveHeader.SampleRate; break; } else { // just copy unknown chunks to output file int bytes_to_copy = (chunk_header.ckSize + 1) & ~1L; char *buff; if (bytes_to_copy < 0 || bytes_to_copy > 4194304) { error_line ("%s is not a valid .WAV file!", infilename); return WAVPACK_SOFT_ERROR; } buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", chunk_header.ckID [0], chunk_header.ckID [1], chunk_header.ckID [2], chunk_header.ckID [3], chunk_header.ckSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) { error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } return WAVPACK_NO_ERROR; }
{'added': [(289, ' char *buff;'), (290, ''), (291, ' if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {'), (292, ' error_line ("%s is not a valid .WAV file!", infilename);'), (293, ' return WAVPACK_SOFT_ERROR;'), (294, ' }'), (295, ''), (296, ' buff = malloc (bytes_to_copy);')], 'deleted': [(289, ' char *buff = malloc (bytes_to_copy);')]}
8
1
361
2721
https://github.com/dbry/WavPack
CVE-2018-10538
['CWE-787']
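The diff field of this record shows the whole fix for CVE-2018-10538: before calling malloc in the unknown-chunk path, ParseRiffHeaderConfig now rejects a bytes_to_copy value that is negative or larger than 4194304. The fragment below is a minimal standalone sketch of that same pattern, with simplified stand-in types and plain stdio instead of the WavPack helpers (ChunkHeader, DoReadFile, WavpackAddWrapper), so it is an illustration of the hardening, not the library code itself.

/* Minimal sketch of the CVE-2018-10538 hardening shown in the diff above:
 * validate an attacker-controlled chunk size before using it as an
 * allocation/read length.  The 4194304-byte cap and the rounding to an even
 * size mirror the patched ParseRiffHeaderConfig(); the surrounding types and
 * I/O are simplified stand-ins, not the WavPack API. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int copy_unknown_chunk (FILE *in, uint32_t ck_size)
{
    int bytes_to_copy = (ck_size + 1) & ~1L;    /* pad to an even byte count */
    char *buff;

    /* the added check: a crafted ckSize can make bytes_to_copy negative or
     * huge, and the later allocation/read can then write out of bounds
     * (CWE-787), so treat such a chunk as an invalid .WAV file */
    if (bytes_to_copy < 0 || bytes_to_copy > 4194304)
        return -1;

    buff = malloc (bytes_to_copy);
    if (!buff)
        return -1;

    if (fread (buff, 1, bytes_to_copy, in) != (size_t) bytes_to_copy) {
        free (buff);
        return -1;
    }

    /* ... hand the chunk data to the wrapper here ... */
    free (buff);
    return 0;
}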
sas_expander.c
smp_task_done
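In the source listing that follows, smp_execute_task_sg() arms a one-shot timer for each SMP task; smp_task_done() only signals the waiter if it wins the race to cancel that timer, and smp_task_timedout() otherwise marks the task aborted and signals it instead. The fragment below is a hypothetical user-space analogue of that hand-off, written with pthreads purely for illustration; the names and locking scheme are invented and it is not the libsas implementation.

/* Hypothetical user-space analogue of the smp_task_done()/smp_task_timedout()
 * pair in the listing below: whichever side wins the race to disarm the
 * timeout is the one that signals the waiter, so the completion fires once.
 * All names and the pthread plumbing are invented for illustration. */
#include <pthread.h>
#include <stdbool.h>

struct smp_task_sketch {
    pthread_mutex_t lock;
    pthread_cond_t  completion;
    bool timer_armed;   /* stands in for the slow_task timer     */
    bool aborted;       /* stands in for SAS_TASK_STATE_ABORTED  */
    bool done;          /* completion has been signalled          */
};

/* del_timer() analogue: disarm the timeout, report whether it was still armed */
static bool sketch_del_timer(struct smp_task_sketch *t)
{
    bool was_armed;

    pthread_mutex_lock(&t->lock);
    was_armed = t->timer_armed;
    t->timer_armed = false;
    pthread_mutex_unlock(&t->lock);
    return was_armed;
}

/* timeout path: if the task has not finished, mark it aborted, then wake the waiter */
static void sketch_task_timedout(struct smp_task_sketch *t)
{
    pthread_mutex_lock(&t->lock);
    if (!t->done)
        t->aborted = true;
    t->done = true;
    pthread_cond_signal(&t->completion);
    pthread_mutex_unlock(&t->lock);
}

/* completion path: only signal if we managed to cancel the pending timeout,
 * mirroring smp_task_done()'s "if (!del_timer(...)) return;" */
static void sketch_task_done(struct smp_task_sketch *t)
{
    if (!sketch_del_timer(t))
        return;
    pthread_mutex_lock(&t->lock);
    t->done = true;
    pthread_cond_signal(&t->completion);
    pthread_mutex_unlock(&t->lock);
}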
/* * Serial Attached SCSI (SAS) Expander discovery and configuration * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/scatterlist.h> #include <linux/blkdev.h> #include <linux/slab.h> #include "sas_internal.h" #include <scsi/sas_ata.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" static int sas_discover_expander(struct domain_device *dev); static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include); static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); /* ---------- SMP task management ---------- */ static void smp_task_timedout(struct timer_list *t) { struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); complete(&task->slow_task->completion); } static void smp_task_done(struct sas_task *task) { if (!del_timer(&task->slow_task->timer)) return; complete(&task->slow_task->completion); } /* Give it some long enough timeout. In seconds. 
*/ #define SMP_TIMEOUT 10 static int smp_execute_task_sg(struct domain_device *dev, struct scatterlist *req, struct scatterlist *resp) { int res, retry; struct sas_task *task = NULL; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); mutex_lock(&dev->ex_dev.cmd_mutex); for (retry = 0; retry < 3; retry++) { if (test_bit(SAS_DEV_GONE, &dev->state)) { res = -ECOMM; break; } task = sas_alloc_slow_task(GFP_KERNEL); if (!task) { res = -ENOMEM; break; } task->dev = dev; task->task_proto = dev->tproto; task->smp_task.smp_req = *req; task->smp_task.smp_resp = *resp; task->task_done = smp_task_done; task->slow_task->timer.function = smp_task_timedout; task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; add_timer(&task->slow_task->timer); res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { del_timer(&task->slow_task->timer); SAS_DPRINTK("executing SMP task failed:%d\n", res); break; } wait_for_completion(&task->slow_task->completion); res = -ECOMM; if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { SAS_DPRINTK("smp task timed out or aborted\n"); i->dft->lldd_abort_task(task); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { SAS_DPRINTK("SMP task aborted and not done\n"); break; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = 0; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { res = -EMSGSIZE; break; } if (task->task_status.resp == SAS_TASK_UNDELIVERED && task->task_status.stat == SAS_DEVICE_UNKNOWN) break; else { SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " "status 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat); sas_free_task(task); task = NULL; } } mutex_unlock(&dev->ex_dev.cmd_mutex); BUG_ON(retry == 3 && task != NULL); sas_free_task(task); return res; } static int smp_execute_task(struct domain_device *dev, void *req, int req_size, void *resp, int resp_size) { struct scatterlist req_sg; struct scatterlist resp_sg; sg_init_one(&req_sg, req, req_size); sg_init_one(&resp_sg, resp, resp_size); return smp_execute_task_sg(dev, &req_sg, &resp_sg); } /* ---------- Allocations ---------- */ static inline void *alloc_smp_req(int size) { u8 *p = kzalloc(size, GFP_KERNEL); if (p) p[0] = SMP_REQUEST; return p; } static inline void *alloc_smp_resp(int size) { return kzalloc(size, GFP_KERNEL); } static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) { switch (phy->routing_attr) { case TABLE_ROUTING: if (dev->ex_dev.t2t_supp) return 'U'; else return 'T'; case DIRECT_ROUTING: return 'D'; case SUBTRACTIVE_ROUTING: return 'S'; default: return '?'; } } static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; struct discover_resp *dr = &resp->disc; struct sas_ha_struct *ha = dev->port->ha; struct 
expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; struct sas_rphy *rphy = dev->rphy; bool new_phy = !phy->phy; char *type; if (new_phy) { if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) return; phy->phy = sas_phy_alloc(&rphy->dev, phy_id); /* FIXME: error_handling */ BUG_ON(!phy->phy); } switch (resp->result) { case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; break; default: phy->phy_state = PHY_NOT_PRESENT; break; case SMP_RESP_FUNC_ACC: phy->phy_state = PHY_EMPTY; /* do not know yet */ break; } /* check if anything important changed to squelch debug */ dev_type = phy->attached_dev_type; linkrate = phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); /* Handle vacant phy - rest of dr data is not valid so skip it */ if (phy->phy_state == PHY_VACANT) { memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); phy->attached_dev_type = SAS_PHY_UNUSED; if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { phy->phy_id = phy_id; goto skip; } else goto out; } phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; phy->phy_id = phy_id; phy->linkrate = dr->linkrate; phy->attached_sata_host = dr->attached_sata_host; phy->attached_sata_dev = dr->attached_sata_dev; phy->attached_sata_ps = dr->attached_sata_ps; phy->attached_iproto = dr->iproto << 1; phy->attached_tproto = dr->tproto << 1; /* help some expanders that fail to zero sas_address in the 'no * device' case */ if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_phy_id = dr->attached_phy_id; phy->phy_change_count = dr->change_count; phy->routing_attr = dr->routing_attr; phy->virtual = dr->virtual; phy->last_da_index = -1; phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr); phy->phy->identify.device_type = dr->attached_dev_type; phy->phy->identify.initiator_port_protocols = phy->attached_iproto; phy->phy->identify.target_port_protocols = phy->attached_tproto; if (!phy->attached_tproto && dr->attached_sata_dev) phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; phy->phy->identify.phy_identifier = phy_id; phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); skip: if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); return; } out: switch (phy->attached_dev_type) { case SAS_SATA_PENDING: type = "stp pending"; break; case SAS_PHY_UNUSED: type = "no device"; break; case SAS_END_DEVICE: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; else type = "host"; } else { if (dr->attached_sata_dev) type = "stp"; else type = "ssp"; } break; case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: type = "unknown"; } /* this routine is polled by libata error recovery so filter * unimportant messages */ if (new_phy || phy->attached_dev_type != dev_type || phy->linkrate != linkrate || SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr)) /* pass */; else return; /* if the attached device type changed and ata_eh is active, * make sure we run revalidation when eh completes (see: * sas_enable_revalidation) */ if (test_bit(SAS_HA_ATA_EH_ACTIVE, 
&ha->state)) set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", SAS_ADDR(dev->sas_addr), phy->phy_id, sas_route_char(dev, phy), phy->linkrate, SAS_ADDR(phy->attached_sas_addr), type); } /* check if we have an existing attached ata device on this expander phy */ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id) { struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id]; struct domain_device *dev; struct sas_rphy *rphy; if (!ex_phy->port) return NULL; rphy = ex_phy->port->rphy; if (!rphy) return NULL; dev = sas_find_dev_by_rphy(rphy); if (dev && dev_is_sata(dev)) return dev; return NULL; } #define DISCOVER_REQ_SIZE 16 #define DISCOVER_RESP_SIZE 56 static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, u8 *disc_resp, int single) { struct discover_resp *dr; int res; disc_req[9] = single; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) return res; dr = &((struct smp_resp *)disc_resp)->disc; if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) { sas_printk("Found loopback topology, just ignore it!\n"); return 0; } sas_set_ex_phy(dev, single, disc_resp); return 0; } int sas_ex_phy_discover(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int res = 0; u8 *disc_req; u8 *disc_resp; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; } disc_req[1] = SMP_DISCOVER; if (0 <= single && single < ex->num_phys) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single); } else { int i; for (i = 0; i < ex->num_phys; i++) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, i); if (res) goto out_err; } } out_err: kfree(disc_resp); kfree(disc_req); return res; } static int sas_expander_discover(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int res = -ENOMEM; ex->ex_phy = kcalloc(ex->num_phys, sizeof(*ex->ex_phy), GFP_KERNEL); if (!ex->ex_phy) return -ENOMEM; res = sas_ex_phy_discover(dev, -1); if (res) goto out_err; return 0; out_err: kfree(ex->ex_phy); ex->ex_phy = NULL; return res; } #define MAX_EXPANDER_PHYS 128 static void ex_assign_report_general(struct domain_device *dev, struct smp_resp *resp) { struct report_general_resp *rg = &resp->rg; dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); dev->ex_dev.t2t_supp = rg->t2t_supp; dev->ex_dev.conf_route_table = rg->conf_route_table; dev->ex_dev.configuring = rg->configuring; memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); } #define RG_REQ_SIZE 8 #define RG_RESP_SIZE 32 static int sas_ex_general(struct domain_device *dev) { u8 *rg_req; struct smp_resp *rg_resp; int res; int i; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; for (i = 0; i < 5; i++) { res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) { SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("RG:ex %016llx returned SMP 
result:0x%x\n", SAS_ADDR(dev->sas_addr), rg_resp->result); res = rg_resp->result; goto out; } ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { SAS_DPRINTK("RG: ex %llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else break; } out: kfree(rg_req); kfree(rg_resp); return res; } static void ex_assign_manuf_info(struct domain_device *dev, void *_mi_resp) { u8 *mi_resp = _mi_resp; struct sas_rphy *rphy = dev->rphy; struct sas_expander_device *edev = rphy_to_expander_device(rphy); memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); memcpy(edev->product_rev, mi_resp + 36, SAS_EXPANDER_PRODUCT_REV_LEN); if (mi_resp[8] & 1) { memcpy(edev->component_vendor_id, mi_resp + 40, SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); edev->component_id = mi_resp[48] << 8 | mi_resp[49]; edev->component_revision_id = mi_resp[50]; } } #define MI_REQ_SIZE 8 #define MI_RESP_SIZE 64 static int sas_ex_manuf_info(struct domain_device *dev) { u8 *mi_req; u8 *mi_resp; int res; mi_req = alloc_smp_req(MI_REQ_SIZE); if (!mi_req) return -ENOMEM; mi_resp = alloc_smp_resp(MI_RESP_SIZE); if (!mi_resp) { kfree(mi_req); return -ENOMEM; } mi_req[1] = SMP_REPORT_MANUF_INFO; res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); if (res) { SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), mi_resp[2]); goto out; } ex_assign_manuf_info(dev, mi_resp); out: kfree(mi_req); kfree(mi_resp); return res; } #define PC_REQ_SIZE 44 #define PC_RESP_SIZE 8 int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func phy_func, struct sas_phy_linkrates *rates) { u8 *pc_req; u8 *pc_resp; int res; pc_req = alloc_smp_req(PC_REQ_SIZE); if (!pc_req) return -ENOMEM; pc_resp = alloc_smp_resp(PC_RESP_SIZE); if (!pc_resp) { kfree(pc_req); return -ENOMEM; } pc_req[1] = SMP_PHY_CONTROL; pc_req[9] = phy_id; pc_req[10]= phy_func; if (rates) { pc_req[32] = rates->minimum_linkrate << 4; pc_req[33] = rates->maximum_linkrate << 4; } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); kfree(pc_resp); kfree(pc_req); return res; } static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL); phy->linkrate = SAS_PHY_DISABLED; } static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) sas_ex_disable_phy(dev, i); } } static int sas_dev_present_in_domain(struct asd_sas_port *port, u8 *sas_addr) { struct domain_device *dev; if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) return 1; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) return 1; } return 0; } #define RPEL_REQ_SIZE 16 #define RPEL_RESP_SIZE 32 int sas_smp_get_phy_events(struct sas_phy *phy) { int res; u8 *req; u8 *resp; struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); struct domain_device *dev = sas_find_dev_by_rphy(rphy); req = alloc_smp_req(RPEL_REQ_SIZE); if (!req) 
return -ENOMEM; resp = alloc_smp_resp(RPEL_RESP_SIZE); if (!resp) { kfree(req); return -ENOMEM; } req[1] = SMP_REPORT_PHY_ERR_LOG; req[9] = phy->number; res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); if (res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); phy->running_disparity_error_count = scsi_to_u32(&resp[16]); phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: kfree(req); kfree(resp); return res; } #ifdef CONFIG_SCSI_SAS_ATA #define RPS_REQ_SIZE 16 #define RPS_RESP_SIZE 60 int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, struct smp_resp *rps_resp) { int res; u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); u8 *resp = (u8 *)rps_resp; if (!rps_req) return -ENOMEM; rps_req[1] = SMP_REPORT_PHY_SATA; rps_req[9] = phy_id; res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, rps_resp, RPS_RESP_SIZE); /* 0x34 is the FIS type for the D2H fis. There's a potential * standards cockup here. sas-2 explicitly specifies the FIS * should be encoded so that FIS type is in resp[24]. * However, some expanders endian reverse this. Undo the * reversal here */ if (!res && resp[27] == 0x34 && resp[24] != 0x34) { int i; for (i = 0; i < 5; i++) { int j = 24 + (i*4); u8 a, b; a = resp[j + 0]; b = resp[j + 1]; resp[j + 0] = resp[j + 3]; resp[j + 1] = resp[j + 2]; resp[j + 2] = b; resp[j + 3] = a; } } kfree(rps_req); return res; } #endif static void sas_ex_get_linkrate(struct domain_device *parent, struct domain_device *child, struct ex_phy *parent_phy) { struct expander_device *parent_ex = &parent->ex_dev; struct sas_port *port; int i; child->pathways = 0; port = parent_phy->port; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *phy = &parent_ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr)) { child->min_linkrate = min(parent->min_linkrate, phy->linkrate); child->max_linkrate = max(parent->max_linkrate, phy->linkrate); child->pathways++; sas_port_add_phy(port, phy->phy); } } child->linkrate = min(parent_phy->linkrate, child->max_linkrate); child->pathways = min(child->pathways, parent->pathways); } static struct domain_device *sas_ex_discover_end_dev( struct domain_device *parent, int phy_id) { struct expander_device *parent_ex = &parent->ex_dev; struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; int res; if (phy->attached_sata_host || phy->attached_sata_ps) return NULL; child = sas_alloc_device(); if (!child) return NULL; kref_get(&parent->kref); child->parent = parent; child->port = parent->port; child->iproto = phy->attached_iproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); if (!phy->port) { phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); if (unlikely(!phy->port)) goto out_err; if (unlikely(sas_port_add(phy->port) != 0)) { sas_port_free(phy->port); goto out_err; } } sas_ex_get_linkrate(parent, child, phy); sas_device_set_phy(child, phy->port); #ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { res = sas_get_ata_info(child, phy); if (res) goto out_free; sas_init_dev(child); res = sas_ata_init(child); if (res) goto out_free; rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; child->rphy = rphy; get_device(&rphy->dev); list_add_tail(&child->disco_list_node, 
&parent->port->disco_list); res = sas_discover_sata(child); if (res) { SAS_DPRINTK("sas_discover_sata() for device %16llx at " "%016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) goto out_free; child->tproto = phy->attached_tproto; sas_init_dev(child); child->rphy = rphy; get_device(&rphy->dev); sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); res = sas_discover_end_dev(child); if (res) { SAS_DPRINTK("sas_discover_end_dev() for device %16llx " "at %016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else { SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", phy->attached_tproto, SAS_ADDR(parent->sas_addr), phy_id); goto out_free; } list_add_tail(&child->siblings, &parent_ex->children); return child; out_list_del: sas_rphy_free(child->rphy); list_del(&child->disco_list_node); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); out_free: sas_port_delete(phy->port); out_err: phy->port = NULL; sas_put_device(child); return NULL; } /* See if this phy is part of a wide port */ static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id) { struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; int i; for (i = 0; i < parent->ex_dev.num_phys; i++) { struct ex_phy *ephy = &parent->ex_dev.ex_phy[i]; if (ephy == phy) continue; if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, SAS_ADDR_SIZE) && ephy->port) { sas_port_add_phy(ephy->port, phy->phy); phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; return true; } } return false; } static struct domain_device *sas_ex_discover_expander( struct domain_device *parent, int phy_id) { struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; struct sas_expander_device *edev; struct asd_sas_port *port; int res; if (phy->routing_attr == DIRECT_ROUTING) { SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " "allowed\n", SAS_ADDR(parent->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr), phy->attached_phy_id); return NULL; } child = sas_alloc_device(); if (!child) return NULL; phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); /* FIXME: better error handling */ BUG_ON(sas_port_add(phy->port) != 0); switch (phy->attached_dev_type) { case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; default: rphy = NULL; /* shut gcc up */ BUG(); } port = parent->port; child->rphy = rphy; get_device(&rphy->dev); edev = rphy_to_expander_device(rphy); child->dev_type = phy->attached_dev_type; kref_get(&parent->kref); child->parent = parent; child->port = port; child->iproto = phy->attached_iproto; child->tproto = phy->attached_tproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); sas_ex_get_linkrate(parent, child, phy); edev->level = parent_ex->level + 1; parent->port->disc.max_level = max(parent->port->disc.max_level, 
edev->level); sas_init_dev(child); sas_fill_in_rphy(child, rphy); sas_rphy_add(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_add_tail(&child->dev_list_node, &parent->port->dev_list); spin_unlock_irq(&parent->port->dev_list_lock); res = sas_discover_expander(child); if (res) { sas_rphy_delete(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); sas_put_device(child); return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); return child; } static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; struct domain_device *child = NULL; int res = 0; /* Phy state */ if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL)) res = sas_ex_phy_discover(dev, phy_id); if (res) return res; } /* Parent and domain coherency */ if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->port->sas_addr))) { sas_add_parent_port(dev, phy_id); return 0; } if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->parent->sas_addr))) { sas_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); return 0; } if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); } return 0; } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; if (ex_phy->attached_dev_type != SAS_END_DEVICE && ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), phy_id); return 0; } res = sas_configure_routing(dev, ex_phy->attached_sas_addr); if (res) { SAS_DPRINTK("configure routing for dev %016llx " "reported 0x%x. Forgotten\n", SAS_ADDR(ex_phy->attached_sas_addr), res); sas_disable_routing(dev, ex_phy->attached_sas_addr); return res; } if (sas_ex_join_wide_port(dev, phy_id)) { SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); return res; } switch (ex_phy->attached_dev_type) { case SAS_END_DEVICE: case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; case SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", SAS_ADDR(ex_phy->attached_sas_addr), ex_phy->attached_phy_id, SAS_ADDR(dev->sas_addr), phy_id); sas_ex_disable_phy(dev, phy_id); break; } else memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: break; } if (child) { int i; for (i = 0; i < ex->num_phys; i++) { if (ex->ex_phy[i].phy_state == PHY_VACANT || ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) continue; /* * Due to races, the phy might not get added to the * wide port, so we add the phy to the wide port here. 
*/ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; if (sas_ex_join_wide_port(dev, i)) SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); } } } return res; } static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); return 1; } } return 0; } static int sas_check_level_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child; u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); continue; } else { u8 s2[8]; if (sas_find_sub_addr(child, s2) && (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " "diverges from subtractive " "boundary %016llx\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(child->sas_addr), SAS_ADDR(s2), SAS_ADDR(sub_addr)); sas_ex_disable_port(child, s2); } } } return 0; } /** * sas_ex_discover_devices - discover devices attached to this expander * @dev: pointer to the expander domain device * @single: if you want to do a single phy, else set to -1; * * Configure this expander for use with its devices and register the * devices of this expander. */ static int sas_ex_discover_devices(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int i = 0, end = ex->num_phys; int res = 0; if (0 <= single && single < end) { i = single; end = i+1; } for ( ; i < end; i++) { struct ex_phy *ex_phy = &ex->ex_phy[i]; if (ex_phy->phy_state == PHY_VACANT || ex_phy->phy_state == PHY_NOT_PRESENT || ex_phy->phy_state == PHY_DEVICE_DISCOVERED) continue; switch (ex_phy->linkrate) { case SAS_PHY_DISABLED: case SAS_PHY_RESET_PROBLEM: case SAS_SATA_PORT_SELECTOR: continue; default: res = sas_ex_discover_dev(dev, i); if (res) break; continue; } } if (!res) sas_check_level_subtractive_boundary(dev); return res; } static int sas_check_ex_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int i; u8 *sub_sas_addr = NULL; if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) sub_sas_addr = &phy->attached_sas_addr[0]; else if (SAS_ADDR(sub_sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x " "diverges(%016llx) on subtractive " "boundary(%016llx). 
Disabled\n", SAS_ADDR(dev->sas_addr), i, SAS_ADDR(phy->attached_sas_addr), SAS_ADDR(sub_sas_addr)); sas_ex_disable_phy(dev, i); } } } return 0; } static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { static const char *ex_type[] = { [SAS_EDGE_EXPANDER_DEVICE] = "edge", [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx " "phy 0x%x has %c:%c routing link!\n", ex_type[parent->dev_type], SAS_ADDR(parent->sas_addr), parent_phy->phy_id, ex_type[child->dev_type], SAS_ADDR(child->sas_addr), child_phy->phy_id, sas_route_char(parent, parent_phy), sas_route_char(child, child_phy)); } static int sas_check_eeds(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { int res = 0; struct domain_device *parent = child->parent; if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " "phy S:0x%x, while there is a fanout ex %016llx\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id, SAS_ADDR(parent->port->disc.fanout_sas_addr)); } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { memcpy(parent->port->disc.eeds_a, parent->sas_addr, SAS_ADDR_SIZE); memcpy(parent->port->disc.eeds_b, child->sas_addr, SAS_ADDR_SIZE); } else if (((SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(child->sas_addr))) && ((SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(child->sas_addr)))) ; else { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " "phy 0x%x link forms a third EEDS!\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id); } return res; } /* Here we spill over 80 columns. It is intentional. 
*/ static int sas_check_parent_topology(struct domain_device *child) { struct expander_device *child_ex = &child->ex_dev; struct expander_device *parent_ex; int i; int res = 0; if (!child->parent) return 0; if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; struct ex_phy *child_phy; if (parent_phy->phy_state == PHY_VACANT || parent_phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) continue; child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { case SAS_EDGE_EXPANDER_DEVICE: if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { res = sas_check_eeds(child, parent_phy, child_phy); } else if (child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == TABLE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING || (child_phy->routing_attr == TABLE_ROUTING && child_ex->t2t_supp && parent_ex->t2t_supp)) { /* All good */; } else { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } break; case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } break; default: break; } } return res; } #define RRI_REQ_SIZE 16 #define RRI_RESP_SIZE 44 static int sas_configure_present(struct domain_device *dev, int phy_id, u8 *sas_addr, int *index, int *present) { int i, res = 0; struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; u8 *rri_req; u8 *rri_resp; *present = 0; *index = 0; rri_req = alloc_smp_req(RRI_REQ_SIZE); if (!rri_req) return -ENOMEM; rri_resp = alloc_smp_resp(RRI_RESP_SIZE); if (!rri_resp) { kfree(rri_req); return -ENOMEM; } rri_req[1] = SMP_REPORT_ROUTE_INFO; rri_req[9] = phy_id; for (i = 0; i < ex->max_route_indexes ; i++) { *(__be16 *)(rri_req+6) = cpu_to_be16(i); res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, RRI_RESP_SIZE); if (res) goto out; res = rri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx " "phy 0x%x index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, i); goto out; } else if (res != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " "result 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), phy_id, i, res); goto out; } if (SAS_ADDR(sas_addr) != 0) { if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { *index = i; if ((rri_resp[12] & 0x80) == 0x80) *present = 0; else *present = 1; goto out; } else if (SAS_ADDR(rri_resp+16) == 0) { *index = i; *present = 0; goto out; } } else if (SAS_ADDR(rri_resp+16) == 0 && phy->last_da_index < i) { phy->last_da_index = i; *index = i; *present = 0; goto out; } } res = -1; out: kfree(rri_req); kfree(rri_resp); return res; } #define CRI_REQ_SIZE 44 #define CRI_RESP_SIZE 8 static int sas_configure_set(struct domain_device *dev, int phy_id, u8 *sas_addr, int index, int include) { int 
res; u8 *cri_req; u8 *cri_resp; cri_req = alloc_smp_req(CRI_REQ_SIZE); if (!cri_req) return -ENOMEM; cri_resp = alloc_smp_resp(CRI_RESP_SIZE); if (!cri_resp) { kfree(cri_req); return -ENOMEM; } cri_req[1] = SMP_CONF_ROUTE_INFO; *(__be16 *)(cri_req+6) = cpu_to_be16(index); cri_req[9] = phy_id; if (SAS_ADDR(sas_addr) == 0 || !include) cri_req[12] |= 0x80; memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, CRI_RESP_SIZE); if (res) goto out; res = cri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " "index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, index); } out: kfree(cri_req); kfree(cri_resp); return res; } static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include) { int index; int present; int res; res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); if (res) return res; if (include ^ present) return sas_configure_set(dev, phy_id, sas_addr, index,include); return res; } /** * sas_configure_parent - configure routing table of parent * @parent: parent expander * @child: child expander * @sas_addr: SAS port identifier of device directly attached to child * @include: whether or not to include @child in the expander routing table */ static int sas_configure_parent(struct domain_device *parent, struct domain_device *child, u8 *sas_addr, int include) { struct expander_device *ex_parent = &parent->ex_dev; int res = 0; int i; if (parent->parent) { res = sas_configure_parent(parent->parent, parent, sas_addr, include); if (res) return res; } if (ex_parent->conf_route_table == 0) { SAS_DPRINTK("ex %016llx has self-configuring routing table\n", SAS_ADDR(parent->sas_addr)); return 0; } for (i = 0; i < ex_parent->num_phys; i++) { struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr))) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; } } return res; } /** * sas_configure_routing - configure routing * @dev: expander device * @sas_addr: port identifier of device directly attached to the expander device */ static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 1); return 0; } static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 0); return 0; } /** * sas_discover_expander - expander discovery * @dev: pointer to expander domain device * * See comment in sas_discover_sata(). 
*/ static int sas_discover_expander(struct domain_device *dev) { int res; res = sas_notify_lldd_dev_found(dev); if (res) return res; res = sas_ex_general(dev); if (res) goto out_err; res = sas_ex_manuf_info(dev); if (res) goto out_err; res = sas_expander_discover(dev); if (res) { SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", SAS_ADDR(dev->sas_addr), res); goto out_err; } sas_check_ex_subtractive_boundary(dev); res = sas_check_parent_topology(dev); if (res) goto out_err; return 0; out_err: sas_notify_lldd_dev_gone(dev); return res; } static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) { int res = 0; struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); if (level == ex->level) res = sas_ex_discover_devices(dev, -1); else if (level > 0) res = sas_ex_discover_devices(port->port_dev, -1); } } return res; } static int sas_ex_bfs_disc(struct asd_sas_port *port) { int res; int level; do { level = port->disc.max_level; res = sas_ex_level_discovery(port, level); mb(); } while (level < port->disc.max_level); return res; } int sas_discover_root_expander(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); res = sas_rphy_add(dev->rphy); if (res) goto out_err; ex->level = dev->port->disc.max_level; /* 0 */ res = sas_discover_expander(dev); if (res) goto out_err2; sas_ex_bfs_disc(dev->port); return res; out_err2: sas_rphy_remove(dev->rphy); out_err: return res; } /* ---------- Domain revalidation ---------- */ static int sas_get_phy_discover(struct domain_device *dev, int phy_id, struct smp_resp *disc_resp) { int res; u8 *disc_req; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_req[1] = SMP_DISCOVER; disc_req[9] = phy_id; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) goto out; else if (disc_resp->result != SMP_RESP_FUNC_ACC) { res = disc_resp->result; goto out; } out: kfree(disc_req); return res; } static int sas_get_phy_change_count(struct domain_device *dev, int phy_id, int *pcc) { int res; struct smp_resp *disc_resp; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (!res) *pcc = disc_resp->disc.change_count; kfree(disc_resp); return res; } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; struct discover_resp *dr; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; dr = &disc_resp->disc; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (res == 0) { memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8); *type = to_dev_type(dr); if (*type == 0) memset(sas_addr, 0, 8); } kfree(disc_resp); return res; } static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, int from_phy, bool update) { struct expander_device *ex = &dev->ex_dev; int res = 0; int i; for (i = from_phy; i < ex->num_phys; i++) { int phy_change_count = 0; res = sas_get_phy_change_count(dev, i, &phy_change_count); switch (res) { case SMP_RESP_PHY_VACANT: case SMP_RESP_NO_PHY: continue; case SMP_RESP_FUNC_ACC: break; default: return res; } if (phy_change_count != ex->ex_phy[i].phy_change_count) { if (update) ex->ex_phy[i].phy_change_count = 
phy_change_count; *phy_id = i; return 0; } } return 0; } static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) { int res; u8 *rg_req; struct smp_resp *rg_resp; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) goto out; if (rg_resp->result != SMP_RESP_FUNC_ACC) { res = rg_resp->result; goto out; } *ecc = be16_to_cpu(rg_resp->rg.change_count); out: kfree(rg_resp); kfree(rg_req); return res; } /** * sas_find_bcast_dev - find the device issue BROADCAST(CHANGE). * @dev:domain device to be detect. * @src_dev: the device which originated BROADCAST(CHANGE). * * Add self-configuration expander support. Suppose two expander cascading, * when the first level expander is self-configuring, hotplug the disks in * second level expander, BROADCAST(CHANGE) will not only be originated * in the second level expander, but also be originated in the first level * expander (see SAS protocol SAS 2r-14, 7.11 for detail), it is to say, * expander changed count in two level expanders will all increment at least * once, but the phy which chang count has changed is the source device which * we concerned. */ static int sas_find_bcast_dev(struct domain_device *dev, struct domain_device **src_dev) { struct expander_device *ex = &dev->ex_dev; int ex_change_count = -1; int phy_id = -1; int res; struct domain_device *ch; res = sas_get_ex_change_count(dev, &ex_change_count); if (res) goto out; if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { /* Just detect if this expander phys phy change count changed, * in order to determine if this expander originate BROADCAST, * and do not update phy change count field in our structure. 
*/ res = sas_find_bcast_phy(dev, &phy_id, 0, false); if (phy_id != -1) { *src_dev = dev; ex->ex_change_count = ex_change_count; SAS_DPRINTK("Expander phy change count has changed\n"); return res; } else SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; } } out: return res; } static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child, *n; list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); } sas_unregister_dev(port, dev); } static void sas_unregister_devs_sas_addr(struct domain_device *parent, int phy_id, bool last) { struct expander_device *ex_dev = &parent->ex_dev; struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; struct domain_device *child, *n, *found = NULL; if (last) { list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); found = child; break; } } sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); if (phy->port) { sas_port_delete_phy(phy->port, phy->phy); sas_device_set_phy(found, phy->port); if (phy->port->num_phys == 0) list_add_tail(&phy->port->del_list, &parent->port->sas_port_del_list); phy->port = NULL; } } static int sas_discover_bfs_by_root_level(struct domain_device *root, const int level) { struct expander_device *ex_root = &root->ex_dev; struct domain_device *child; int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); if (level > ex->level) res = sas_discover_bfs_by_root_level(child, level); else if (level == ex->level) res = sas_ex_discover_devices(child, -1); } } return res; } static int sas_discover_bfs_by_root(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); int level = ex->level+1; res = sas_ex_discover_devices(dev, -1); if (res) goto out; do { res = sas_discover_bfs_by_root_level(dev, level); mb(); level += 1; } while (level <= dev->port->disc.max_level); out: return res; } static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; int res; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) return res; if (sas_ex_join_wide_port(dev, phy_id)) return 0; res = sas_ex_discover_devices(dev, phy_id); if (res) return res; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = 
sas_discover_bfs_by_root(child); break; } } return res; } static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return false; } static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; enum sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; memset(sas_addr, 0, 8); res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); switch (res) { case SMP_RESP_NO_PHY: phy->phy_state = PHY_NOT_PRESENT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_FUNC_ACC: break; case -ECOMM: break; default: return res; } if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && dev_type_flutter(type, phy->attached_dev_type)) { struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id); char *action = ""; sas_ex_phy_discover(dev, phy_id); if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); return res; } /* we always have to delete the old device when we went here */ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", SAS_ADDR(dev->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr)); sas_unregister_devs_sas_addr(dev, phy_id, last); return sas_discover_new(dev, phy_id); } /** * sas_rediscover - revalidate the domain. * @dev:domain device to be detect. * @phy_id: the phy id will be detected. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. * Discover process only interrogates devices in order to discover the * domain.For plugging out, we un-register the device only when it is * the last phy in the port, for other phys in this port, we just delete it * from the port.For inserting, we do discovery when it is the * first phy,for other phys in this port, we add it to the port to * forming the wide-port. */ static int sas_rediscover(struct domain_device *dev, const int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; int res = 0; int i; bool last = true; /* is this the last phy of the port */ SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", SAS_ADDR(dev->sas_addr), phy_id); if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (i == phy_id) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(changed_phy->attached_sas_addr)) { SAS_DPRINTK("phy%d part of wide port with " "phy%d\n", phy_id, i); last = false; break; } } res = sas_rediscover_dev(dev, phy_id, last); } else res = sas_discover_new(dev, phy_id); return res; } /** * sas_ex_revalidate_domain - revalidate the domain * @port_dev: port domain device. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. 
* Discover process only interrogates devices in order to discover the * domain. */ int sas_ex_revalidate_domain(struct domain_device *port_dev) { int res; struct domain_device *dev = NULL; res = sas_find_bcast_dev(port_dev, &dev); if (res == 0 && dev) { struct expander_device *ex = &dev->ex_dev; int i = 0, phy_id; do { phy_id = -1; res = sas_find_bcast_phy(dev, &phy_id, i, true); if (phy_id == -1) break; res = sas_rediscover(dev, phy_id); i = phy_id + 1; } while (i < ex->num_phys); } return res; } void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { struct domain_device *dev; unsigned int rcvlen = 0; int ret = -EINVAL; /* no rphy means no smp target support (ie aic94xx host) */ if (!rphy) return sas_smp_host_handler(job, shost); switch (rphy->identify.device_type) { case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: break; default: printk("%s: can we send a smp request to a device?\n", __func__); goto out; } dev = sas_find_dev_by_rphy(rphy); if (!dev) { printk("%s: fail to find a domain_device?\n", __func__); goto out; } /* do we need to support multiple segments? */ if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) { printk("%s: multiple segments req %u, rsp %u\n", __func__, job->request_payload.payload_len, job->reply_payload.payload_len); goto out; } ret = smp_execute_task_sg(dev, job->request_payload.sg_list, job->reply_payload.sg_list); if (ret >= 0) { /* bsg_job_done() requires the length received */ rcvlen = job->reply_payload.payload_len - ret; ret = 0; } out: bsg_job_done(job, ret, rcvlen); }
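/*
 * Illustrative sketch (hypothetical helper, not part of this driver): the
 * CONFIGURE ROUTE INFORMATION request that sas_configure_set() assembles
 * earlier in this file uses a fixed frame layout -- the SMP function code
 * in byte 1, the big-endian route index in bytes 6-7, the target phy id in
 * byte 9, the "disable route entry" flag as bit 7 of byte 12, and the
 * routed SAS address in bytes 16-23 (byte 0 is set to SMP_REQUEST by
 * alloc_smp_req()).  The function below only restates that layout, reusing
 * the allocator and constants already defined above; the name
 * sketch_build_cri_request is an assumption made for illustration.
 */
static u8 *sketch_build_cri_request(int phy_id, u8 *sas_addr, int index,
				    int include)
{
	u8 *cri_req = alloc_smp_req(CRI_REQ_SIZE);	/* byte 0 = SMP_REQUEST */

	if (!cri_req)
		return NULL;

	cri_req[1] = SMP_CONF_ROUTE_INFO;		/* SMP function code */
	*(__be16 *)(cri_req + 6) = cpu_to_be16(index);	/* expander route index */
	cri_req[9] = phy_id;				/* phy whose table entry is set */
	if (SAS_ADDR(sas_addr) == 0 || !include)
		cri_req[12] |= 0x80;			/* disable this route entry */
	memcpy(cri_req + 16, sas_addr, SAS_ADDR_SIZE);	/* routed SAS address */
	return cri_req;
}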
/* * Serial Attached SCSI (SAS) Expander discovery and configuration * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/scatterlist.h> #include <linux/blkdev.h> #include <linux/slab.h> #include "sas_internal.h" #include <scsi/sas_ata.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" static int sas_discover_expander(struct domain_device *dev); static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include); static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); /* ---------- SMP task management ---------- */ static void smp_task_timedout(struct timer_list *t) { struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->slow_task->completion); } spin_unlock_irqrestore(&task->task_state_lock, flags); } static void smp_task_done(struct sas_task *task) { del_timer(&task->slow_task->timer); complete(&task->slow_task->completion); } /* Give it some long enough timeout. In seconds. 
*/ #define SMP_TIMEOUT 10 static int smp_execute_task_sg(struct domain_device *dev, struct scatterlist *req, struct scatterlist *resp) { int res, retry; struct sas_task *task = NULL; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); mutex_lock(&dev->ex_dev.cmd_mutex); for (retry = 0; retry < 3; retry++) { if (test_bit(SAS_DEV_GONE, &dev->state)) { res = -ECOMM; break; } task = sas_alloc_slow_task(GFP_KERNEL); if (!task) { res = -ENOMEM; break; } task->dev = dev; task->task_proto = dev->tproto; task->smp_task.smp_req = *req; task->smp_task.smp_resp = *resp; task->task_done = smp_task_done; task->slow_task->timer.function = smp_task_timedout; task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; add_timer(&task->slow_task->timer); res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { del_timer(&task->slow_task->timer); SAS_DPRINTK("executing SMP task failed:%d\n", res); break; } wait_for_completion(&task->slow_task->completion); res = -ECOMM; if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { SAS_DPRINTK("smp task timed out or aborted\n"); i->dft->lldd_abort_task(task); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { SAS_DPRINTK("SMP task aborted and not done\n"); break; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = 0; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { res = -EMSGSIZE; break; } if (task->task_status.resp == SAS_TASK_UNDELIVERED && task->task_status.stat == SAS_DEVICE_UNKNOWN) break; else { SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " "status 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat); sas_free_task(task); task = NULL; } } mutex_unlock(&dev->ex_dev.cmd_mutex); BUG_ON(retry == 3 && task != NULL); sas_free_task(task); return res; } static int smp_execute_task(struct domain_device *dev, void *req, int req_size, void *resp, int resp_size) { struct scatterlist req_sg; struct scatterlist resp_sg; sg_init_one(&req_sg, req, req_size); sg_init_one(&resp_sg, resp, resp_size); return smp_execute_task_sg(dev, &req_sg, &resp_sg); } /* ---------- Allocations ---------- */ static inline void *alloc_smp_req(int size) { u8 *p = kzalloc(size, GFP_KERNEL); if (p) p[0] = SMP_REQUEST; return p; } static inline void *alloc_smp_resp(int size) { return kzalloc(size, GFP_KERNEL); } static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) { switch (phy->routing_attr) { case TABLE_ROUTING: if (dev->ex_dev.t2t_supp) return 'U'; else return 'T'; case DIRECT_ROUTING: return 'D'; case SUBTRACTIVE_ROUTING: return 'S'; default: return '?'; } } static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; struct discover_resp *dr = &resp->disc; struct sas_ha_struct *ha = dev->port->ha; struct 
expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; struct sas_rphy *rphy = dev->rphy; bool new_phy = !phy->phy; char *type; if (new_phy) { if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) return; phy->phy = sas_phy_alloc(&rphy->dev, phy_id); /* FIXME: error_handling */ BUG_ON(!phy->phy); } switch (resp->result) { case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; break; default: phy->phy_state = PHY_NOT_PRESENT; break; case SMP_RESP_FUNC_ACC: phy->phy_state = PHY_EMPTY; /* do not know yet */ break; } /* check if anything important changed to squelch debug */ dev_type = phy->attached_dev_type; linkrate = phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); /* Handle vacant phy - rest of dr data is not valid so skip it */ if (phy->phy_state == PHY_VACANT) { memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); phy->attached_dev_type = SAS_PHY_UNUSED; if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { phy->phy_id = phy_id; goto skip; } else goto out; } phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; phy->phy_id = phy_id; phy->linkrate = dr->linkrate; phy->attached_sata_host = dr->attached_sata_host; phy->attached_sata_dev = dr->attached_sata_dev; phy->attached_sata_ps = dr->attached_sata_ps; phy->attached_iproto = dr->iproto << 1; phy->attached_tproto = dr->tproto << 1; /* help some expanders that fail to zero sas_address in the 'no * device' case */ if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_phy_id = dr->attached_phy_id; phy->phy_change_count = dr->change_count; phy->routing_attr = dr->routing_attr; phy->virtual = dr->virtual; phy->last_da_index = -1; phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr); phy->phy->identify.device_type = dr->attached_dev_type; phy->phy->identify.initiator_port_protocols = phy->attached_iproto; phy->phy->identify.target_port_protocols = phy->attached_tproto; if (!phy->attached_tproto && dr->attached_sata_dev) phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; phy->phy->identify.phy_identifier = phy_id; phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); skip: if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); return; } out: switch (phy->attached_dev_type) { case SAS_SATA_PENDING: type = "stp pending"; break; case SAS_PHY_UNUSED: type = "no device"; break; case SAS_END_DEVICE: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; else type = "host"; } else { if (dr->attached_sata_dev) type = "stp"; else type = "ssp"; } break; case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: type = "unknown"; } /* this routine is polled by libata error recovery so filter * unimportant messages */ if (new_phy || phy->attached_dev_type != dev_type || phy->linkrate != linkrate || SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr)) /* pass */; else return; /* if the attached device type changed and ata_eh is active, * make sure we run revalidation when eh completes (see: * sas_enable_revalidation) */ if (test_bit(SAS_HA_ATA_EH_ACTIVE, 
&ha->state)) set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", SAS_ADDR(dev->sas_addr), phy->phy_id, sas_route_char(dev, phy), phy->linkrate, SAS_ADDR(phy->attached_sas_addr), type); } /* check if we have an existing attached ata device on this expander phy */ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id) { struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id]; struct domain_device *dev; struct sas_rphy *rphy; if (!ex_phy->port) return NULL; rphy = ex_phy->port->rphy; if (!rphy) return NULL; dev = sas_find_dev_by_rphy(rphy); if (dev && dev_is_sata(dev)) return dev; return NULL; } #define DISCOVER_REQ_SIZE 16 #define DISCOVER_RESP_SIZE 56 static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, u8 *disc_resp, int single) { struct discover_resp *dr; int res; disc_req[9] = single; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) return res; dr = &((struct smp_resp *)disc_resp)->disc; if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) { sas_printk("Found loopback topology, just ignore it!\n"); return 0; } sas_set_ex_phy(dev, single, disc_resp); return 0; } int sas_ex_phy_discover(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int res = 0; u8 *disc_req; u8 *disc_resp; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; } disc_req[1] = SMP_DISCOVER; if (0 <= single && single < ex->num_phys) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single); } else { int i; for (i = 0; i < ex->num_phys; i++) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, i); if (res) goto out_err; } } out_err: kfree(disc_resp); kfree(disc_req); return res; } static int sas_expander_discover(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int res = -ENOMEM; ex->ex_phy = kcalloc(ex->num_phys, sizeof(*ex->ex_phy), GFP_KERNEL); if (!ex->ex_phy) return -ENOMEM; res = sas_ex_phy_discover(dev, -1); if (res) goto out_err; return 0; out_err: kfree(ex->ex_phy); ex->ex_phy = NULL; return res; } #define MAX_EXPANDER_PHYS 128 static void ex_assign_report_general(struct domain_device *dev, struct smp_resp *resp) { struct report_general_resp *rg = &resp->rg; dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); dev->ex_dev.t2t_supp = rg->t2t_supp; dev->ex_dev.conf_route_table = rg->conf_route_table; dev->ex_dev.configuring = rg->configuring; memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); } #define RG_REQ_SIZE 8 #define RG_RESP_SIZE 32 static int sas_ex_general(struct domain_device *dev) { u8 *rg_req; struct smp_resp *rg_resp; int res; int i; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; for (i = 0; i < 5; i++) { res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) { SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("RG:ex %016llx returned SMP 
result:0x%x\n", SAS_ADDR(dev->sas_addr), rg_resp->result); res = rg_resp->result; goto out; } ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { SAS_DPRINTK("RG: ex %llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else break; } out: kfree(rg_req); kfree(rg_resp); return res; } static void ex_assign_manuf_info(struct domain_device *dev, void *_mi_resp) { u8 *mi_resp = _mi_resp; struct sas_rphy *rphy = dev->rphy; struct sas_expander_device *edev = rphy_to_expander_device(rphy); memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); memcpy(edev->product_rev, mi_resp + 36, SAS_EXPANDER_PRODUCT_REV_LEN); if (mi_resp[8] & 1) { memcpy(edev->component_vendor_id, mi_resp + 40, SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); edev->component_id = mi_resp[48] << 8 | mi_resp[49]; edev->component_revision_id = mi_resp[50]; } } #define MI_REQ_SIZE 8 #define MI_RESP_SIZE 64 static int sas_ex_manuf_info(struct domain_device *dev) { u8 *mi_req; u8 *mi_resp; int res; mi_req = alloc_smp_req(MI_REQ_SIZE); if (!mi_req) return -ENOMEM; mi_resp = alloc_smp_resp(MI_RESP_SIZE); if (!mi_resp) { kfree(mi_req); return -ENOMEM; } mi_req[1] = SMP_REPORT_MANUF_INFO; res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); if (res) { SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), mi_resp[2]); goto out; } ex_assign_manuf_info(dev, mi_resp); out: kfree(mi_req); kfree(mi_resp); return res; } #define PC_REQ_SIZE 44 #define PC_RESP_SIZE 8 int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func phy_func, struct sas_phy_linkrates *rates) { u8 *pc_req; u8 *pc_resp; int res; pc_req = alloc_smp_req(PC_REQ_SIZE); if (!pc_req) return -ENOMEM; pc_resp = alloc_smp_resp(PC_RESP_SIZE); if (!pc_resp) { kfree(pc_req); return -ENOMEM; } pc_req[1] = SMP_PHY_CONTROL; pc_req[9] = phy_id; pc_req[10]= phy_func; if (rates) { pc_req[32] = rates->minimum_linkrate << 4; pc_req[33] = rates->maximum_linkrate << 4; } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); kfree(pc_resp); kfree(pc_req); return res; } static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL); phy->linkrate = SAS_PHY_DISABLED; } static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) sas_ex_disable_phy(dev, i); } } static int sas_dev_present_in_domain(struct asd_sas_port *port, u8 *sas_addr) { struct domain_device *dev; if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) return 1; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) return 1; } return 0; } #define RPEL_REQ_SIZE 16 #define RPEL_RESP_SIZE 32 int sas_smp_get_phy_events(struct sas_phy *phy) { int res; u8 *req; u8 *resp; struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); struct domain_device *dev = sas_find_dev_by_rphy(rphy); req = alloc_smp_req(RPEL_REQ_SIZE); if (!req) 
return -ENOMEM; resp = alloc_smp_resp(RPEL_RESP_SIZE); if (!resp) { kfree(req); return -ENOMEM; } req[1] = SMP_REPORT_PHY_ERR_LOG; req[9] = phy->number; res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); if (res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); phy->running_disparity_error_count = scsi_to_u32(&resp[16]); phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: kfree(req); kfree(resp); return res; } #ifdef CONFIG_SCSI_SAS_ATA #define RPS_REQ_SIZE 16 #define RPS_RESP_SIZE 60 int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, struct smp_resp *rps_resp) { int res; u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); u8 *resp = (u8 *)rps_resp; if (!rps_req) return -ENOMEM; rps_req[1] = SMP_REPORT_PHY_SATA; rps_req[9] = phy_id; res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, rps_resp, RPS_RESP_SIZE); /* 0x34 is the FIS type for the D2H fis. There's a potential * standards cockup here. sas-2 explicitly specifies the FIS * should be encoded so that FIS type is in resp[24]. * However, some expanders endian reverse this. Undo the * reversal here */ if (!res && resp[27] == 0x34 && resp[24] != 0x34) { int i; for (i = 0; i < 5; i++) { int j = 24 + (i*4); u8 a, b; a = resp[j + 0]; b = resp[j + 1]; resp[j + 0] = resp[j + 3]; resp[j + 1] = resp[j + 2]; resp[j + 2] = b; resp[j + 3] = a; } } kfree(rps_req); return res; } #endif static void sas_ex_get_linkrate(struct domain_device *parent, struct domain_device *child, struct ex_phy *parent_phy) { struct expander_device *parent_ex = &parent->ex_dev; struct sas_port *port; int i; child->pathways = 0; port = parent_phy->port; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *phy = &parent_ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr)) { child->min_linkrate = min(parent->min_linkrate, phy->linkrate); child->max_linkrate = max(parent->max_linkrate, phy->linkrate); child->pathways++; sas_port_add_phy(port, phy->phy); } } child->linkrate = min(parent_phy->linkrate, child->max_linkrate); child->pathways = min(child->pathways, parent->pathways); } static struct domain_device *sas_ex_discover_end_dev( struct domain_device *parent, int phy_id) { struct expander_device *parent_ex = &parent->ex_dev; struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; int res; if (phy->attached_sata_host || phy->attached_sata_ps) return NULL; child = sas_alloc_device(); if (!child) return NULL; kref_get(&parent->kref); child->parent = parent; child->port = parent->port; child->iproto = phy->attached_iproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); if (!phy->port) { phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); if (unlikely(!phy->port)) goto out_err; if (unlikely(sas_port_add(phy->port) != 0)) { sas_port_free(phy->port); goto out_err; } } sas_ex_get_linkrate(parent, child, phy); sas_device_set_phy(child, phy->port); #ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { res = sas_get_ata_info(child, phy); if (res) goto out_free; sas_init_dev(child); res = sas_ata_init(child); if (res) goto out_free; rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; child->rphy = rphy; get_device(&rphy->dev); list_add_tail(&child->disco_list_node, 
&parent->port->disco_list); res = sas_discover_sata(child); if (res) { SAS_DPRINTK("sas_discover_sata() for device %16llx at " "%016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) goto out_free; child->tproto = phy->attached_tproto; sas_init_dev(child); child->rphy = rphy; get_device(&rphy->dev); sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); res = sas_discover_end_dev(child); if (res) { SAS_DPRINTK("sas_discover_end_dev() for device %16llx " "at %016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else { SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", phy->attached_tproto, SAS_ADDR(parent->sas_addr), phy_id); goto out_free; } list_add_tail(&child->siblings, &parent_ex->children); return child; out_list_del: sas_rphy_free(child->rphy); list_del(&child->disco_list_node); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); out_free: sas_port_delete(phy->port); out_err: phy->port = NULL; sas_put_device(child); return NULL; } /* See if this phy is part of a wide port */ static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id) { struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; int i; for (i = 0; i < parent->ex_dev.num_phys; i++) { struct ex_phy *ephy = &parent->ex_dev.ex_phy[i]; if (ephy == phy) continue; if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, SAS_ADDR_SIZE) && ephy->port) { sas_port_add_phy(ephy->port, phy->phy); phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; return true; } } return false; } static struct domain_device *sas_ex_discover_expander( struct domain_device *parent, int phy_id) { struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; struct sas_expander_device *edev; struct asd_sas_port *port; int res; if (phy->routing_attr == DIRECT_ROUTING) { SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " "allowed\n", SAS_ADDR(parent->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr), phy->attached_phy_id); return NULL; } child = sas_alloc_device(); if (!child) return NULL; phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); /* FIXME: better error handling */ BUG_ON(sas_port_add(phy->port) != 0); switch (phy->attached_dev_type) { case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; default: rphy = NULL; /* shut gcc up */ BUG(); } port = parent->port; child->rphy = rphy; get_device(&rphy->dev); edev = rphy_to_expander_device(rphy); child->dev_type = phy->attached_dev_type; kref_get(&parent->kref); child->parent = parent; child->port = port; child->iproto = phy->attached_iproto; child->tproto = phy->attached_tproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); sas_ex_get_linkrate(parent, child, phy); edev->level = parent_ex->level + 1; parent->port->disc.max_level = max(parent->port->disc.max_level, 
edev->level); sas_init_dev(child); sas_fill_in_rphy(child, rphy); sas_rphy_add(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_add_tail(&child->dev_list_node, &parent->port->dev_list); spin_unlock_irq(&parent->port->dev_list_lock); res = sas_discover_expander(child); if (res) { sas_rphy_delete(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); sas_put_device(child); return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); return child; } static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; struct domain_device *child = NULL; int res = 0; /* Phy state */ if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL)) res = sas_ex_phy_discover(dev, phy_id); if (res) return res; } /* Parent and domain coherency */ if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->port->sas_addr))) { sas_add_parent_port(dev, phy_id); return 0; } if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->parent->sas_addr))) { sas_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); return 0; } if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); } return 0; } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; if (ex_phy->attached_dev_type != SAS_END_DEVICE && ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), phy_id); return 0; } res = sas_configure_routing(dev, ex_phy->attached_sas_addr); if (res) { SAS_DPRINTK("configure routing for dev %016llx " "reported 0x%x. Forgotten\n", SAS_ADDR(ex_phy->attached_sas_addr), res); sas_disable_routing(dev, ex_phy->attached_sas_addr); return res; } if (sas_ex_join_wide_port(dev, phy_id)) { SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); return res; } switch (ex_phy->attached_dev_type) { case SAS_END_DEVICE: case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; case SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", SAS_ADDR(ex_phy->attached_sas_addr), ex_phy->attached_phy_id, SAS_ADDR(dev->sas_addr), phy_id); sas_ex_disable_phy(dev, phy_id); break; } else memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: break; } if (child) { int i; for (i = 0; i < ex->num_phys; i++) { if (ex->ex_phy[i].phy_state == PHY_VACANT || ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) continue; /* * Due to races, the phy might not get added to the * wide port, so we add the phy to the wide port here. 
*/ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; if (sas_ex_join_wide_port(dev, i)) SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); } } } return res; } static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); return 1; } } return 0; } static int sas_check_level_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child; u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); continue; } else { u8 s2[8]; if (sas_find_sub_addr(child, s2) && (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " "diverges from subtractive " "boundary %016llx\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(child->sas_addr), SAS_ADDR(s2), SAS_ADDR(sub_addr)); sas_ex_disable_port(child, s2); } } } return 0; } /** * sas_ex_discover_devices - discover devices attached to this expander * @dev: pointer to the expander domain device * @single: if you want to do a single phy, else set to -1; * * Configure this expander for use with its devices and register the * devices of this expander. */ static int sas_ex_discover_devices(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int i = 0, end = ex->num_phys; int res = 0; if (0 <= single && single < end) { i = single; end = i+1; } for ( ; i < end; i++) { struct ex_phy *ex_phy = &ex->ex_phy[i]; if (ex_phy->phy_state == PHY_VACANT || ex_phy->phy_state == PHY_NOT_PRESENT || ex_phy->phy_state == PHY_DEVICE_DISCOVERED) continue; switch (ex_phy->linkrate) { case SAS_PHY_DISABLED: case SAS_PHY_RESET_PROBLEM: case SAS_SATA_PORT_SELECTOR: continue; default: res = sas_ex_discover_dev(dev, i); if (res) break; continue; } } if (!res) sas_check_level_subtractive_boundary(dev); return res; } static int sas_check_ex_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int i; u8 *sub_sas_addr = NULL; if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) sub_sas_addr = &phy->attached_sas_addr[0]; else if (SAS_ADDR(sub_sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x " "diverges(%016llx) on subtractive " "boundary(%016llx). 
Disabled\n", SAS_ADDR(dev->sas_addr), i, SAS_ADDR(phy->attached_sas_addr), SAS_ADDR(sub_sas_addr)); sas_ex_disable_phy(dev, i); } } } return 0; } static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { static const char *ex_type[] = { [SAS_EDGE_EXPANDER_DEVICE] = "edge", [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx " "phy 0x%x has %c:%c routing link!\n", ex_type[parent->dev_type], SAS_ADDR(parent->sas_addr), parent_phy->phy_id, ex_type[child->dev_type], SAS_ADDR(child->sas_addr), child_phy->phy_id, sas_route_char(parent, parent_phy), sas_route_char(child, child_phy)); } static int sas_check_eeds(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { int res = 0; struct domain_device *parent = child->parent; if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " "phy S:0x%x, while there is a fanout ex %016llx\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id, SAS_ADDR(parent->port->disc.fanout_sas_addr)); } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { memcpy(parent->port->disc.eeds_a, parent->sas_addr, SAS_ADDR_SIZE); memcpy(parent->port->disc.eeds_b, child->sas_addr, SAS_ADDR_SIZE); } else if (((SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(child->sas_addr))) && ((SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(child->sas_addr)))) ; else { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " "phy 0x%x link forms a third EEDS!\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id); } return res; } /* Here we spill over 80 columns. It is intentional. 
*/ static int sas_check_parent_topology(struct domain_device *child) { struct expander_device *child_ex = &child->ex_dev; struct expander_device *parent_ex; int i; int res = 0; if (!child->parent) return 0; if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; struct ex_phy *child_phy; if (parent_phy->phy_state == PHY_VACANT || parent_phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) continue; child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { case SAS_EDGE_EXPANDER_DEVICE: if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { res = sas_check_eeds(child, parent_phy, child_phy); } else if (child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == TABLE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING || (child_phy->routing_attr == TABLE_ROUTING && child_ex->t2t_supp && parent_ex->t2t_supp)) { /* All good */; } else { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } break; case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } break; default: break; } } return res; } #define RRI_REQ_SIZE 16 #define RRI_RESP_SIZE 44 static int sas_configure_present(struct domain_device *dev, int phy_id, u8 *sas_addr, int *index, int *present) { int i, res = 0; struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; u8 *rri_req; u8 *rri_resp; *present = 0; *index = 0; rri_req = alloc_smp_req(RRI_REQ_SIZE); if (!rri_req) return -ENOMEM; rri_resp = alloc_smp_resp(RRI_RESP_SIZE); if (!rri_resp) { kfree(rri_req); return -ENOMEM; } rri_req[1] = SMP_REPORT_ROUTE_INFO; rri_req[9] = phy_id; for (i = 0; i < ex->max_route_indexes ; i++) { *(__be16 *)(rri_req+6) = cpu_to_be16(i); res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, RRI_RESP_SIZE); if (res) goto out; res = rri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx " "phy 0x%x index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, i); goto out; } else if (res != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " "result 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), phy_id, i, res); goto out; } if (SAS_ADDR(sas_addr) != 0) { if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { *index = i; if ((rri_resp[12] & 0x80) == 0x80) *present = 0; else *present = 1; goto out; } else if (SAS_ADDR(rri_resp+16) == 0) { *index = i; *present = 0; goto out; } } else if (SAS_ADDR(rri_resp+16) == 0 && phy->last_da_index < i) { phy->last_da_index = i; *index = i; *present = 0; goto out; } } res = -1; out: kfree(rri_req); kfree(rri_resp); return res; } #define CRI_REQ_SIZE 44 #define CRI_RESP_SIZE 8 static int sas_configure_set(struct domain_device *dev, int phy_id, u8 *sas_addr, int index, int include) { int 
res; u8 *cri_req; u8 *cri_resp; cri_req = alloc_smp_req(CRI_REQ_SIZE); if (!cri_req) return -ENOMEM; cri_resp = alloc_smp_resp(CRI_RESP_SIZE); if (!cri_resp) { kfree(cri_req); return -ENOMEM; } cri_req[1] = SMP_CONF_ROUTE_INFO; *(__be16 *)(cri_req+6) = cpu_to_be16(index); cri_req[9] = phy_id; if (SAS_ADDR(sas_addr) == 0 || !include) cri_req[12] |= 0x80; memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, CRI_RESP_SIZE); if (res) goto out; res = cri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " "index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, index); } out: kfree(cri_req); kfree(cri_resp); return res; } static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include) { int index; int present; int res; res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); if (res) return res; if (include ^ present) return sas_configure_set(dev, phy_id, sas_addr, index,include); return res; } /** * sas_configure_parent - configure routing table of parent * @parent: parent expander * @child: child expander * @sas_addr: SAS port identifier of device directly attached to child * @include: whether or not to include @child in the expander routing table */ static int sas_configure_parent(struct domain_device *parent, struct domain_device *child, u8 *sas_addr, int include) { struct expander_device *ex_parent = &parent->ex_dev; int res = 0; int i; if (parent->parent) { res = sas_configure_parent(parent->parent, parent, sas_addr, include); if (res) return res; } if (ex_parent->conf_route_table == 0) { SAS_DPRINTK("ex %016llx has self-configuring routing table\n", SAS_ADDR(parent->sas_addr)); return 0; } for (i = 0; i < ex_parent->num_phys; i++) { struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr))) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; } } return res; } /** * sas_configure_routing - configure routing * @dev: expander device * @sas_addr: port identifier of device directly attached to the expander device */ static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 1); return 0; } static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 0); return 0; } /** * sas_discover_expander - expander discovery * @dev: pointer to expander domain device * * See comment in sas_discover_sata(). 
*/ static int sas_discover_expander(struct domain_device *dev) { int res; res = sas_notify_lldd_dev_found(dev); if (res) return res; res = sas_ex_general(dev); if (res) goto out_err; res = sas_ex_manuf_info(dev); if (res) goto out_err; res = sas_expander_discover(dev); if (res) { SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", SAS_ADDR(dev->sas_addr), res); goto out_err; } sas_check_ex_subtractive_boundary(dev); res = sas_check_parent_topology(dev); if (res) goto out_err; return 0; out_err: sas_notify_lldd_dev_gone(dev); return res; } static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) { int res = 0; struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); if (level == ex->level) res = sas_ex_discover_devices(dev, -1); else if (level > 0) res = sas_ex_discover_devices(port->port_dev, -1); } } return res; } static int sas_ex_bfs_disc(struct asd_sas_port *port) { int res; int level; do { level = port->disc.max_level; res = sas_ex_level_discovery(port, level); mb(); } while (level < port->disc.max_level); return res; } int sas_discover_root_expander(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); res = sas_rphy_add(dev->rphy); if (res) goto out_err; ex->level = dev->port->disc.max_level; /* 0 */ res = sas_discover_expander(dev); if (res) goto out_err2; sas_ex_bfs_disc(dev->port); return res; out_err2: sas_rphy_remove(dev->rphy); out_err: return res; } /* ---------- Domain revalidation ---------- */ static int sas_get_phy_discover(struct domain_device *dev, int phy_id, struct smp_resp *disc_resp) { int res; u8 *disc_req; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_req[1] = SMP_DISCOVER; disc_req[9] = phy_id; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) goto out; else if (disc_resp->result != SMP_RESP_FUNC_ACC) { res = disc_resp->result; goto out; } out: kfree(disc_req); return res; } static int sas_get_phy_change_count(struct domain_device *dev, int phy_id, int *pcc) { int res; struct smp_resp *disc_resp; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (!res) *pcc = disc_resp->disc.change_count; kfree(disc_resp); return res; } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; struct discover_resp *dr; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; dr = &disc_resp->disc; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (res == 0) { memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8); *type = to_dev_type(dr); if (*type == 0) memset(sas_addr, 0, 8); } kfree(disc_resp); return res; } static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, int from_phy, bool update) { struct expander_device *ex = &dev->ex_dev; int res = 0; int i; for (i = from_phy; i < ex->num_phys; i++) { int phy_change_count = 0; res = sas_get_phy_change_count(dev, i, &phy_change_count); switch (res) { case SMP_RESP_PHY_VACANT: case SMP_RESP_NO_PHY: continue; case SMP_RESP_FUNC_ACC: break; default: return res; } if (phy_change_count != ex->ex_phy[i].phy_change_count) { if (update) ex->ex_phy[i].phy_change_count = 
phy_change_count; *phy_id = i; return 0; } } return 0; } static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) { int res; u8 *rg_req; struct smp_resp *rg_resp; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) goto out; if (rg_resp->result != SMP_RESP_FUNC_ACC) { res = rg_resp->result; goto out; } *ecc = be16_to_cpu(rg_resp->rg.change_count); out: kfree(rg_resp); kfree(rg_req); return res; } /** * sas_find_bcast_dev - find the device issue BROADCAST(CHANGE). * @dev:domain device to be detect. * @src_dev: the device which originated BROADCAST(CHANGE). * * Add self-configuration expander support. Suppose two expander cascading, * when the first level expander is self-configuring, hotplug the disks in * second level expander, BROADCAST(CHANGE) will not only be originated * in the second level expander, but also be originated in the first level * expander (see SAS protocol SAS 2r-14, 7.11 for detail), it is to say, * expander changed count in two level expanders will all increment at least * once, but the phy which chang count has changed is the source device which * we concerned. */ static int sas_find_bcast_dev(struct domain_device *dev, struct domain_device **src_dev) { struct expander_device *ex = &dev->ex_dev; int ex_change_count = -1; int phy_id = -1; int res; struct domain_device *ch; res = sas_get_ex_change_count(dev, &ex_change_count); if (res) goto out; if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { /* Just detect if this expander phys phy change count changed, * in order to determine if this expander originate BROADCAST, * and do not update phy change count field in our structure. 
*/ res = sas_find_bcast_phy(dev, &phy_id, 0, false); if (phy_id != -1) { *src_dev = dev; ex->ex_change_count = ex_change_count; SAS_DPRINTK("Expander phy change count has changed\n"); return res; } else SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; } } out: return res; } static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child, *n; list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); } sas_unregister_dev(port, dev); } static void sas_unregister_devs_sas_addr(struct domain_device *parent, int phy_id, bool last) { struct expander_device *ex_dev = &parent->ex_dev; struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; struct domain_device *child, *n, *found = NULL; if (last) { list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); found = child; break; } } sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); if (phy->port) { sas_port_delete_phy(phy->port, phy->phy); sas_device_set_phy(found, phy->port); if (phy->port->num_phys == 0) list_add_tail(&phy->port->del_list, &parent->port->sas_port_del_list); phy->port = NULL; } } static int sas_discover_bfs_by_root_level(struct domain_device *root, const int level) { struct expander_device *ex_root = &root->ex_dev; struct domain_device *child; int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); if (level > ex->level) res = sas_discover_bfs_by_root_level(child, level); else if (level == ex->level) res = sas_ex_discover_devices(child, -1); } } return res; } static int sas_discover_bfs_by_root(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); int level = ex->level+1; res = sas_ex_discover_devices(dev, -1); if (res) goto out; do { res = sas_discover_bfs_by_root_level(dev, level); mb(); level += 1; } while (level <= dev->port->disc.max_level); out: return res; } static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; int res; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) return res; if (sas_ex_join_wide_port(dev, phy_id)) return 0; res = sas_ex_discover_devices(dev, phy_id); if (res) return res; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = 
sas_discover_bfs_by_root(child); break; } } return res; } static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return false; } static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; enum sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; memset(sas_addr, 0, 8); res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); switch (res) { case SMP_RESP_NO_PHY: phy->phy_state = PHY_NOT_PRESENT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_FUNC_ACC: break; case -ECOMM: break; default: return res; } if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && dev_type_flutter(type, phy->attached_dev_type)) { struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id); char *action = ""; sas_ex_phy_discover(dev, phy_id); if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); return res; } /* we always have to delete the old device when we went here */ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", SAS_ADDR(dev->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr)); sas_unregister_devs_sas_addr(dev, phy_id, last); return sas_discover_new(dev, phy_id); } /** * sas_rediscover - revalidate the domain. * @dev:domain device to be detect. * @phy_id: the phy id will be detected. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. * Discover process only interrogates devices in order to discover the * domain.For plugging out, we un-register the device only when it is * the last phy in the port, for other phys in this port, we just delete it * from the port.For inserting, we do discovery when it is the * first phy,for other phys in this port, we add it to the port to * forming the wide-port. */ static int sas_rediscover(struct domain_device *dev, const int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; int res = 0; int i; bool last = true; /* is this the last phy of the port */ SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", SAS_ADDR(dev->sas_addr), phy_id); if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (i == phy_id) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(changed_phy->attached_sas_addr)) { SAS_DPRINTK("phy%d part of wide port with " "phy%d\n", phy_id, i); last = false; break; } } res = sas_rediscover_dev(dev, phy_id, last); } else res = sas_discover_new(dev, phy_id); return res; } /** * sas_ex_revalidate_domain - revalidate the domain * @port_dev: port domain device. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. 
* Discover process only interrogates devices in order to discover the * domain. */ int sas_ex_revalidate_domain(struct domain_device *port_dev) { int res; struct domain_device *dev = NULL; res = sas_find_bcast_dev(port_dev, &dev); if (res == 0 && dev) { struct expander_device *ex = &dev->ex_dev; int i = 0, phy_id; do { phy_id = -1; res = sas_find_bcast_phy(dev, &phy_id, i, true); if (phy_id == -1) break; res = sas_rediscover(dev, phy_id); i = phy_id + 1; } while (i < ex->num_phys); } return res; } void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { struct domain_device *dev; unsigned int rcvlen = 0; int ret = -EINVAL; /* no rphy means no smp target support (ie aic94xx host) */ if (!rphy) return sas_smp_host_handler(job, shost); switch (rphy->identify.device_type) { case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: break; default: printk("%s: can we send a smp request to a device?\n", __func__); goto out; } dev = sas_find_dev_by_rphy(rphy); if (!dev) { printk("%s: fail to find a domain_device?\n", __func__); goto out; } /* do we need to support multiple segments? */ if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) { printk("%s: multiple segments req %u, rsp %u\n", __func__, job->request_payload.payload_len, job->reply_payload.payload_len); goto out; } ret = smp_execute_task_sg(dev, job->request_payload.sg_list, job->reply_payload.sg_list); if (ret >= 0) { /* bsg_job_done() requires the length received */ rcvlen = job->reply_payload.payload_len - ret; ret = 0; } out: bsg_job_done(job, ret, rcvlen); }
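The comment block for sas_find_bcast_dev above describes how domain revalidation locates the expander that originated a BROADCAST(CHANGE): read the expander-level change count with REPORT GENERAL, compare it with the cached value, then scan the individual phys for the one whose change count moved, and descend into child expanders only when none of the local phys changed. The following userspace sketch models that walk. It is illustrative only; toy_expander, find_changed_phy, find_bcast_dev and the fixed two-level topology in main are invented for the example and are not the libsas API.

#include <stdio.h>

#define MAX_PHYS 8
#define MAX_CHILDREN 4

/* Toy stand-in for an expander: cached vs. freshly read change counts. */
struct toy_expander {
        const char *name;
        int cached_change_count;          /* what we remembered            */
        int current_change_count;         /* what REPORT GENERAL would say */
        int cached_phy_cc[MAX_PHYS];      /* cached per-phy change counts  */
        int current_phy_cc[MAX_PHYS];     /* per-phy counts from DISCOVER  */
        int num_phys;
        struct toy_expander *children[MAX_CHILDREN];
        int num_children;
};

/* Return the first phy whose change count moved, or -1 if none did. */
static int find_changed_phy(const struct toy_expander *ex)
{
        int i;

        for (i = 0; i < ex->num_phys; i++)
                if (ex->current_phy_cc[i] != ex->cached_phy_cc[i])
                        return i;
        return -1;
}

/*
 * Depth-first search for the originating expander: an expander only counts
 * if one of its own phys changed, because a self-configuring parent bumps
 * its expander-level change count as well.
 */
static struct toy_expander *find_bcast_dev(struct toy_expander *ex, int *phy_id)
{
        int i;

        if (ex->current_change_count != ex->cached_change_count) {
                int phy = find_changed_phy(ex);

                if (phy >= 0) {
                        *phy_id = phy;
                        return ex;
                }
        }
        for (i = 0; i < ex->num_children; i++) {
                struct toy_expander *hit = find_bcast_dev(ex->children[i], phy_id);

                if (hit)
                        return hit;
        }
        return NULL;
}

int main(void)
{
        struct toy_expander child = {
                .name = "child", .num_phys = 4,
                .cached_change_count = 7, .current_change_count = 8,
                .cached_phy_cc  = { 1, 1, 1, 1 },
                .current_phy_cc = { 1, 2, 1, 1 },   /* phy 1 changed */
        };
        struct toy_expander root = {
                .name = "root", .num_phys = 4,
                .cached_change_count = 3, .current_change_count = 4, /* bumped too */
                .cached_phy_cc  = { 5, 5, 5, 5 },
                .current_phy_cc = { 5, 5, 5, 5 },   /* but no local phy changed */
                .children = { &child }, .num_children = 1,
        };
        int phy_id = -1;
        struct toy_expander *src = find_bcast_dev(&root, &phy_id);

        if (src)
                printf("change originated at %s phy %d\n", src->name, phy_id);
        return 0;
}

Running the sketch reports that the change originated at the child expander's phy 1: the root expander's change count was bumped as well, but none of its own phys changed, so the search descends, mirroring the two-level cascading case the kernel comment describes.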
static void smp_task_done(struct sas_task *task)
{
        if (!del_timer(&task->slow_task->timer))
                return;
        complete(&task->slow_task->completion);
}
static void smp_task_done(struct sas_task *task)
{
        del_timer(&task->slow_task->timer);
        complete(&task->slow_task->completion);
}
{'added': [(51, '\tif (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {'), (53, '\t\tcomplete(&task->slow_task->completion);'), (54, '\t}'), (60, '\tdel_timer(&task->slow_task->timer);')], 'deleted': [(51, '\tif (!(task->task_state_flags & SAS_TASK_STATE_DONE))'), (54, ''), (55, '\tcomplete(&task->slow_task->completion);'), (60, '\tif (!del_timer(&task->slow_task->timer))'), (61, '\t\treturn;')]}
4
5
1,724
10,440
https://github.com/torvalds/linux
CVE-2018-20836
['CWE-416', 'CWE-362']
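The func_before/func_after pair and the diff above record the fix for CVE-2018-20836 (use-after-free and race condition, CWE-416 and CWE-362): the timeout handler now signals completion only while the task_state_lock shows the task not yet done, and smp_task_done deletes the timer and completes unconditionally, so the two completion paths can no longer both fire against a task the waiter has already torn down. Below is a loose userspace analogue of the fixed pattern using pthreads; it is a sketch only, the names struct task, done_path and timeout_path are illustrative rather than kernel code, and it shows the "test and signal under one lock" idea, not the actual libsas change.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Userspace stand-in for the task: one lock guards the done flag and the
 * condition variable that plays the role of the completion. */
struct task {
        pthread_mutex_t lock;
        pthread_cond_t completion;
        bool done;      /* plays the role of SAS_TASK_STATE_DONE    */
        bool aborted;   /* plays the role of SAS_TASK_STATE_ABORTED */
};

/* "Timeout" path: mark aborted and signal only if nobody finished first. */
static void *timeout_path(void *arg)
{
        struct task *t = arg;

        usleep(1000);                       /* pretend the timer fired */
        pthread_mutex_lock(&t->lock);
        if (!t->done) {
                t->aborted = true;
                t->done = true;
                pthread_cond_signal(&t->completion);
        }
        pthread_mutex_unlock(&t->lock);
        return NULL;
}

/* "Done" path: mark done and signal, again checking the flag under the lock
 * so at most one of the two paths ever wakes the waiter. */
static void *done_path(void *arg)
{
        struct task *t = arg;

        pthread_mutex_lock(&t->lock);
        if (!t->done) {
                t->done = true;
                pthread_cond_signal(&t->completion);
        }
        pthread_mutex_unlock(&t->lock);
        return NULL;
}

int main(void)
{
        struct task *t = calloc(1, sizeof(*t));
        pthread_t a, b;

        pthread_mutex_init(&t->lock, NULL);
        pthread_cond_init(&t->completion, NULL);

        pthread_create(&a, NULL, done_path, t);
        pthread_create(&b, NULL, timeout_path, t);

        /* Waiter: wake exactly once, whichever path won the race. */
        pthread_mutex_lock(&t->lock);
        while (!t->done)
                pthread_cond_wait(&t->completion, &t->lock);
        pthread_mutex_unlock(&t->lock);

        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("completed, aborted=%d\n", t->aborted);
        free(t);
        return 0;
}

Build with cc -pthread; the program prints "completed" exactly once regardless of which path runs first, because the check of the flag and the signal happen under the same lock, which is the essence of the pattern the kernel fix adopts.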
sas_expander.c
smp_task_timedout
/* * Serial Attached SCSI (SAS) Expander discovery and configuration * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/scatterlist.h> #include <linux/blkdev.h> #include <linux/slab.h> #include "sas_internal.h" #include <scsi/sas_ata.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" static int sas_discover_expander(struct domain_device *dev); static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include); static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); /* ---------- SMP task management ---------- */ static void smp_task_timedout(struct timer_list *t) { struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); complete(&task->slow_task->completion); } static void smp_task_done(struct sas_task *task) { if (!del_timer(&task->slow_task->timer)) return; complete(&task->slow_task->completion); } /* Give it some long enough timeout. In seconds. 
*/ #define SMP_TIMEOUT 10 static int smp_execute_task_sg(struct domain_device *dev, struct scatterlist *req, struct scatterlist *resp) { int res, retry; struct sas_task *task = NULL; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); mutex_lock(&dev->ex_dev.cmd_mutex); for (retry = 0; retry < 3; retry++) { if (test_bit(SAS_DEV_GONE, &dev->state)) { res = -ECOMM; break; } task = sas_alloc_slow_task(GFP_KERNEL); if (!task) { res = -ENOMEM; break; } task->dev = dev; task->task_proto = dev->tproto; task->smp_task.smp_req = *req; task->smp_task.smp_resp = *resp; task->task_done = smp_task_done; task->slow_task->timer.function = smp_task_timedout; task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; add_timer(&task->slow_task->timer); res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { del_timer(&task->slow_task->timer); SAS_DPRINTK("executing SMP task failed:%d\n", res); break; } wait_for_completion(&task->slow_task->completion); res = -ECOMM; if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { SAS_DPRINTK("smp task timed out or aborted\n"); i->dft->lldd_abort_task(task); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { SAS_DPRINTK("SMP task aborted and not done\n"); break; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = 0; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { res = -EMSGSIZE; break; } if (task->task_status.resp == SAS_TASK_UNDELIVERED && task->task_status.stat == SAS_DEVICE_UNKNOWN) break; else { SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " "status 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat); sas_free_task(task); task = NULL; } } mutex_unlock(&dev->ex_dev.cmd_mutex); BUG_ON(retry == 3 && task != NULL); sas_free_task(task); return res; } static int smp_execute_task(struct domain_device *dev, void *req, int req_size, void *resp, int resp_size) { struct scatterlist req_sg; struct scatterlist resp_sg; sg_init_one(&req_sg, req, req_size); sg_init_one(&resp_sg, resp, resp_size); return smp_execute_task_sg(dev, &req_sg, &resp_sg); } /* ---------- Allocations ---------- */ static inline void *alloc_smp_req(int size) { u8 *p = kzalloc(size, GFP_KERNEL); if (p) p[0] = SMP_REQUEST; return p; } static inline void *alloc_smp_resp(int size) { return kzalloc(size, GFP_KERNEL); } static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) { switch (phy->routing_attr) { case TABLE_ROUTING: if (dev->ex_dev.t2t_supp) return 'U'; else return 'T'; case DIRECT_ROUTING: return 'D'; case SUBTRACTIVE_ROUTING: return 'S'; default: return '?'; } } static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; struct discover_resp *dr = &resp->disc; struct sas_ha_struct *ha = dev->port->ha; struct 
expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; struct sas_rphy *rphy = dev->rphy; bool new_phy = !phy->phy; char *type; if (new_phy) { if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) return; phy->phy = sas_phy_alloc(&rphy->dev, phy_id); /* FIXME: error_handling */ BUG_ON(!phy->phy); } switch (resp->result) { case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; break; default: phy->phy_state = PHY_NOT_PRESENT; break; case SMP_RESP_FUNC_ACC: phy->phy_state = PHY_EMPTY; /* do not know yet */ break; } /* check if anything important changed to squelch debug */ dev_type = phy->attached_dev_type; linkrate = phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); /* Handle vacant phy - rest of dr data is not valid so skip it */ if (phy->phy_state == PHY_VACANT) { memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); phy->attached_dev_type = SAS_PHY_UNUSED; if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { phy->phy_id = phy_id; goto skip; } else goto out; } phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; phy->phy_id = phy_id; phy->linkrate = dr->linkrate; phy->attached_sata_host = dr->attached_sata_host; phy->attached_sata_dev = dr->attached_sata_dev; phy->attached_sata_ps = dr->attached_sata_ps; phy->attached_iproto = dr->iproto << 1; phy->attached_tproto = dr->tproto << 1; /* help some expanders that fail to zero sas_address in the 'no * device' case */ if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_phy_id = dr->attached_phy_id; phy->phy_change_count = dr->change_count; phy->routing_attr = dr->routing_attr; phy->virtual = dr->virtual; phy->last_da_index = -1; phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr); phy->phy->identify.device_type = dr->attached_dev_type; phy->phy->identify.initiator_port_protocols = phy->attached_iproto; phy->phy->identify.target_port_protocols = phy->attached_tproto; if (!phy->attached_tproto && dr->attached_sata_dev) phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; phy->phy->identify.phy_identifier = phy_id; phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); skip: if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); return; } out: switch (phy->attached_dev_type) { case SAS_SATA_PENDING: type = "stp pending"; break; case SAS_PHY_UNUSED: type = "no device"; break; case SAS_END_DEVICE: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; else type = "host"; } else { if (dr->attached_sata_dev) type = "stp"; else type = "ssp"; } break; case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: type = "unknown"; } /* this routine is polled by libata error recovery so filter * unimportant messages */ if (new_phy || phy->attached_dev_type != dev_type || phy->linkrate != linkrate || SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr)) /* pass */; else return; /* if the attached device type changed and ata_eh is active, * make sure we run revalidation when eh completes (see: * sas_enable_revalidation) */ if (test_bit(SAS_HA_ATA_EH_ACTIVE, 
&ha->state)) set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", SAS_ADDR(dev->sas_addr), phy->phy_id, sas_route_char(dev, phy), phy->linkrate, SAS_ADDR(phy->attached_sas_addr), type); } /* check if we have an existing attached ata device on this expander phy */ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id) { struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id]; struct domain_device *dev; struct sas_rphy *rphy; if (!ex_phy->port) return NULL; rphy = ex_phy->port->rphy; if (!rphy) return NULL; dev = sas_find_dev_by_rphy(rphy); if (dev && dev_is_sata(dev)) return dev; return NULL; } #define DISCOVER_REQ_SIZE 16 #define DISCOVER_RESP_SIZE 56 static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, u8 *disc_resp, int single) { struct discover_resp *dr; int res; disc_req[9] = single; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) return res; dr = &((struct smp_resp *)disc_resp)->disc; if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) { sas_printk("Found loopback topology, just ignore it!\n"); return 0; } sas_set_ex_phy(dev, single, disc_resp); return 0; } int sas_ex_phy_discover(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int res = 0; u8 *disc_req; u8 *disc_resp; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; } disc_req[1] = SMP_DISCOVER; if (0 <= single && single < ex->num_phys) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single); } else { int i; for (i = 0; i < ex->num_phys; i++) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, i); if (res) goto out_err; } } out_err: kfree(disc_resp); kfree(disc_req); return res; } static int sas_expander_discover(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int res = -ENOMEM; ex->ex_phy = kcalloc(ex->num_phys, sizeof(*ex->ex_phy), GFP_KERNEL); if (!ex->ex_phy) return -ENOMEM; res = sas_ex_phy_discover(dev, -1); if (res) goto out_err; return 0; out_err: kfree(ex->ex_phy); ex->ex_phy = NULL; return res; } #define MAX_EXPANDER_PHYS 128 static void ex_assign_report_general(struct domain_device *dev, struct smp_resp *resp) { struct report_general_resp *rg = &resp->rg; dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); dev->ex_dev.t2t_supp = rg->t2t_supp; dev->ex_dev.conf_route_table = rg->conf_route_table; dev->ex_dev.configuring = rg->configuring; memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); } #define RG_REQ_SIZE 8 #define RG_RESP_SIZE 32 static int sas_ex_general(struct domain_device *dev) { u8 *rg_req; struct smp_resp *rg_resp; int res; int i; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; for (i = 0; i < 5; i++) { res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) { SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("RG:ex %016llx returned SMP 
result:0x%x\n", SAS_ADDR(dev->sas_addr), rg_resp->result); res = rg_resp->result; goto out; } ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { SAS_DPRINTK("RG: ex %llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else break; } out: kfree(rg_req); kfree(rg_resp); return res; } static void ex_assign_manuf_info(struct domain_device *dev, void *_mi_resp) { u8 *mi_resp = _mi_resp; struct sas_rphy *rphy = dev->rphy; struct sas_expander_device *edev = rphy_to_expander_device(rphy); memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); memcpy(edev->product_rev, mi_resp + 36, SAS_EXPANDER_PRODUCT_REV_LEN); if (mi_resp[8] & 1) { memcpy(edev->component_vendor_id, mi_resp + 40, SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); edev->component_id = mi_resp[48] << 8 | mi_resp[49]; edev->component_revision_id = mi_resp[50]; } } #define MI_REQ_SIZE 8 #define MI_RESP_SIZE 64 static int sas_ex_manuf_info(struct domain_device *dev) { u8 *mi_req; u8 *mi_resp; int res; mi_req = alloc_smp_req(MI_REQ_SIZE); if (!mi_req) return -ENOMEM; mi_resp = alloc_smp_resp(MI_RESP_SIZE); if (!mi_resp) { kfree(mi_req); return -ENOMEM; } mi_req[1] = SMP_REPORT_MANUF_INFO; res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); if (res) { SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), mi_resp[2]); goto out; } ex_assign_manuf_info(dev, mi_resp); out: kfree(mi_req); kfree(mi_resp); return res; } #define PC_REQ_SIZE 44 #define PC_RESP_SIZE 8 int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func phy_func, struct sas_phy_linkrates *rates) { u8 *pc_req; u8 *pc_resp; int res; pc_req = alloc_smp_req(PC_REQ_SIZE); if (!pc_req) return -ENOMEM; pc_resp = alloc_smp_resp(PC_RESP_SIZE); if (!pc_resp) { kfree(pc_req); return -ENOMEM; } pc_req[1] = SMP_PHY_CONTROL; pc_req[9] = phy_id; pc_req[10]= phy_func; if (rates) { pc_req[32] = rates->minimum_linkrate << 4; pc_req[33] = rates->maximum_linkrate << 4; } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); kfree(pc_resp); kfree(pc_req); return res; } static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL); phy->linkrate = SAS_PHY_DISABLED; } static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) sas_ex_disable_phy(dev, i); } } static int sas_dev_present_in_domain(struct asd_sas_port *port, u8 *sas_addr) { struct domain_device *dev; if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) return 1; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) return 1; } return 0; } #define RPEL_REQ_SIZE 16 #define RPEL_RESP_SIZE 32 int sas_smp_get_phy_events(struct sas_phy *phy) { int res; u8 *req; u8 *resp; struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); struct domain_device *dev = sas_find_dev_by_rphy(rphy); req = alloc_smp_req(RPEL_REQ_SIZE); if (!req) 
return -ENOMEM; resp = alloc_smp_resp(RPEL_RESP_SIZE); if (!resp) { kfree(req); return -ENOMEM; } req[1] = SMP_REPORT_PHY_ERR_LOG; req[9] = phy->number; res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); if (res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); phy->running_disparity_error_count = scsi_to_u32(&resp[16]); phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: kfree(req); kfree(resp); return res; } #ifdef CONFIG_SCSI_SAS_ATA #define RPS_REQ_SIZE 16 #define RPS_RESP_SIZE 60 int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, struct smp_resp *rps_resp) { int res; u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); u8 *resp = (u8 *)rps_resp; if (!rps_req) return -ENOMEM; rps_req[1] = SMP_REPORT_PHY_SATA; rps_req[9] = phy_id; res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, rps_resp, RPS_RESP_SIZE); /* 0x34 is the FIS type for the D2H fis. There's a potential * standards cockup here. sas-2 explicitly specifies the FIS * should be encoded so that FIS type is in resp[24]. * However, some expanders endian reverse this. Undo the * reversal here */ if (!res && resp[27] == 0x34 && resp[24] != 0x34) { int i; for (i = 0; i < 5; i++) { int j = 24 + (i*4); u8 a, b; a = resp[j + 0]; b = resp[j + 1]; resp[j + 0] = resp[j + 3]; resp[j + 1] = resp[j + 2]; resp[j + 2] = b; resp[j + 3] = a; } } kfree(rps_req); return res; } #endif static void sas_ex_get_linkrate(struct domain_device *parent, struct domain_device *child, struct ex_phy *parent_phy) { struct expander_device *parent_ex = &parent->ex_dev; struct sas_port *port; int i; child->pathways = 0; port = parent_phy->port; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *phy = &parent_ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr)) { child->min_linkrate = min(parent->min_linkrate, phy->linkrate); child->max_linkrate = max(parent->max_linkrate, phy->linkrate); child->pathways++; sas_port_add_phy(port, phy->phy); } } child->linkrate = min(parent_phy->linkrate, child->max_linkrate); child->pathways = min(child->pathways, parent->pathways); } static struct domain_device *sas_ex_discover_end_dev( struct domain_device *parent, int phy_id) { struct expander_device *parent_ex = &parent->ex_dev; struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; int res; if (phy->attached_sata_host || phy->attached_sata_ps) return NULL; child = sas_alloc_device(); if (!child) return NULL; kref_get(&parent->kref); child->parent = parent; child->port = parent->port; child->iproto = phy->attached_iproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); if (!phy->port) { phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); if (unlikely(!phy->port)) goto out_err; if (unlikely(sas_port_add(phy->port) != 0)) { sas_port_free(phy->port); goto out_err; } } sas_ex_get_linkrate(parent, child, phy); sas_device_set_phy(child, phy->port); #ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { res = sas_get_ata_info(child, phy); if (res) goto out_free; sas_init_dev(child); res = sas_ata_init(child); if (res) goto out_free; rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; child->rphy = rphy; get_device(&rphy->dev); list_add_tail(&child->disco_list_node, 
&parent->port->disco_list); res = sas_discover_sata(child); if (res) { SAS_DPRINTK("sas_discover_sata() for device %16llx at " "%016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) goto out_free; child->tproto = phy->attached_tproto; sas_init_dev(child); child->rphy = rphy; get_device(&rphy->dev); sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); res = sas_discover_end_dev(child); if (res) { SAS_DPRINTK("sas_discover_end_dev() for device %16llx " "at %016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else { SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", phy->attached_tproto, SAS_ADDR(parent->sas_addr), phy_id); goto out_free; } list_add_tail(&child->siblings, &parent_ex->children); return child; out_list_del: sas_rphy_free(child->rphy); list_del(&child->disco_list_node); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); out_free: sas_port_delete(phy->port); out_err: phy->port = NULL; sas_put_device(child); return NULL; } /* See if this phy is part of a wide port */ static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id) { struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; int i; for (i = 0; i < parent->ex_dev.num_phys; i++) { struct ex_phy *ephy = &parent->ex_dev.ex_phy[i]; if (ephy == phy) continue; if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, SAS_ADDR_SIZE) && ephy->port) { sas_port_add_phy(ephy->port, phy->phy); phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; return true; } } return false; } static struct domain_device *sas_ex_discover_expander( struct domain_device *parent, int phy_id) { struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; struct sas_expander_device *edev; struct asd_sas_port *port; int res; if (phy->routing_attr == DIRECT_ROUTING) { SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " "allowed\n", SAS_ADDR(parent->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr), phy->attached_phy_id); return NULL; } child = sas_alloc_device(); if (!child) return NULL; phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); /* FIXME: better error handling */ BUG_ON(sas_port_add(phy->port) != 0); switch (phy->attached_dev_type) { case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; default: rphy = NULL; /* shut gcc up */ BUG(); } port = parent->port; child->rphy = rphy; get_device(&rphy->dev); edev = rphy_to_expander_device(rphy); child->dev_type = phy->attached_dev_type; kref_get(&parent->kref); child->parent = parent; child->port = port; child->iproto = phy->attached_iproto; child->tproto = phy->attached_tproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); sas_ex_get_linkrate(parent, child, phy); edev->level = parent_ex->level + 1; parent->port->disc.max_level = max(parent->port->disc.max_level, 
edev->level); sas_init_dev(child); sas_fill_in_rphy(child, rphy); sas_rphy_add(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_add_tail(&child->dev_list_node, &parent->port->dev_list); spin_unlock_irq(&parent->port->dev_list_lock); res = sas_discover_expander(child); if (res) { sas_rphy_delete(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); sas_put_device(child); return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); return child; } static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; struct domain_device *child = NULL; int res = 0; /* Phy state */ if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL)) res = sas_ex_phy_discover(dev, phy_id); if (res) return res; } /* Parent and domain coherency */ if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->port->sas_addr))) { sas_add_parent_port(dev, phy_id); return 0; } if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->parent->sas_addr))) { sas_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); return 0; } if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); } return 0; } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; if (ex_phy->attached_dev_type != SAS_END_DEVICE && ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), phy_id); return 0; } res = sas_configure_routing(dev, ex_phy->attached_sas_addr); if (res) { SAS_DPRINTK("configure routing for dev %016llx " "reported 0x%x. Forgotten\n", SAS_ADDR(ex_phy->attached_sas_addr), res); sas_disable_routing(dev, ex_phy->attached_sas_addr); return res; } if (sas_ex_join_wide_port(dev, phy_id)) { SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); return res; } switch (ex_phy->attached_dev_type) { case SAS_END_DEVICE: case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; case SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", SAS_ADDR(ex_phy->attached_sas_addr), ex_phy->attached_phy_id, SAS_ADDR(dev->sas_addr), phy_id); sas_ex_disable_phy(dev, phy_id); break; } else memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: break; } if (child) { int i; for (i = 0; i < ex->num_phys; i++) { if (ex->ex_phy[i].phy_state == PHY_VACANT || ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) continue; /* * Due to races, the phy might not get added to the * wide port, so we add the phy to the wide port here. 
*/ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; if (sas_ex_join_wide_port(dev, i)) SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); } } } return res; } static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); return 1; } } return 0; } static int sas_check_level_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child; u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); continue; } else { u8 s2[8]; if (sas_find_sub_addr(child, s2) && (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " "diverges from subtractive " "boundary %016llx\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(child->sas_addr), SAS_ADDR(s2), SAS_ADDR(sub_addr)); sas_ex_disable_port(child, s2); } } } return 0; } /** * sas_ex_discover_devices - discover devices attached to this expander * @dev: pointer to the expander domain device * @single: if you want to do a single phy, else set to -1; * * Configure this expander for use with its devices and register the * devices of this expander. */ static int sas_ex_discover_devices(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int i = 0, end = ex->num_phys; int res = 0; if (0 <= single && single < end) { i = single; end = i+1; } for ( ; i < end; i++) { struct ex_phy *ex_phy = &ex->ex_phy[i]; if (ex_phy->phy_state == PHY_VACANT || ex_phy->phy_state == PHY_NOT_PRESENT || ex_phy->phy_state == PHY_DEVICE_DISCOVERED) continue; switch (ex_phy->linkrate) { case SAS_PHY_DISABLED: case SAS_PHY_RESET_PROBLEM: case SAS_SATA_PORT_SELECTOR: continue; default: res = sas_ex_discover_dev(dev, i); if (res) break; continue; } } if (!res) sas_check_level_subtractive_boundary(dev); return res; } static int sas_check_ex_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int i; u8 *sub_sas_addr = NULL; if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) sub_sas_addr = &phy->attached_sas_addr[0]; else if (SAS_ADDR(sub_sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x " "diverges(%016llx) on subtractive " "boundary(%016llx). 
Disabled\n", SAS_ADDR(dev->sas_addr), i, SAS_ADDR(phy->attached_sas_addr), SAS_ADDR(sub_sas_addr)); sas_ex_disable_phy(dev, i); } } } return 0; } static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { static const char *ex_type[] = { [SAS_EDGE_EXPANDER_DEVICE] = "edge", [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx " "phy 0x%x has %c:%c routing link!\n", ex_type[parent->dev_type], SAS_ADDR(parent->sas_addr), parent_phy->phy_id, ex_type[child->dev_type], SAS_ADDR(child->sas_addr), child_phy->phy_id, sas_route_char(parent, parent_phy), sas_route_char(child, child_phy)); } static int sas_check_eeds(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { int res = 0; struct domain_device *parent = child->parent; if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " "phy S:0x%x, while there is a fanout ex %016llx\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id, SAS_ADDR(parent->port->disc.fanout_sas_addr)); } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { memcpy(parent->port->disc.eeds_a, parent->sas_addr, SAS_ADDR_SIZE); memcpy(parent->port->disc.eeds_b, child->sas_addr, SAS_ADDR_SIZE); } else if (((SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(child->sas_addr))) && ((SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(child->sas_addr)))) ; else { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " "phy 0x%x link forms a third EEDS!\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id); } return res; } /* Here we spill over 80 columns. It is intentional. 
*/ static int sas_check_parent_topology(struct domain_device *child) { struct expander_device *child_ex = &child->ex_dev; struct expander_device *parent_ex; int i; int res = 0; if (!child->parent) return 0; if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; struct ex_phy *child_phy; if (parent_phy->phy_state == PHY_VACANT || parent_phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) continue; child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { case SAS_EDGE_EXPANDER_DEVICE: if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { res = sas_check_eeds(child, parent_phy, child_phy); } else if (child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == TABLE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING || (child_phy->routing_attr == TABLE_ROUTING && child_ex->t2t_supp && parent_ex->t2t_supp)) { /* All good */; } else { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } break; case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } break; default: break; } } return res; } #define RRI_REQ_SIZE 16 #define RRI_RESP_SIZE 44 static int sas_configure_present(struct domain_device *dev, int phy_id, u8 *sas_addr, int *index, int *present) { int i, res = 0; struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; u8 *rri_req; u8 *rri_resp; *present = 0; *index = 0; rri_req = alloc_smp_req(RRI_REQ_SIZE); if (!rri_req) return -ENOMEM; rri_resp = alloc_smp_resp(RRI_RESP_SIZE); if (!rri_resp) { kfree(rri_req); return -ENOMEM; } rri_req[1] = SMP_REPORT_ROUTE_INFO; rri_req[9] = phy_id; for (i = 0; i < ex->max_route_indexes ; i++) { *(__be16 *)(rri_req+6) = cpu_to_be16(i); res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, RRI_RESP_SIZE); if (res) goto out; res = rri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx " "phy 0x%x index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, i); goto out; } else if (res != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " "result 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), phy_id, i, res); goto out; } if (SAS_ADDR(sas_addr) != 0) { if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { *index = i; if ((rri_resp[12] & 0x80) == 0x80) *present = 0; else *present = 1; goto out; } else if (SAS_ADDR(rri_resp+16) == 0) { *index = i; *present = 0; goto out; } } else if (SAS_ADDR(rri_resp+16) == 0 && phy->last_da_index < i) { phy->last_da_index = i; *index = i; *present = 0; goto out; } } res = -1; out: kfree(rri_req); kfree(rri_resp); return res; } #define CRI_REQ_SIZE 44 #define CRI_RESP_SIZE 8 static int sas_configure_set(struct domain_device *dev, int phy_id, u8 *sas_addr, int index, int include) { int 
res; u8 *cri_req; u8 *cri_resp; cri_req = alloc_smp_req(CRI_REQ_SIZE); if (!cri_req) return -ENOMEM; cri_resp = alloc_smp_resp(CRI_RESP_SIZE); if (!cri_resp) { kfree(cri_req); return -ENOMEM; } cri_req[1] = SMP_CONF_ROUTE_INFO; *(__be16 *)(cri_req+6) = cpu_to_be16(index); cri_req[9] = phy_id; if (SAS_ADDR(sas_addr) == 0 || !include) cri_req[12] |= 0x80; memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, CRI_RESP_SIZE); if (res) goto out; res = cri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " "index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, index); } out: kfree(cri_req); kfree(cri_resp); return res; } static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include) { int index; int present; int res; res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); if (res) return res; if (include ^ present) return sas_configure_set(dev, phy_id, sas_addr, index,include); return res; } /** * sas_configure_parent - configure routing table of parent * @parent: parent expander * @child: child expander * @sas_addr: SAS port identifier of device directly attached to child * @include: whether or not to include @child in the expander routing table */ static int sas_configure_parent(struct domain_device *parent, struct domain_device *child, u8 *sas_addr, int include) { struct expander_device *ex_parent = &parent->ex_dev; int res = 0; int i; if (parent->parent) { res = sas_configure_parent(parent->parent, parent, sas_addr, include); if (res) return res; } if (ex_parent->conf_route_table == 0) { SAS_DPRINTK("ex %016llx has self-configuring routing table\n", SAS_ADDR(parent->sas_addr)); return 0; } for (i = 0; i < ex_parent->num_phys; i++) { struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr))) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; } } return res; } /** * sas_configure_routing - configure routing * @dev: expander device * @sas_addr: port identifier of device directly attached to the expander device */ static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 1); return 0; } static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 0); return 0; } /** * sas_discover_expander - expander discovery * @dev: pointer to expander domain device * * See comment in sas_discover_sata(). 
*/ static int sas_discover_expander(struct domain_device *dev) { int res; res = sas_notify_lldd_dev_found(dev); if (res) return res; res = sas_ex_general(dev); if (res) goto out_err; res = sas_ex_manuf_info(dev); if (res) goto out_err; res = sas_expander_discover(dev); if (res) { SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", SAS_ADDR(dev->sas_addr), res); goto out_err; } sas_check_ex_subtractive_boundary(dev); res = sas_check_parent_topology(dev); if (res) goto out_err; return 0; out_err: sas_notify_lldd_dev_gone(dev); return res; } static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) { int res = 0; struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); if (level == ex->level) res = sas_ex_discover_devices(dev, -1); else if (level > 0) res = sas_ex_discover_devices(port->port_dev, -1); } } return res; } static int sas_ex_bfs_disc(struct asd_sas_port *port) { int res; int level; do { level = port->disc.max_level; res = sas_ex_level_discovery(port, level); mb(); } while (level < port->disc.max_level); return res; } int sas_discover_root_expander(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); res = sas_rphy_add(dev->rphy); if (res) goto out_err; ex->level = dev->port->disc.max_level; /* 0 */ res = sas_discover_expander(dev); if (res) goto out_err2; sas_ex_bfs_disc(dev->port); return res; out_err2: sas_rphy_remove(dev->rphy); out_err: return res; } /* ---------- Domain revalidation ---------- */ static int sas_get_phy_discover(struct domain_device *dev, int phy_id, struct smp_resp *disc_resp) { int res; u8 *disc_req; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_req[1] = SMP_DISCOVER; disc_req[9] = phy_id; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) goto out; else if (disc_resp->result != SMP_RESP_FUNC_ACC) { res = disc_resp->result; goto out; } out: kfree(disc_req); return res; } static int sas_get_phy_change_count(struct domain_device *dev, int phy_id, int *pcc) { int res; struct smp_resp *disc_resp; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (!res) *pcc = disc_resp->disc.change_count; kfree(disc_resp); return res; } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; struct discover_resp *dr; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; dr = &disc_resp->disc; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (res == 0) { memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8); *type = to_dev_type(dr); if (*type == 0) memset(sas_addr, 0, 8); } kfree(disc_resp); return res; } static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, int from_phy, bool update) { struct expander_device *ex = &dev->ex_dev; int res = 0; int i; for (i = from_phy; i < ex->num_phys; i++) { int phy_change_count = 0; res = sas_get_phy_change_count(dev, i, &phy_change_count); switch (res) { case SMP_RESP_PHY_VACANT: case SMP_RESP_NO_PHY: continue; case SMP_RESP_FUNC_ACC: break; default: return res; } if (phy_change_count != ex->ex_phy[i].phy_change_count) { if (update) ex->ex_phy[i].phy_change_count = 
phy_change_count; *phy_id = i; return 0; } } return 0; } static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) { int res; u8 *rg_req; struct smp_resp *rg_resp; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) goto out; if (rg_resp->result != SMP_RESP_FUNC_ACC) { res = rg_resp->result; goto out; } *ecc = be16_to_cpu(rg_resp->rg.change_count); out: kfree(rg_resp); kfree(rg_req); return res; } /** * sas_find_bcast_dev - find the device issue BROADCAST(CHANGE). * @dev:domain device to be detect. * @src_dev: the device which originated BROADCAST(CHANGE). * * Add self-configuration expander support. Suppose two expander cascading, * when the first level expander is self-configuring, hotplug the disks in * second level expander, BROADCAST(CHANGE) will not only be originated * in the second level expander, but also be originated in the first level * expander (see SAS protocol SAS 2r-14, 7.11 for detail), it is to say, * expander changed count in two level expanders will all increment at least * once, but the phy which chang count has changed is the source device which * we concerned. */ static int sas_find_bcast_dev(struct domain_device *dev, struct domain_device **src_dev) { struct expander_device *ex = &dev->ex_dev; int ex_change_count = -1; int phy_id = -1; int res; struct domain_device *ch; res = sas_get_ex_change_count(dev, &ex_change_count); if (res) goto out; if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { /* Just detect if this expander phys phy change count changed, * in order to determine if this expander originate BROADCAST, * and do not update phy change count field in our structure. 
*/ res = sas_find_bcast_phy(dev, &phy_id, 0, false); if (phy_id != -1) { *src_dev = dev; ex->ex_change_count = ex_change_count; SAS_DPRINTK("Expander phy change count has changed\n"); return res; } else SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; } } out: return res; } static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child, *n; list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); } sas_unregister_dev(port, dev); } static void sas_unregister_devs_sas_addr(struct domain_device *parent, int phy_id, bool last) { struct expander_device *ex_dev = &parent->ex_dev; struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; struct domain_device *child, *n, *found = NULL; if (last) { list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); found = child; break; } } sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); if (phy->port) { sas_port_delete_phy(phy->port, phy->phy); sas_device_set_phy(found, phy->port); if (phy->port->num_phys == 0) list_add_tail(&phy->port->del_list, &parent->port->sas_port_del_list); phy->port = NULL; } } static int sas_discover_bfs_by_root_level(struct domain_device *root, const int level) { struct expander_device *ex_root = &root->ex_dev; struct domain_device *child; int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); if (level > ex->level) res = sas_discover_bfs_by_root_level(child, level); else if (level == ex->level) res = sas_ex_discover_devices(child, -1); } } return res; } static int sas_discover_bfs_by_root(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); int level = ex->level+1; res = sas_ex_discover_devices(dev, -1); if (res) goto out; do { res = sas_discover_bfs_by_root_level(dev, level); mb(); level += 1; } while (level <= dev->port->disc.max_level); out: return res; } static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; int res; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) return res; if (sas_ex_join_wide_port(dev, phy_id)) return 0; res = sas_ex_discover_devices(dev, phy_id); if (res) return res; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = 
sas_discover_bfs_by_root(child); break; } } return res; } static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return false; } static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; enum sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; memset(sas_addr, 0, 8); res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); switch (res) { case SMP_RESP_NO_PHY: phy->phy_state = PHY_NOT_PRESENT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_FUNC_ACC: break; case -ECOMM: break; default: return res; } if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && dev_type_flutter(type, phy->attached_dev_type)) { struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id); char *action = ""; sas_ex_phy_discover(dev, phy_id); if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); return res; } /* we always have to delete the old device when we went here */ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", SAS_ADDR(dev->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr)); sas_unregister_devs_sas_addr(dev, phy_id, last); return sas_discover_new(dev, phy_id); } /** * sas_rediscover - revalidate the domain. * @dev:domain device to be detect. * @phy_id: the phy id will be detected. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. * Discover process only interrogates devices in order to discover the * domain.For plugging out, we un-register the device only when it is * the last phy in the port, for other phys in this port, we just delete it * from the port.For inserting, we do discovery when it is the * first phy,for other phys in this port, we add it to the port to * forming the wide-port. */ static int sas_rediscover(struct domain_device *dev, const int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; int res = 0; int i; bool last = true; /* is this the last phy of the port */ SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", SAS_ADDR(dev->sas_addr), phy_id); if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (i == phy_id) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(changed_phy->attached_sas_addr)) { SAS_DPRINTK("phy%d part of wide port with " "phy%d\n", phy_id, i); last = false; break; } } res = sas_rediscover_dev(dev, phy_id, last); } else res = sas_discover_new(dev, phy_id); return res; } /** * sas_ex_revalidate_domain - revalidate the domain * @port_dev: port domain device. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. 
* Discover process only interrogates devices in order to discover the * domain. */ int sas_ex_revalidate_domain(struct domain_device *port_dev) { int res; struct domain_device *dev = NULL; res = sas_find_bcast_dev(port_dev, &dev); if (res == 0 && dev) { struct expander_device *ex = &dev->ex_dev; int i = 0, phy_id; do { phy_id = -1; res = sas_find_bcast_phy(dev, &phy_id, i, true); if (phy_id == -1) break; res = sas_rediscover(dev, phy_id); i = phy_id + 1; } while (i < ex->num_phys); } return res; } void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { struct domain_device *dev; unsigned int rcvlen = 0; int ret = -EINVAL; /* no rphy means no smp target support (ie aic94xx host) */ if (!rphy) return sas_smp_host_handler(job, shost); switch (rphy->identify.device_type) { case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: break; default: printk("%s: can we send a smp request to a device?\n", __func__); goto out; } dev = sas_find_dev_by_rphy(rphy); if (!dev) { printk("%s: fail to find a domain_device?\n", __func__); goto out; } /* do we need to support multiple segments? */ if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) { printk("%s: multiple segments req %u, rsp %u\n", __func__, job->request_payload.payload_len, job->reply_payload.payload_len); goto out; } ret = smp_execute_task_sg(dev, job->request_payload.sg_list, job->reply_payload.sg_list); if (ret >= 0) { /* bsg_job_done() requires the length received */ rcvlen = job->reply_payload.payload_len - ret; ret = 0; } out: bsg_job_done(job, ret, rcvlen); }
/* * Serial Attached SCSI (SAS) Expander discovery and configuration * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/scatterlist.h> #include <linux/blkdev.h> #include <linux/slab.h> #include "sas_internal.h" #include <scsi/sas_ata.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" static int sas_discover_expander(struct domain_device *dev); static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include); static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); /* ---------- SMP task management ---------- */ static void smp_task_timedout(struct timer_list *t) { struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->slow_task->completion); } spin_unlock_irqrestore(&task->task_state_lock, flags); } static void smp_task_done(struct sas_task *task) { del_timer(&task->slow_task->timer); complete(&task->slow_task->completion); } /* Give it some long enough timeout. In seconds. 
*/ #define SMP_TIMEOUT 10 static int smp_execute_task_sg(struct domain_device *dev, struct scatterlist *req, struct scatterlist *resp) { int res, retry; struct sas_task *task = NULL; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); mutex_lock(&dev->ex_dev.cmd_mutex); for (retry = 0; retry < 3; retry++) { if (test_bit(SAS_DEV_GONE, &dev->state)) { res = -ECOMM; break; } task = sas_alloc_slow_task(GFP_KERNEL); if (!task) { res = -ENOMEM; break; } task->dev = dev; task->task_proto = dev->tproto; task->smp_task.smp_req = *req; task->smp_task.smp_resp = *resp; task->task_done = smp_task_done; task->slow_task->timer.function = smp_task_timedout; task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; add_timer(&task->slow_task->timer); res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { del_timer(&task->slow_task->timer); SAS_DPRINTK("executing SMP task failed:%d\n", res); break; } wait_for_completion(&task->slow_task->completion); res = -ECOMM; if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { SAS_DPRINTK("smp task timed out or aborted\n"); i->dft->lldd_abort_task(task); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { SAS_DPRINTK("SMP task aborted and not done\n"); break; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = 0; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { res = -EMSGSIZE; break; } if (task->task_status.resp == SAS_TASK_UNDELIVERED && task->task_status.stat == SAS_DEVICE_UNKNOWN) break; else { SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " "status 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat); sas_free_task(task); task = NULL; } } mutex_unlock(&dev->ex_dev.cmd_mutex); BUG_ON(retry == 3 && task != NULL); sas_free_task(task); return res; } static int smp_execute_task(struct domain_device *dev, void *req, int req_size, void *resp, int resp_size) { struct scatterlist req_sg; struct scatterlist resp_sg; sg_init_one(&req_sg, req, req_size); sg_init_one(&resp_sg, resp, resp_size); return smp_execute_task_sg(dev, &req_sg, &resp_sg); } /* ---------- Allocations ---------- */ static inline void *alloc_smp_req(int size) { u8 *p = kzalloc(size, GFP_KERNEL); if (p) p[0] = SMP_REQUEST; return p; } static inline void *alloc_smp_resp(int size) { return kzalloc(size, GFP_KERNEL); } static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) { switch (phy->routing_attr) { case TABLE_ROUTING: if (dev->ex_dev.t2t_supp) return 'U'; else return 'T'; case DIRECT_ROUTING: return 'D'; case SUBTRACTIVE_ROUTING: return 'S'; default: return '?'; } } static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; struct discover_resp *dr = &resp->disc; struct sas_ha_struct *ha = dev->port->ha; struct 
expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; struct sas_rphy *rphy = dev->rphy; bool new_phy = !phy->phy; char *type; if (new_phy) { if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) return; phy->phy = sas_phy_alloc(&rphy->dev, phy_id); /* FIXME: error_handling */ BUG_ON(!phy->phy); } switch (resp->result) { case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; break; default: phy->phy_state = PHY_NOT_PRESENT; break; case SMP_RESP_FUNC_ACC: phy->phy_state = PHY_EMPTY; /* do not know yet */ break; } /* check if anything important changed to squelch debug */ dev_type = phy->attached_dev_type; linkrate = phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); /* Handle vacant phy - rest of dr data is not valid so skip it */ if (phy->phy_state == PHY_VACANT) { memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); phy->attached_dev_type = SAS_PHY_UNUSED; if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { phy->phy_id = phy_id; goto skip; } else goto out; } phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; phy->phy_id = phy_id; phy->linkrate = dr->linkrate; phy->attached_sata_host = dr->attached_sata_host; phy->attached_sata_dev = dr->attached_sata_dev; phy->attached_sata_ps = dr->attached_sata_ps; phy->attached_iproto = dr->iproto << 1; phy->attached_tproto = dr->tproto << 1; /* help some expanders that fail to zero sas_address in the 'no * device' case */ if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_phy_id = dr->attached_phy_id; phy->phy_change_count = dr->change_count; phy->routing_attr = dr->routing_attr; phy->virtual = dr->virtual; phy->last_da_index = -1; phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr); phy->phy->identify.device_type = dr->attached_dev_type; phy->phy->identify.initiator_port_protocols = phy->attached_iproto; phy->phy->identify.target_port_protocols = phy->attached_tproto; if (!phy->attached_tproto && dr->attached_sata_dev) phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; phy->phy->identify.phy_identifier = phy_id; phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); skip: if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); return; } out: switch (phy->attached_dev_type) { case SAS_SATA_PENDING: type = "stp pending"; break; case SAS_PHY_UNUSED: type = "no device"; break; case SAS_END_DEVICE: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; else type = "host"; } else { if (dr->attached_sata_dev) type = "stp"; else type = "ssp"; } break; case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: type = "unknown"; } /* this routine is polled by libata error recovery so filter * unimportant messages */ if (new_phy || phy->attached_dev_type != dev_type || phy->linkrate != linkrate || SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr)) /* pass */; else return; /* if the attached device type changed and ata_eh is active, * make sure we run revalidation when eh completes (see: * sas_enable_revalidation) */ if (test_bit(SAS_HA_ATA_EH_ACTIVE, 
&ha->state)) set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", SAS_ADDR(dev->sas_addr), phy->phy_id, sas_route_char(dev, phy), phy->linkrate, SAS_ADDR(phy->attached_sas_addr), type); } /* check if we have an existing attached ata device on this expander phy */ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id) { struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id]; struct domain_device *dev; struct sas_rphy *rphy; if (!ex_phy->port) return NULL; rphy = ex_phy->port->rphy; if (!rphy) return NULL; dev = sas_find_dev_by_rphy(rphy); if (dev && dev_is_sata(dev)) return dev; return NULL; } #define DISCOVER_REQ_SIZE 16 #define DISCOVER_RESP_SIZE 56 static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, u8 *disc_resp, int single) { struct discover_resp *dr; int res; disc_req[9] = single; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) return res; dr = &((struct smp_resp *)disc_resp)->disc; if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) { sas_printk("Found loopback topology, just ignore it!\n"); return 0; } sas_set_ex_phy(dev, single, disc_resp); return 0; } int sas_ex_phy_discover(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int res = 0; u8 *disc_req; u8 *disc_resp; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; } disc_req[1] = SMP_DISCOVER; if (0 <= single && single < ex->num_phys) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single); } else { int i; for (i = 0; i < ex->num_phys; i++) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, i); if (res) goto out_err; } } out_err: kfree(disc_resp); kfree(disc_req); return res; } static int sas_expander_discover(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int res = -ENOMEM; ex->ex_phy = kcalloc(ex->num_phys, sizeof(*ex->ex_phy), GFP_KERNEL); if (!ex->ex_phy) return -ENOMEM; res = sas_ex_phy_discover(dev, -1); if (res) goto out_err; return 0; out_err: kfree(ex->ex_phy); ex->ex_phy = NULL; return res; } #define MAX_EXPANDER_PHYS 128 static void ex_assign_report_general(struct domain_device *dev, struct smp_resp *resp) { struct report_general_resp *rg = &resp->rg; dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); dev->ex_dev.t2t_supp = rg->t2t_supp; dev->ex_dev.conf_route_table = rg->conf_route_table; dev->ex_dev.configuring = rg->configuring; memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); } #define RG_REQ_SIZE 8 #define RG_RESP_SIZE 32 static int sas_ex_general(struct domain_device *dev) { u8 *rg_req; struct smp_resp *rg_resp; int res; int i; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; for (i = 0; i < 5; i++) { res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) { SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("RG:ex %016llx returned SMP 
result:0x%x\n", SAS_ADDR(dev->sas_addr), rg_resp->result); res = rg_resp->result; goto out; } ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { SAS_DPRINTK("RG: ex %llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else break; } out: kfree(rg_req); kfree(rg_resp); return res; } static void ex_assign_manuf_info(struct domain_device *dev, void *_mi_resp) { u8 *mi_resp = _mi_resp; struct sas_rphy *rphy = dev->rphy; struct sas_expander_device *edev = rphy_to_expander_device(rphy); memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); memcpy(edev->product_rev, mi_resp + 36, SAS_EXPANDER_PRODUCT_REV_LEN); if (mi_resp[8] & 1) { memcpy(edev->component_vendor_id, mi_resp + 40, SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); edev->component_id = mi_resp[48] << 8 | mi_resp[49]; edev->component_revision_id = mi_resp[50]; } } #define MI_REQ_SIZE 8 #define MI_RESP_SIZE 64 static int sas_ex_manuf_info(struct domain_device *dev) { u8 *mi_req; u8 *mi_resp; int res; mi_req = alloc_smp_req(MI_REQ_SIZE); if (!mi_req) return -ENOMEM; mi_resp = alloc_smp_resp(MI_RESP_SIZE); if (!mi_resp) { kfree(mi_req); return -ENOMEM; } mi_req[1] = SMP_REPORT_MANUF_INFO; res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); if (res) { SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), mi_resp[2]); goto out; } ex_assign_manuf_info(dev, mi_resp); out: kfree(mi_req); kfree(mi_resp); return res; } #define PC_REQ_SIZE 44 #define PC_RESP_SIZE 8 int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func phy_func, struct sas_phy_linkrates *rates) { u8 *pc_req; u8 *pc_resp; int res; pc_req = alloc_smp_req(PC_REQ_SIZE); if (!pc_req) return -ENOMEM; pc_resp = alloc_smp_resp(PC_RESP_SIZE); if (!pc_resp) { kfree(pc_req); return -ENOMEM; } pc_req[1] = SMP_PHY_CONTROL; pc_req[9] = phy_id; pc_req[10]= phy_func; if (rates) { pc_req[32] = rates->minimum_linkrate << 4; pc_req[33] = rates->maximum_linkrate << 4; } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); kfree(pc_resp); kfree(pc_req); return res; } static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL); phy->linkrate = SAS_PHY_DISABLED; } static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) sas_ex_disable_phy(dev, i); } } static int sas_dev_present_in_domain(struct asd_sas_port *port, u8 *sas_addr) { struct domain_device *dev; if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) return 1; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) return 1; } return 0; } #define RPEL_REQ_SIZE 16 #define RPEL_RESP_SIZE 32 int sas_smp_get_phy_events(struct sas_phy *phy) { int res; u8 *req; u8 *resp; struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); struct domain_device *dev = sas_find_dev_by_rphy(rphy); req = alloc_smp_req(RPEL_REQ_SIZE); if (!req) 
return -ENOMEM; resp = alloc_smp_resp(RPEL_RESP_SIZE); if (!resp) { kfree(req); return -ENOMEM; } req[1] = SMP_REPORT_PHY_ERR_LOG; req[9] = phy->number; res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); if (res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); phy->running_disparity_error_count = scsi_to_u32(&resp[16]); phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: kfree(req); kfree(resp); return res; } #ifdef CONFIG_SCSI_SAS_ATA #define RPS_REQ_SIZE 16 #define RPS_RESP_SIZE 60 int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, struct smp_resp *rps_resp) { int res; u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); u8 *resp = (u8 *)rps_resp; if (!rps_req) return -ENOMEM; rps_req[1] = SMP_REPORT_PHY_SATA; rps_req[9] = phy_id; res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, rps_resp, RPS_RESP_SIZE); /* 0x34 is the FIS type for the D2H fis. There's a potential * standards cockup here. sas-2 explicitly specifies the FIS * should be encoded so that FIS type is in resp[24]. * However, some expanders endian reverse this. Undo the * reversal here */ if (!res && resp[27] == 0x34 && resp[24] != 0x34) { int i; for (i = 0; i < 5; i++) { int j = 24 + (i*4); u8 a, b; a = resp[j + 0]; b = resp[j + 1]; resp[j + 0] = resp[j + 3]; resp[j + 1] = resp[j + 2]; resp[j + 2] = b; resp[j + 3] = a; } } kfree(rps_req); return res; } #endif static void sas_ex_get_linkrate(struct domain_device *parent, struct domain_device *child, struct ex_phy *parent_phy) { struct expander_device *parent_ex = &parent->ex_dev; struct sas_port *port; int i; child->pathways = 0; port = parent_phy->port; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *phy = &parent_ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr)) { child->min_linkrate = min(parent->min_linkrate, phy->linkrate); child->max_linkrate = max(parent->max_linkrate, phy->linkrate); child->pathways++; sas_port_add_phy(port, phy->phy); } } child->linkrate = min(parent_phy->linkrate, child->max_linkrate); child->pathways = min(child->pathways, parent->pathways); } static struct domain_device *sas_ex_discover_end_dev( struct domain_device *parent, int phy_id) { struct expander_device *parent_ex = &parent->ex_dev; struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; int res; if (phy->attached_sata_host || phy->attached_sata_ps) return NULL; child = sas_alloc_device(); if (!child) return NULL; kref_get(&parent->kref); child->parent = parent; child->port = parent->port; child->iproto = phy->attached_iproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); if (!phy->port) { phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); if (unlikely(!phy->port)) goto out_err; if (unlikely(sas_port_add(phy->port) != 0)) { sas_port_free(phy->port); goto out_err; } } sas_ex_get_linkrate(parent, child, phy); sas_device_set_phy(child, phy->port); #ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { res = sas_get_ata_info(child, phy); if (res) goto out_free; sas_init_dev(child); res = sas_ata_init(child); if (res) goto out_free; rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; child->rphy = rphy; get_device(&rphy->dev); list_add_tail(&child->disco_list_node, 
&parent->port->disco_list); res = sas_discover_sata(child); if (res) { SAS_DPRINTK("sas_discover_sata() for device %16llx at " "%016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) goto out_free; child->tproto = phy->attached_tproto; sas_init_dev(child); child->rphy = rphy; get_device(&rphy->dev); sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); res = sas_discover_end_dev(child); if (res) { SAS_DPRINTK("sas_discover_end_dev() for device %16llx " "at %016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else { SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", phy->attached_tproto, SAS_ADDR(parent->sas_addr), phy_id); goto out_free; } list_add_tail(&child->siblings, &parent_ex->children); return child; out_list_del: sas_rphy_free(child->rphy); list_del(&child->disco_list_node); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); out_free: sas_port_delete(phy->port); out_err: phy->port = NULL; sas_put_device(child); return NULL; } /* See if this phy is part of a wide port */ static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id) { struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; int i; for (i = 0; i < parent->ex_dev.num_phys; i++) { struct ex_phy *ephy = &parent->ex_dev.ex_phy[i]; if (ephy == phy) continue; if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, SAS_ADDR_SIZE) && ephy->port) { sas_port_add_phy(ephy->port, phy->phy); phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; return true; } } return false; } static struct domain_device *sas_ex_discover_expander( struct domain_device *parent, int phy_id) { struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; struct sas_expander_device *edev; struct asd_sas_port *port; int res; if (phy->routing_attr == DIRECT_ROUTING) { SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " "allowed\n", SAS_ADDR(parent->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr), phy->attached_phy_id); return NULL; } child = sas_alloc_device(); if (!child) return NULL; phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); /* FIXME: better error handling */ BUG_ON(sas_port_add(phy->port) != 0); switch (phy->attached_dev_type) { case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; default: rphy = NULL; /* shut gcc up */ BUG(); } port = parent->port; child->rphy = rphy; get_device(&rphy->dev); edev = rphy_to_expander_device(rphy); child->dev_type = phy->attached_dev_type; kref_get(&parent->kref); child->parent = parent; child->port = port; child->iproto = phy->attached_iproto; child->tproto = phy->attached_tproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); sas_ex_get_linkrate(parent, child, phy); edev->level = parent_ex->level + 1; parent->port->disc.max_level = max(parent->port->disc.max_level, 
edev->level); sas_init_dev(child); sas_fill_in_rphy(child, rphy); sas_rphy_add(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_add_tail(&child->dev_list_node, &parent->port->dev_list); spin_unlock_irq(&parent->port->dev_list_lock); res = sas_discover_expander(child); if (res) { sas_rphy_delete(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); sas_put_device(child); return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); return child; } static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; struct domain_device *child = NULL; int res = 0; /* Phy state */ if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL)) res = sas_ex_phy_discover(dev, phy_id); if (res) return res; } /* Parent and domain coherency */ if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->port->sas_addr))) { sas_add_parent_port(dev, phy_id); return 0; } if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->parent->sas_addr))) { sas_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); return 0; } if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); } return 0; } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; if (ex_phy->attached_dev_type != SAS_END_DEVICE && ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), phy_id); return 0; } res = sas_configure_routing(dev, ex_phy->attached_sas_addr); if (res) { SAS_DPRINTK("configure routing for dev %016llx " "reported 0x%x. Forgotten\n", SAS_ADDR(ex_phy->attached_sas_addr), res); sas_disable_routing(dev, ex_phy->attached_sas_addr); return res; } if (sas_ex_join_wide_port(dev, phy_id)) { SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); return res; } switch (ex_phy->attached_dev_type) { case SAS_END_DEVICE: case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; case SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", SAS_ADDR(ex_phy->attached_sas_addr), ex_phy->attached_phy_id, SAS_ADDR(dev->sas_addr), phy_id); sas_ex_disable_phy(dev, phy_id); break; } else memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: break; } if (child) { int i; for (i = 0; i < ex->num_phys; i++) { if (ex->ex_phy[i].phy_state == PHY_VACANT || ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) continue; /* * Due to races, the phy might not get added to the * wide port, so we add the phy to the wide port here. 
*/ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; if (sas_ex_join_wide_port(dev, i)) SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); } } } return res; } static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); return 1; } } return 0; } static int sas_check_level_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child; u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); continue; } else { u8 s2[8]; if (sas_find_sub_addr(child, s2) && (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " "diverges from subtractive " "boundary %016llx\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(child->sas_addr), SAS_ADDR(s2), SAS_ADDR(sub_addr)); sas_ex_disable_port(child, s2); } } } return 0; } /** * sas_ex_discover_devices - discover devices attached to this expander * @dev: pointer to the expander domain device * @single: if you want to do a single phy, else set to -1; * * Configure this expander for use with its devices and register the * devices of this expander. */ static int sas_ex_discover_devices(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int i = 0, end = ex->num_phys; int res = 0; if (0 <= single && single < end) { i = single; end = i+1; } for ( ; i < end; i++) { struct ex_phy *ex_phy = &ex->ex_phy[i]; if (ex_phy->phy_state == PHY_VACANT || ex_phy->phy_state == PHY_NOT_PRESENT || ex_phy->phy_state == PHY_DEVICE_DISCOVERED) continue; switch (ex_phy->linkrate) { case SAS_PHY_DISABLED: case SAS_PHY_RESET_PROBLEM: case SAS_SATA_PORT_SELECTOR: continue; default: res = sas_ex_discover_dev(dev, i); if (res) break; continue; } } if (!res) sas_check_level_subtractive_boundary(dev); return res; } static int sas_check_ex_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int i; u8 *sub_sas_addr = NULL; if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) sub_sas_addr = &phy->attached_sas_addr[0]; else if (SAS_ADDR(sub_sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x " "diverges(%016llx) on subtractive " "boundary(%016llx). 
Disabled\n", SAS_ADDR(dev->sas_addr), i, SAS_ADDR(phy->attached_sas_addr), SAS_ADDR(sub_sas_addr)); sas_ex_disable_phy(dev, i); } } } return 0; } static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { static const char *ex_type[] = { [SAS_EDGE_EXPANDER_DEVICE] = "edge", [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx " "phy 0x%x has %c:%c routing link!\n", ex_type[parent->dev_type], SAS_ADDR(parent->sas_addr), parent_phy->phy_id, ex_type[child->dev_type], SAS_ADDR(child->sas_addr), child_phy->phy_id, sas_route_char(parent, parent_phy), sas_route_char(child, child_phy)); } static int sas_check_eeds(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { int res = 0; struct domain_device *parent = child->parent; if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " "phy S:0x%x, while there is a fanout ex %016llx\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id, SAS_ADDR(parent->port->disc.fanout_sas_addr)); } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { memcpy(parent->port->disc.eeds_a, parent->sas_addr, SAS_ADDR_SIZE); memcpy(parent->port->disc.eeds_b, child->sas_addr, SAS_ADDR_SIZE); } else if (((SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(child->sas_addr))) && ((SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(child->sas_addr)))) ; else { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " "phy 0x%x link forms a third EEDS!\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id); } return res; } /* Here we spill over 80 columns. It is intentional. 
*/ static int sas_check_parent_topology(struct domain_device *child) { struct expander_device *child_ex = &child->ex_dev; struct expander_device *parent_ex; int i; int res = 0; if (!child->parent) return 0; if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; struct ex_phy *child_phy; if (parent_phy->phy_state == PHY_VACANT || parent_phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) continue; child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { case SAS_EDGE_EXPANDER_DEVICE: if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { res = sas_check_eeds(child, parent_phy, child_phy); } else if (child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == TABLE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING || (child_phy->routing_attr == TABLE_ROUTING && child_ex->t2t_supp && parent_ex->t2t_supp)) { /* All good */; } else { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } break; case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } break; default: break; } } return res; } #define RRI_REQ_SIZE 16 #define RRI_RESP_SIZE 44 static int sas_configure_present(struct domain_device *dev, int phy_id, u8 *sas_addr, int *index, int *present) { int i, res = 0; struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; u8 *rri_req; u8 *rri_resp; *present = 0; *index = 0; rri_req = alloc_smp_req(RRI_REQ_SIZE); if (!rri_req) return -ENOMEM; rri_resp = alloc_smp_resp(RRI_RESP_SIZE); if (!rri_resp) { kfree(rri_req); return -ENOMEM; } rri_req[1] = SMP_REPORT_ROUTE_INFO; rri_req[9] = phy_id; for (i = 0; i < ex->max_route_indexes ; i++) { *(__be16 *)(rri_req+6) = cpu_to_be16(i); res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, RRI_RESP_SIZE); if (res) goto out; res = rri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx " "phy 0x%x index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, i); goto out; } else if (res != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " "result 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), phy_id, i, res); goto out; } if (SAS_ADDR(sas_addr) != 0) { if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { *index = i; if ((rri_resp[12] & 0x80) == 0x80) *present = 0; else *present = 1; goto out; } else if (SAS_ADDR(rri_resp+16) == 0) { *index = i; *present = 0; goto out; } } else if (SAS_ADDR(rri_resp+16) == 0 && phy->last_da_index < i) { phy->last_da_index = i; *index = i; *present = 0; goto out; } } res = -1; out: kfree(rri_req); kfree(rri_resp); return res; } #define CRI_REQ_SIZE 44 #define CRI_RESP_SIZE 8 static int sas_configure_set(struct domain_device *dev, int phy_id, u8 *sas_addr, int index, int include) { int 
res; u8 *cri_req; u8 *cri_resp; cri_req = alloc_smp_req(CRI_REQ_SIZE); if (!cri_req) return -ENOMEM; cri_resp = alloc_smp_resp(CRI_RESP_SIZE); if (!cri_resp) { kfree(cri_req); return -ENOMEM; } cri_req[1] = SMP_CONF_ROUTE_INFO; *(__be16 *)(cri_req+6) = cpu_to_be16(index); cri_req[9] = phy_id; if (SAS_ADDR(sas_addr) == 0 || !include) cri_req[12] |= 0x80; memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, CRI_RESP_SIZE); if (res) goto out; res = cri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " "index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, index); } out: kfree(cri_req); kfree(cri_resp); return res; } static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include) { int index; int present; int res; res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); if (res) return res; if (include ^ present) return sas_configure_set(dev, phy_id, sas_addr, index,include); return res; } /** * sas_configure_parent - configure routing table of parent * @parent: parent expander * @child: child expander * @sas_addr: SAS port identifier of device directly attached to child * @include: whether or not to include @child in the expander routing table */ static int sas_configure_parent(struct domain_device *parent, struct domain_device *child, u8 *sas_addr, int include) { struct expander_device *ex_parent = &parent->ex_dev; int res = 0; int i; if (parent->parent) { res = sas_configure_parent(parent->parent, parent, sas_addr, include); if (res) return res; } if (ex_parent->conf_route_table == 0) { SAS_DPRINTK("ex %016llx has self-configuring routing table\n", SAS_ADDR(parent->sas_addr)); return 0; } for (i = 0; i < ex_parent->num_phys; i++) { struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr))) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; } } return res; } /** * sas_configure_routing - configure routing * @dev: expander device * @sas_addr: port identifier of device directly attached to the expander device */ static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 1); return 0; } static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 0); return 0; } /** * sas_discover_expander - expander discovery * @dev: pointer to expander domain device * * See comment in sas_discover_sata(). 
*/ static int sas_discover_expander(struct domain_device *dev) { int res; res = sas_notify_lldd_dev_found(dev); if (res) return res; res = sas_ex_general(dev); if (res) goto out_err; res = sas_ex_manuf_info(dev); if (res) goto out_err; res = sas_expander_discover(dev); if (res) { SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", SAS_ADDR(dev->sas_addr), res); goto out_err; } sas_check_ex_subtractive_boundary(dev); res = sas_check_parent_topology(dev); if (res) goto out_err; return 0; out_err: sas_notify_lldd_dev_gone(dev); return res; } static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) { int res = 0; struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); if (level == ex->level) res = sas_ex_discover_devices(dev, -1); else if (level > 0) res = sas_ex_discover_devices(port->port_dev, -1); } } return res; } static int sas_ex_bfs_disc(struct asd_sas_port *port) { int res; int level; do { level = port->disc.max_level; res = sas_ex_level_discovery(port, level); mb(); } while (level < port->disc.max_level); return res; } int sas_discover_root_expander(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); res = sas_rphy_add(dev->rphy); if (res) goto out_err; ex->level = dev->port->disc.max_level; /* 0 */ res = sas_discover_expander(dev); if (res) goto out_err2; sas_ex_bfs_disc(dev->port); return res; out_err2: sas_rphy_remove(dev->rphy); out_err: return res; } /* ---------- Domain revalidation ---------- */ static int sas_get_phy_discover(struct domain_device *dev, int phy_id, struct smp_resp *disc_resp) { int res; u8 *disc_req; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_req[1] = SMP_DISCOVER; disc_req[9] = phy_id; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) goto out; else if (disc_resp->result != SMP_RESP_FUNC_ACC) { res = disc_resp->result; goto out; } out: kfree(disc_req); return res; } static int sas_get_phy_change_count(struct domain_device *dev, int phy_id, int *pcc) { int res; struct smp_resp *disc_resp; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (!res) *pcc = disc_resp->disc.change_count; kfree(disc_resp); return res; } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; struct discover_resp *dr; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; dr = &disc_resp->disc; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (res == 0) { memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8); *type = to_dev_type(dr); if (*type == 0) memset(sas_addr, 0, 8); } kfree(disc_resp); return res; } static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, int from_phy, bool update) { struct expander_device *ex = &dev->ex_dev; int res = 0; int i; for (i = from_phy; i < ex->num_phys; i++) { int phy_change_count = 0; res = sas_get_phy_change_count(dev, i, &phy_change_count); switch (res) { case SMP_RESP_PHY_VACANT: case SMP_RESP_NO_PHY: continue; case SMP_RESP_FUNC_ACC: break; default: return res; } if (phy_change_count != ex->ex_phy[i].phy_change_count) { if (update) ex->ex_phy[i].phy_change_count = 
phy_change_count; *phy_id = i; return 0; } } return 0; } static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) { int res; u8 *rg_req; struct smp_resp *rg_resp; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) goto out; if (rg_resp->result != SMP_RESP_FUNC_ACC) { res = rg_resp->result; goto out; } *ecc = be16_to_cpu(rg_resp->rg.change_count); out: kfree(rg_resp); kfree(rg_req); return res; } /** * sas_find_bcast_dev - find the device issue BROADCAST(CHANGE). * @dev:domain device to be detect. * @src_dev: the device which originated BROADCAST(CHANGE). * * Add self-configuration expander support. Suppose two expander cascading, * when the first level expander is self-configuring, hotplug the disks in * second level expander, BROADCAST(CHANGE) will not only be originated * in the second level expander, but also be originated in the first level * expander (see SAS protocol SAS 2r-14, 7.11 for detail), it is to say, * expander changed count in two level expanders will all increment at least * once, but the phy which chang count has changed is the source device which * we concerned. */ static int sas_find_bcast_dev(struct domain_device *dev, struct domain_device **src_dev) { struct expander_device *ex = &dev->ex_dev; int ex_change_count = -1; int phy_id = -1; int res; struct domain_device *ch; res = sas_get_ex_change_count(dev, &ex_change_count); if (res) goto out; if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { /* Just detect if this expander phys phy change count changed, * in order to determine if this expander originate BROADCAST, * and do not update phy change count field in our structure. 
*/ res = sas_find_bcast_phy(dev, &phy_id, 0, false); if (phy_id != -1) { *src_dev = dev; ex->ex_change_count = ex_change_count; SAS_DPRINTK("Expander phy change count has changed\n"); return res; } else SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; } } out: return res; } static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child, *n; list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); } sas_unregister_dev(port, dev); } static void sas_unregister_devs_sas_addr(struct domain_device *parent, int phy_id, bool last) { struct expander_device *ex_dev = &parent->ex_dev; struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; struct domain_device *child, *n, *found = NULL; if (last) { list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); found = child; break; } } sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); if (phy->port) { sas_port_delete_phy(phy->port, phy->phy); sas_device_set_phy(found, phy->port); if (phy->port->num_phys == 0) list_add_tail(&phy->port->del_list, &parent->port->sas_port_del_list); phy->port = NULL; } } static int sas_discover_bfs_by_root_level(struct domain_device *root, const int level) { struct expander_device *ex_root = &root->ex_dev; struct domain_device *child; int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); if (level > ex->level) res = sas_discover_bfs_by_root_level(child, level); else if (level == ex->level) res = sas_ex_discover_devices(child, -1); } } return res; } static int sas_discover_bfs_by_root(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); int level = ex->level+1; res = sas_ex_discover_devices(dev, -1); if (res) goto out; do { res = sas_discover_bfs_by_root_level(dev, level); mb(); level += 1; } while (level <= dev->port->disc.max_level); out: return res; } static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; int res; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) return res; if (sas_ex_join_wide_port(dev, phy_id)) return 0; res = sas_ex_discover_devices(dev, phy_id); if (res) return res; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = 
sas_discover_bfs_by_root(child); break; } } return res; } static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return false; } static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; enum sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; memset(sas_addr, 0, 8); res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); switch (res) { case SMP_RESP_NO_PHY: phy->phy_state = PHY_NOT_PRESENT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_FUNC_ACC: break; case -ECOMM: break; default: return res; } if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && dev_type_flutter(type, phy->attached_dev_type)) { struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id); char *action = ""; sas_ex_phy_discover(dev, phy_id); if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); return res; } /* we always have to delete the old device when we went here */ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", SAS_ADDR(dev->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr)); sas_unregister_devs_sas_addr(dev, phy_id, last); return sas_discover_new(dev, phy_id); } /** * sas_rediscover - revalidate the domain. * @dev:domain device to be detect. * @phy_id: the phy id will be detected. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. * Discover process only interrogates devices in order to discover the * domain.For plugging out, we un-register the device only when it is * the last phy in the port, for other phys in this port, we just delete it * from the port.For inserting, we do discovery when it is the * first phy,for other phys in this port, we add it to the port to * forming the wide-port. */ static int sas_rediscover(struct domain_device *dev, const int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; int res = 0; int i; bool last = true; /* is this the last phy of the port */ SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", SAS_ADDR(dev->sas_addr), phy_id); if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (i == phy_id) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(changed_phy->attached_sas_addr)) { SAS_DPRINTK("phy%d part of wide port with " "phy%d\n", phy_id, i); last = false; break; } } res = sas_rediscover_dev(dev, phy_id, last); } else res = sas_discover_new(dev, phy_id); return res; } /** * sas_ex_revalidate_domain - revalidate the domain * @port_dev: port domain device. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. 
* Discover process only interrogates devices in order to discover the * domain. */ int sas_ex_revalidate_domain(struct domain_device *port_dev) { int res; struct domain_device *dev = NULL; res = sas_find_bcast_dev(port_dev, &dev); if (res == 0 && dev) { struct expander_device *ex = &dev->ex_dev; int i = 0, phy_id; do { phy_id = -1; res = sas_find_bcast_phy(dev, &phy_id, i, true); if (phy_id == -1) break; res = sas_rediscover(dev, phy_id); i = phy_id + 1; } while (i < ex->num_phys); } return res; } void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { struct domain_device *dev; unsigned int rcvlen = 0; int ret = -EINVAL; /* no rphy means no smp target support (ie aic94xx host) */ if (!rphy) return sas_smp_host_handler(job, shost); switch (rphy->identify.device_type) { case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: break; default: printk("%s: can we send a smp request to a device?\n", __func__); goto out; } dev = sas_find_dev_by_rphy(rphy); if (!dev) { printk("%s: fail to find a domain_device?\n", __func__); goto out; } /* do we need to support multiple segments? */ if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) { printk("%s: multiple segments req %u, rsp %u\n", __func__, job->request_payload.payload_len, job->reply_payload.payload_len); goto out; } ret = smp_execute_task_sg(dev, job->request_payload.sg_list, job->reply_payload.sg_list); if (ret >= 0) { /* bsg_job_done() requires the length received */ rcvlen = job->reply_payload.payload_len - ret; ret = 0; } out: bsg_job_done(job, ret, rcvlen); }
static void smp_task_timedout(struct timer_list *t) { struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); complete(&task->slow_task->completion); }
static void smp_task_timedout(struct timer_list *t) { struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->slow_task->completion); } spin_unlock_irqrestore(&task->task_state_lock, flags); }
{'added': [(51, '\tif (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {'), (53, '\t\tcomplete(&task->slow_task->completion);'), (54, '\t}'), (60, '\tdel_timer(&task->slow_task->timer);')], 'deleted': [(51, '\tif (!(task->task_state_flags & SAS_TASK_STATE_DONE))'), (54, ''), (55, '\tcomplete(&task->slow_task->completion);'), (60, '\tif (!del_timer(&task->slow_task->timer))'), (61, '\t\treturn;')]}
4
5
1,724
10,440
https://github.com/torvalds/linux
CVE-2018-20836
['CWE-416', 'CWE-362']
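The func_before/func_after pair in the record above captures the CVE-2018-20836 change to smp_task_timedout(): the completion is signalled inside the task_state_lock critical section, and only when SAS_TASK_STATE_DONE is not already set, while (per the diff field) the conditional return around del_timer() is dropped, closing the use-after-free/race window (CWE-416, CWE-362) between the SMP timeout timer and normal task completion. What follows is a minimal user-space sketch of the same locking pattern, assuming invented names (demo_task, demo_timeout, demo_wait_and_free) and pthread primitives standing in for the kernel's spinlock and completion; it illustrates the pattern, it is not kernel code.

/*
 * Minimal user-space sketch of the pattern behind the fix above; all
 * names are illustrative, and pthread primitives stand in for the
 * kernel's spinlock and completion.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_task {
	pthread_mutex_t state_lock;
	pthread_cond_t completion;
	bool done;    /* set by the normal completion path */
	bool aborted; /* set by the timeout handler */
};

/* Timeout path: mark the task aborted and signal, all under state_lock,
 * mirroring the fixed smp_task_timedout(), which calls complete() inside
 * the locked, not-yet-done branch. */
static void demo_timeout(struct demo_task *task)
{
	pthread_mutex_lock(&task->state_lock);
	if (!task->done) {
		task->aborted = true;
		pthread_cond_signal(&task->completion);
	}
	pthread_mutex_unlock(&task->state_lock);
}

/* Waiter: blocks until completion or abort, then owns the cleanup. Because
 * the signal is sent under state_lock, the waiter cannot reacquire the lock
 * and free the task while the timeout handler is still dereferencing it. */
static void demo_wait_and_free(struct demo_task *task)
{
	pthread_mutex_lock(&task->state_lock);
	while (!task->done && !task->aborted)
		pthread_cond_wait(&task->completion, &task->state_lock);
	pthread_mutex_unlock(&task->state_lock);

	pthread_cond_destroy(&task->completion);
	pthread_mutex_destroy(&task->state_lock);
	free(task);
}

int main(void)
{
	struct demo_task *task = calloc(1, sizeof(*task));

	if (!task)
		return 1;
	pthread_mutex_init(&task->state_lock, NULL);
	pthread_cond_init(&task->completion, NULL);

	demo_timeout(task);       /* pretend the request timed out */
	demo_wait_and_free(task); /* reaped exactly once, no UAF   */
	printf("aborted task reaped safely\n");
	return 0;
}

Compiled with -pthread, the demo exits cleanly; the essential property is that the signalling side is finished with the task before the waiter can ever reacquire the lock and free it.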
msnd_midi.c
snd_msndmidi_input_read
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Copyright (c) 2009 by Krzysztof Helt * Routines for control of MPU-401 in UART mode * * MPU-401 supports UART mode which is not capable generate transmit * interrupts thus output is done via polling. Also, if irq < 0, then * input is done also via polling. Do not expect good performance. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/io.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/errno.h> #include <linux/export.h> #include <sound/core.h> #include <sound/rawmidi.h> #include "msnd.h" #define MSNDMIDI_MODE_BIT_INPUT 0 #define MSNDMIDI_MODE_BIT_OUTPUT 1 #define MSNDMIDI_MODE_BIT_INPUT_TRIGGER 2 #define MSNDMIDI_MODE_BIT_OUTPUT_TRIGGER 3 struct snd_msndmidi { struct snd_msnd *dev; unsigned long mode; /* MSNDMIDI_MODE_XXXX */ struct snd_rawmidi_substream *substream_input; spinlock_t input_lock; }; /* * input/output open/close - protected by open_mutex in rawmidi.c */ static int snd_msndmidi_input_open(struct snd_rawmidi_substream *substream) { struct snd_msndmidi *mpu; snd_printdd("snd_msndmidi_input_open()\n"); mpu = substream->rmidi->private_data; mpu->substream_input = substream; snd_msnd_enable_irq(mpu->dev); snd_msnd_send_dsp_cmd(mpu->dev, HDEX_MIDI_IN_START); set_bit(MSNDMIDI_MODE_BIT_INPUT, &mpu->mode); return 0; } static int snd_msndmidi_input_close(struct snd_rawmidi_substream *substream) { struct snd_msndmidi *mpu; mpu = substream->rmidi->private_data; snd_msnd_send_dsp_cmd(mpu->dev, HDEX_MIDI_IN_STOP); clear_bit(MSNDMIDI_MODE_BIT_INPUT, &mpu->mode); mpu->substream_input = NULL; snd_msnd_disable_irq(mpu->dev); return 0; } static void snd_msndmidi_input_drop(struct snd_msndmidi *mpu) { u16 tail; tail = readw(mpu->dev->MIDQ + JQS_wTail); writew(tail, mpu->dev->MIDQ + JQS_wHead); } /* * trigger input */ static void snd_msndmidi_input_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; struct snd_msndmidi *mpu; snd_printdd("snd_msndmidi_input_trigger(, %i)\n", up); mpu = substream->rmidi->private_data; spin_lock_irqsave(&mpu->input_lock, flags); if (up) { if (!test_and_set_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode)) snd_msndmidi_input_drop(mpu); } else { clear_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode); } spin_unlock_irqrestore(&mpu->input_lock, flags); if (up) snd_msndmidi_input_read(mpu); } void snd_msndmidi_input_read(void *mpuv) { unsigned long flags; struct snd_msndmidi *mpu = mpuv; void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF; spin_lock_irqsave(&mpu->input_lock, flags); while (readw(mpu->dev->MIDQ + JQS_wTail) != readw(mpu->dev->MIDQ + JQS_wHead)) { u16 wTmp, val; val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead)); if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode)) snd_rawmidi_receive(mpu->substream_input, (unsigned char *)&val, 1); wTmp = 
readw(mpu->dev->MIDQ + JQS_wHead) + 1; if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize)) writew(0, mpu->dev->MIDQ + JQS_wHead); else writew(wTmp, mpu->dev->MIDQ + JQS_wHead); } spin_unlock_irqrestore(&mpu->input_lock, flags); } EXPORT_SYMBOL(snd_msndmidi_input_read); static const struct snd_rawmidi_ops snd_msndmidi_input = { .open = snd_msndmidi_input_open, .close = snd_msndmidi_input_close, .trigger = snd_msndmidi_input_trigger, }; static void snd_msndmidi_free(struct snd_rawmidi *rmidi) { struct snd_msndmidi *mpu = rmidi->private_data; kfree(mpu); } int snd_msndmidi_new(struct snd_card *card, int device) { struct snd_msnd *chip = card->private_data; struct snd_msndmidi *mpu; struct snd_rawmidi *rmidi; int err; err = snd_rawmidi_new(card, "MSND-MIDI", device, 1, 1, &rmidi); if (err < 0) return err; mpu = kzalloc(sizeof(*mpu), GFP_KERNEL); if (mpu == NULL) { snd_device_free(card, rmidi); return -ENOMEM; } mpu->dev = chip; chip->msndmidi_mpu = mpu; rmidi->private_data = mpu; rmidi->private_free = snd_msndmidi_free; spin_lock_init(&mpu->input_lock); strcpy(rmidi->name, "MSND MIDI"); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_msndmidi_input); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; return 0; }
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Copyright (c) 2009 by Krzysztof Helt * Routines for control of MPU-401 in UART mode * * MPU-401 supports UART mode which is not capable generate transmit * interrupts thus output is done via polling. Also, if irq < 0, then * input is done also via polling. Do not expect good performance. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/io.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/errno.h> #include <linux/export.h> #include <sound/core.h> #include <sound/rawmidi.h> #include "msnd.h" #define MSNDMIDI_MODE_BIT_INPUT 0 #define MSNDMIDI_MODE_BIT_OUTPUT 1 #define MSNDMIDI_MODE_BIT_INPUT_TRIGGER 2 #define MSNDMIDI_MODE_BIT_OUTPUT_TRIGGER 3 struct snd_msndmidi { struct snd_msnd *dev; unsigned long mode; /* MSNDMIDI_MODE_XXXX */ struct snd_rawmidi_substream *substream_input; spinlock_t input_lock; }; /* * input/output open/close - protected by open_mutex in rawmidi.c */ static int snd_msndmidi_input_open(struct snd_rawmidi_substream *substream) { struct snd_msndmidi *mpu; snd_printdd("snd_msndmidi_input_open()\n"); mpu = substream->rmidi->private_data; mpu->substream_input = substream; snd_msnd_enable_irq(mpu->dev); snd_msnd_send_dsp_cmd(mpu->dev, HDEX_MIDI_IN_START); set_bit(MSNDMIDI_MODE_BIT_INPUT, &mpu->mode); return 0; } static int snd_msndmidi_input_close(struct snd_rawmidi_substream *substream) { struct snd_msndmidi *mpu; mpu = substream->rmidi->private_data; snd_msnd_send_dsp_cmd(mpu->dev, HDEX_MIDI_IN_STOP); clear_bit(MSNDMIDI_MODE_BIT_INPUT, &mpu->mode); mpu->substream_input = NULL; snd_msnd_disable_irq(mpu->dev); return 0; } static void snd_msndmidi_input_drop(struct snd_msndmidi *mpu) { u16 tail; tail = readw(mpu->dev->MIDQ + JQS_wTail); writew(tail, mpu->dev->MIDQ + JQS_wHead); } /* * trigger input */ static void snd_msndmidi_input_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; struct snd_msndmidi *mpu; snd_printdd("snd_msndmidi_input_trigger(, %i)\n", up); mpu = substream->rmidi->private_data; spin_lock_irqsave(&mpu->input_lock, flags); if (up) { if (!test_and_set_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode)) snd_msndmidi_input_drop(mpu); } else { clear_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode); } spin_unlock_irqrestore(&mpu->input_lock, flags); if (up) snd_msndmidi_input_read(mpu); } void snd_msndmidi_input_read(void *mpuv) { unsigned long flags; struct snd_msndmidi *mpu = mpuv; void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF; u16 head, tail, size; spin_lock_irqsave(&mpu->input_lock, flags); head = readw(mpu->dev->MIDQ + JQS_wHead); tail = readw(mpu->dev->MIDQ + JQS_wTail); size = readw(mpu->dev->MIDQ + JQS_wSize); if (head > size || tail > size) goto out; while (head != tail) { unsigned char val = readw(pwMIDQData + 2 * head); if 
(test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode)) snd_rawmidi_receive(mpu->substream_input, &val, 1); if (++head > size) head = 0; writew(head, mpu->dev->MIDQ + JQS_wHead); } out: spin_unlock_irqrestore(&mpu->input_lock, flags); } EXPORT_SYMBOL(snd_msndmidi_input_read); static const struct snd_rawmidi_ops snd_msndmidi_input = { .open = snd_msndmidi_input_open, .close = snd_msndmidi_input_close, .trigger = snd_msndmidi_input_trigger, }; static void snd_msndmidi_free(struct snd_rawmidi *rmidi) { struct snd_msndmidi *mpu = rmidi->private_data; kfree(mpu); } int snd_msndmidi_new(struct snd_card *card, int device) { struct snd_msnd *chip = card->private_data; struct snd_msndmidi *mpu; struct snd_rawmidi *rmidi; int err; err = snd_rawmidi_new(card, "MSND-MIDI", device, 1, 1, &rmidi); if (err < 0) return err; mpu = kzalloc(sizeof(*mpu), GFP_KERNEL); if (mpu == NULL) { snd_device_free(card, rmidi); return -ENOMEM; } mpu->dev = chip; chip->msndmidi_mpu = mpu; rmidi->private_data = mpu; rmidi->private_free = snd_msndmidi_free; spin_lock_init(&mpu->input_lock); strcpy(rmidi->name, "MSND MIDI"); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_msndmidi_input); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; return 0; }
void snd_msndmidi_input_read(void *mpuv) { unsigned long flags; struct snd_msndmidi *mpu = mpuv; void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF; spin_lock_irqsave(&mpu->input_lock, flags); while (readw(mpu->dev->MIDQ + JQS_wTail) != readw(mpu->dev->MIDQ + JQS_wHead)) { u16 wTmp, val; val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead)); if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode)) snd_rawmidi_receive(mpu->substream_input, (unsigned char *)&val, 1); wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1; if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize)) writew(0, mpu->dev->MIDQ + JQS_wHead); else writew(wTmp, mpu->dev->MIDQ + JQS_wHead); } spin_unlock_irqrestore(&mpu->input_lock, flags); }
void snd_msndmidi_input_read(void *mpuv) { unsigned long flags; struct snd_msndmidi *mpu = mpuv; void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF; u16 head, tail, size; spin_lock_irqsave(&mpu->input_lock, flags); head = readw(mpu->dev->MIDQ + JQS_wHead); tail = readw(mpu->dev->MIDQ + JQS_wTail); size = readw(mpu->dev->MIDQ + JQS_wSize); if (head > size || tail > size) goto out; while (head != tail) { unsigned char val = readw(pwMIDQData + 2 * head); if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode)) snd_rawmidi_receive(mpu->substream_input, &val, 1); if (++head > size) head = 0; writew(head, mpu->dev->MIDQ + JQS_wHead); } out: spin_unlock_irqrestore(&mpu->input_lock, flags); }
{'added': [(123, '\tu16 head, tail, size;'), (126, '\thead = readw(mpu->dev->MIDQ + JQS_wHead);'), (127, '\ttail = readw(mpu->dev->MIDQ + JQS_wTail);'), (128, '\tsize = readw(mpu->dev->MIDQ + JQS_wSize);'), (129, '\tif (head > size || tail > size)'), (130, '\t\tgoto out;'), (131, '\twhile (head != tail) {'), (132, '\t\tunsigned char val = readw(pwMIDQData + 2 * head);'), (133, ''), (134, '\t\tif (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))'), (135, '\t\t\tsnd_rawmidi_receive(mpu->substream_input, &val, 1);'), (136, '\t\tif (++head > size)'), (137, '\t\t\thead = 0;'), (138, '\t\twritew(head, mpu->dev->MIDQ + JQS_wHead);'), (140, ' out:')], 'deleted': [(125, '\twhile (readw(mpu->dev->MIDQ + JQS_wTail) !='), (126, '\t readw(mpu->dev->MIDQ + JQS_wHead)) {'), (127, '\t\tu16 wTmp, val;'), (128, '\t\tval = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));'), (129, ''), (130, '\t\t\tif (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,'), (131, '\t\t\t\t &mpu->mode))'), (132, '\t\t\t\tsnd_rawmidi_receive(mpu->substream_input,'), (133, '\t\t\t\t\t\t (unsigned char *)&val, 1);'), (134, ''), (135, '\t\twTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;'), (136, '\t\tif (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))'), (137, '\t\t\twritew(0, mpu->dev->MIDQ + JQS_wHead);'), (138, '\t\telse'), (139, '\t\t\twritew(wTmp, mpu->dev->MIDQ + JQS_wHead);')]}
15
15
120
697
https://github.com/torvalds/linux
CVE-2017-9984
['CWE-125']
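The msnd_midi.c record above (CVE-2017-9984, CWE-125) shows the reworked snd_msndmidi_input_read(): the head, tail and size words are read from the memory-mapped MIDI queue once into locals, rejected outright if head or tail exceeds size, and head is wrapped locally before being written back, so a corrupted queue descriptor can no longer drive an out-of-bounds read of the data buffer. Below is a small self-contained sketch of that defensive ring-buffer drain; the struct and function names (demo_queue, demo_drain) are invented for the example, and it additionally bounds the advertised size against the real capacity, since the demo, unlike the driver's fixed DSP memory layout, has nothing else pinning it.

/*
 * Illustrative sketch (not driver code): queue indices that arrive from
 * device-visible memory are snapshotted into locals and validated against
 * the queue size before ever being used to index the data buffer, so a
 * corrupted head/tail cannot cause an out-of-bounds read (CWE-125).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QUEUE_SIZE 16

struct demo_queue {
	uint16_t head;              /* written by "hardware"            */
	uint16_t tail;              /* written by "hardware"            */
	uint16_t size;              /* nominal capacity, also untrusted */
	uint8_t  data[QUEUE_SIZE];
};

/* Drain the queue, refusing to index past the real buffer. Returns the
 * number of bytes consumed, or -1 if the indices were implausible. */
static int demo_drain(struct demo_queue *q, uint8_t *out, size_t out_len)
{
	uint16_t head = q->head;    /* snapshot untrusted fields once */
	uint16_t tail = q->tail;
	uint16_t size = q->size;
	size_t n = 0;

	if (size >= QUEUE_SIZE || head > size || tail > size)
		return -1;          /* indices out of range: bail out */

	while (head != tail && n < out_len) {
		out[n++] = q->data[head];
		if (++head > size)
			head = 0;   /* wrap exactly as the fixed driver does */
	}
	q->head = head;             /* publish the consumed position */
	return (int)n;
}

int main(void)
{
	struct demo_queue q = { .head = 0, .tail = 4, .size = QUEUE_SIZE - 1 };
	uint8_t buf[QUEUE_SIZE];

	memcpy(q.data, "midi", 4);
	printf("consumed %d bytes\n", demo_drain(&q, buf, sizeof(buf)));

	/* A corrupted head larger than size is rejected instead of read. */
	q.head = 0xFFFF;
	printf("corrupted queue -> %d\n", demo_drain(&q, buf, sizeof(buf)));
	return 0;
}

Run as-is, the first call drains the four queued bytes and the second call, presented with an implausible head index, refuses to touch the buffer at all.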
keyring.c
__releases
/* Keyring handling * * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/seq_file.h> #include <linux/err.h> #include <keys/keyring-type.h> #include <keys/user-type.h> #include <linux/assoc_array_priv.h> #include <linux/uaccess.h> #include "internal.h" /* * When plumbing the depths of the key tree, this sets a hard limit * set on how deep we're willing to go. */ #define KEYRING_SEARCH_MAX_DEPTH 6 /* * We keep all named keyrings in a hash to speed looking them up. */ #define KEYRING_NAME_HASH_SIZE (1 << 5) /* * We mark pointers we pass to the associative array with bit 1 set if * they're keyrings and clear otherwise. */ #define KEYRING_PTR_SUBTYPE 0x2UL static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) { return (unsigned long)x & KEYRING_PTR_SUBTYPE; } static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) { void *object = assoc_array_ptr_to_leaf(x); return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); } static inline void *keyring_key_to_ptr(struct key *key) { if (key->type == &key_type_keyring) return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); return key; } static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; static DEFINE_RWLOCK(keyring_name_lock); static inline unsigned keyring_hash(const char *desc) { unsigned bucket = 0; for (; *desc; desc++) bucket += (unsigned char)*desc; return bucket & (KEYRING_NAME_HASH_SIZE - 1); } /* * The keyring key type definition. Keyrings are simply keys of this type and * can be treated as ordinary keys in addition to having their own special * operations. */ static int keyring_preparse(struct key_preparsed_payload *prep); static void keyring_free_preparse(struct key_preparsed_payload *prep); static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep); static void keyring_revoke(struct key *keyring); static void keyring_destroy(struct key *keyring); static void keyring_describe(const struct key *keyring, struct seq_file *m); static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen); struct key_type key_type_keyring = { .name = "keyring", .def_datalen = 0, .preparse = keyring_preparse, .free_preparse = keyring_free_preparse, .instantiate = keyring_instantiate, .revoke = keyring_revoke, .destroy = keyring_destroy, .describe = keyring_describe, .read = keyring_read, }; EXPORT_SYMBOL(key_type_keyring); /* * Semaphore to serialise link/link calls to prevent two link calls in parallel * introducing a cycle. */ static DECLARE_RWSEM(keyring_serialise_link_sem); /* * Publish the name of a keyring so that it can be found by name (if it has * one). 
*/ static void keyring_publish_name(struct key *keyring) { int bucket; if (keyring->description) { bucket = keyring_hash(keyring->description); write_lock(&keyring_name_lock); if (!keyring_name_hash[bucket].next) INIT_LIST_HEAD(&keyring_name_hash[bucket]); list_add_tail(&keyring->type_data.link, &keyring_name_hash[bucket]); write_unlock(&keyring_name_lock); } } /* * Preparse a keyring payload */ static int keyring_preparse(struct key_preparsed_payload *prep) { return prep->datalen != 0 ? -EINVAL : 0; } /* * Free a preparse of a user defined key payload */ static void keyring_free_preparse(struct key_preparsed_payload *prep) { } /* * Initialise a keyring. * * Returns 0 on success, -EINVAL if given any data. */ static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep) { assoc_array_init(&keyring->keys); /* make the keyring available by name if it has one */ keyring_publish_name(keyring); return 0; } /* * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd * fold the carry back too, but that requires inline asm. */ static u64 mult_64x32_and_fold(u64 x, u32 y) { u64 hi = (u64)(u32)(x >> 32) * y; u64 lo = (u64)(u32)(x) * y; return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); } /* * Hash a key type and description. */ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) { const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; const char *description = index_key->description; unsigned long hash, type; u32 piece; u64 acc; int n, desc_len = index_key->desc_len; type = (unsigned long)index_key->type; acc = mult_64x32_and_fold(type, desc_len + 13); acc = mult_64x32_and_fold(acc, 9207); for (;;) { n = desc_len; if (n <= 0) break; if (n > 4) n = 4; piece = 0; memcpy(&piece, description, n); description += n; desc_len -= n; acc = mult_64x32_and_fold(acc, piece); acc = mult_64x32_and_fold(acc, 9207); } /* Fold the hash down to 32 bits if need be. */ hash = acc; if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) hash ^= acc >> 32; /* Squidge all the keyrings into a separate part of the tree to * ordinary keys by making sure the lowest level segment in the hash is * zero for keyrings and non-zero otherwise. */ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) return (hash + (hash << level_shift)) & ~fan_mask; return hash; } /* * Build the next index key chunk. * * On 32-bit systems the index key is laid out as: * * 0 4 5 9... * hash desclen typeptr desc[] * * On 64-bit systems: * * 0 8 9 17... * hash desclen typeptr desc[] * * We return it one word-sized chunk at a time. 
*/ static unsigned long keyring_get_key_chunk(const void *data, int level) { const struct keyring_index_key *index_key = data; unsigned long chunk = 0; long offset = 0; int desc_len = index_key->desc_len, n = sizeof(chunk); level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; switch (level) { case 0: return hash_key_type_and_desc(index_key); case 1: return ((unsigned long)index_key->type << 8) | desc_len; case 2: if (desc_len == 0) return (u8)((unsigned long)index_key->type >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); n--; offset = 1; default: offset += sizeof(chunk) - 1; offset += (level - 3) * sizeof(chunk); if (offset >= desc_len) return 0; desc_len -= offset; if (desc_len > n) desc_len = n; offset += desc_len; do { chunk <<= 8; chunk |= ((u8*)index_key->description)[--offset]; } while (--desc_len > 0); if (level == 2) { chunk <<= 8; chunk |= (u8)((unsigned long)index_key->type >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); } return chunk; } } static unsigned long keyring_get_object_key_chunk(const void *object, int level) { const struct key *key = keyring_ptr_to_key(object); return keyring_get_key_chunk(&key->index_key, level); } static bool keyring_compare_object(const void *object, const void *data) { const struct keyring_index_key *index_key = data; const struct key *key = keyring_ptr_to_key(object); return key->index_key.type == index_key->type && key->index_key.desc_len == index_key->desc_len && memcmp(key->index_key.description, index_key->description, index_key->desc_len) == 0; } /* * Compare the index keys of a pair of objects and determine the bit position * at which they differ - if they differ. */ static int keyring_diff_objects(const void *object, const void *data) { const struct key *key_a = keyring_ptr_to_key(object); const struct keyring_index_key *a = &key_a->index_key; const struct keyring_index_key *b = data; unsigned long seg_a, seg_b; int level, i; level = 0; seg_a = hash_key_type_and_desc(a); seg_b = hash_key_type_and_desc(b); if ((seg_a ^ seg_b) != 0) goto differ; /* The number of bits contributed by the hash is controlled by a * constant in the assoc_array headers. Everything else thereafter we * can deal with as being machine word-size dependent. */ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; seg_a = a->desc_len; seg_b = b->desc_len; if ((seg_a ^ seg_b) != 0) goto differ; /* The next bit may not work on big endian */ level++; seg_a = (unsigned long)a->type; seg_b = (unsigned long)b->type; if ((seg_a ^ seg_b) != 0) goto differ; level += sizeof(unsigned long); if (a->desc_len == 0) goto same; i = 0; if (((unsigned long)a->description | (unsigned long)b->description) & (sizeof(unsigned long) - 1)) { do { seg_a = *(unsigned long *)(a->description + i); seg_b = *(unsigned long *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; i += sizeof(unsigned long); } while (i < (a->desc_len & (sizeof(unsigned long) - 1))); } for (; i < a->desc_len; i++) { seg_a = *(unsigned char *)(a->description + i); seg_b = *(unsigned char *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; } same: return -1; differ_plus_i: level += i; differ: i = level * 8 + __ffs(seg_a ^ seg_b); return i; } /* * Free an object after stripping the keyring flag off of the pointer. */ static void keyring_free_object(void *object) { key_put(keyring_ptr_to_key(object)); } /* * Operations for keyring management by the index-tree routines. 
*/ static const struct assoc_array_ops keyring_assoc_array_ops = { .get_key_chunk = keyring_get_key_chunk, .get_object_key_chunk = keyring_get_object_key_chunk, .compare_object = keyring_compare_object, .diff_objects = keyring_diff_objects, .free_object = keyring_free_object, }; /* * Clean up a keyring when it is destroyed. Unpublish its name if it had one * and dispose of its data. * * The garbage collector detects the final key_put(), removes the keyring from * the serial number tree and then does RCU synchronisation before coming here, * so we shouldn't need to worry about code poking around here with the RCU * readlock held by this time. */ static void keyring_destroy(struct key *keyring) { if (keyring->description) { write_lock(&keyring_name_lock); if (keyring->type_data.link.next != NULL && !list_empty(&keyring->type_data.link)) list_del(&keyring->type_data.link); write_unlock(&keyring_name_lock); } assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops); } /* * Describe a keyring for /proc. */ static void keyring_describe(const struct key *keyring, struct seq_file *m) { if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); if (key_is_instantiated(keyring)) { if (keyring->keys.nr_leaves_on_tree != 0) seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); else seq_puts(m, ": empty"); } } struct keyring_read_iterator_context { size_t qty; size_t count; key_serial_t __user *buffer; }; static int keyring_read_iterator(const void *object, void *data) { struct keyring_read_iterator_context *ctx = data; const struct key *key = keyring_ptr_to_key(object); int ret; kenter("{%s,%d},,{%zu/%zu}", key->type->name, key->serial, ctx->count, ctx->qty); if (ctx->count >= ctx->qty) return 1; ret = put_user(key->serial, ctx->buffer); if (ret < 0) return ret; ctx->buffer++; ctx->count += sizeof(key->serial); return 0; } /* * Read a list of key IDs from the keyring's contents in binary form * * The keyring's semaphore is read-locked by the caller. This prevents someone * from modifying it under us - which could cause us to read key IDs multiple * times. */ static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen) { struct keyring_read_iterator_context ctx; unsigned long nr_keys; int ret; kenter("{%d},,%zu", key_serial(keyring), buflen); if (buflen & (sizeof(key_serial_t) - 1)) return -EINVAL; nr_keys = keyring->keys.nr_leaves_on_tree; if (nr_keys == 0) return 0; /* Calculate how much data we could return */ ctx.qty = nr_keys * sizeof(key_serial_t); if (!buffer || !buflen) return ctx.qty; if (buflen > ctx.qty) ctx.qty = buflen; /* Copy the IDs of the subscribed keys into the buffer */ ctx.buffer = (key_serial_t __user *)buffer; ctx.count = 0; ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); if (ret < 0) { kleave(" = %d [iterate]", ret); return ret; } kleave(" = %zu [ok]", ctx.count); return ctx.count; } /* * Allocate a keyring and link into the destination keyring. */ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key *dest) { struct key *keyring; int ret; keyring = key_alloc(&key_type_keyring, description, uid, gid, cred, perm, flags); if (!IS_ERR(keyring)) { ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); if (ret < 0) { key_put(keyring); keyring = ERR_PTR(ret); } } return keyring; } EXPORT_SYMBOL(keyring_alloc); /* * By default, we keys found by getting an exact match on their descriptions. 
*/ bool key_default_cmp(const struct key *key, const struct key_match_data *match_data) { return strcmp(key->description, match_data->raw_data) == 0; } /* * Iteration function to consider each key found. */ static int keyring_search_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); unsigned long kflags = key->flags; kenter("{%d}", key->serial); /* ignore keys not of this type */ if (key->type != ctx->index_key.type) { kleave(" = 0 [!type]"); return 0; } /* skip invalidated, revoked and expired keys */ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { if (kflags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { ctx->result = ERR_PTR(-EKEYREVOKED); kleave(" = %d [invrev]", ctx->skipped_ret); goto skipped; } if (key->expiry && ctx->now.tv_sec >= key->expiry) { if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) ctx->result = ERR_PTR(-EKEYEXPIRED); kleave(" = %d [expire]", ctx->skipped_ret); goto skipped; } } /* keys that don't match */ if (!ctx->match_data.cmp(key, &ctx->match_data)) { kleave(" = 0 [!match]"); return 0; } /* key must have search permissions */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) { ctx->result = ERR_PTR(-EACCES); kleave(" = %d [!perm]", ctx->skipped_ret); goto skipped; } if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { /* we set a different error code if we pass a negative key */ if (kflags & (1 << KEY_FLAG_NEGATIVE)) { smp_rmb(); ctx->result = ERR_PTR(key->type_data.reject_error); kleave(" = %d [neg]", ctx->skipped_ret); goto skipped; } } /* Found */ ctx->result = make_key_ref(key, ctx->possessed); kleave(" = 1 [found]"); return 1; skipped: return ctx->skipped_ret; } /* * Search inside a keyring for a key. We can search by walking to it * directly based on its index-key or we can iterate over the entire * tree looking for it, based on the match function. */ static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) { if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) { const void *object; object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, &ctx->index_key); return object ? ctx->iterator(object, ctx) : 0; } return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); } /* * Search a tree of keyrings that point to other keyrings up to the maximum * depth. */ static bool search_nested_keyrings(struct key *keyring, struct keyring_search_context *ctx) { struct { struct key *keyring; struct assoc_array_node *node; int slot; } stack[KEYRING_SEARCH_MAX_DEPTH]; struct assoc_array_shortcut *shortcut; struct assoc_array_node *node; struct assoc_array_ptr *ptr; struct key *key; int sp = 0, slot; kenter("{%d},{%s,%s}", keyring->serial, ctx->index_key.type->name, ctx->index_key.description); #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK) BUG_ON((ctx->flags & STATE_CHECKS) == 0 || (ctx->flags & STATE_CHECKS) == STATE_CHECKS); if (ctx->index_key.description) ctx->index_key.desc_len = strlen(ctx->index_key.description); /* Check to see if this top-level keyring is what we are looking for * and whether it is valid or not. 
*/ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || keyring_compare_object(keyring, &ctx->index_key)) { ctx->skipped_ret = 2; switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { case 1: goto found; case 2: return false; default: break; } } ctx->skipped_ret = 0; /* Start processing a new keyring */ descend_to_keyring: kdebug("descend to %d", keyring->serial); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto not_this_keyring; /* Search through the keys in this keyring before its searching its * subtrees. */ if (search_keyring(keyring, ctx)) goto found; /* Then manually iterate through the keyrings nested in this one. * * Start from the root node of the index tree. Because of the way the * hash function has been set up, keyrings cluster on the leftmost * branch of the root node (root slot 0) or in the root node itself. * Non-keyrings avoid the leftmost branch of the root entirely (root * slots 1-15). */ ptr = ACCESS_ONCE(keyring->keys.root); if (!ptr) goto not_this_keyring; if (assoc_array_ptr_is_shortcut(ptr)) { /* If the root is a shortcut, either the keyring only contains * keyring pointers (everything clusters behind root slot 0) or * doesn't contain any keyring pointers. */ shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) goto not_this_keyring; ptr = ACCESS_ONCE(shortcut->next_node); node = assoc_array_ptr_to_node(ptr); goto begin_node; } node = assoc_array_ptr_to_node(ptr); smp_read_barrier_depends(); ptr = node->slots[0]; if (!assoc_array_ptr_is_meta(ptr)) goto begin_node; descend_to_node: /* Descend to a more distal node in this keyring's content tree and go * through that. */ kdebug("descend"); if (assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->next_node); BUG_ON(!assoc_array_ptr_is_node(ptr)); } node = assoc_array_ptr_to_node(ptr); begin_node: kdebug("begin_node"); smp_read_barrier_depends(); slot = 0; ascend_to_node: /* Go through the slots in a node */ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { ptr = ACCESS_ONCE(node->slots[slot]); if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) goto descend_to_node; if (!keyring_ptr_is_keyring(ptr)) continue; key = keyring_ptr_to_key(ptr); if (sp >= KEYRING_SEARCH_MAX_DEPTH) { if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { ctx->result = ERR_PTR(-ELOOP); return false; } goto not_this_keyring; } /* Search a nested keyring */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) continue; /* stack the current position */ stack[sp].keyring = keyring; stack[sp].node = node; stack[sp].slot = slot; sp++; /* begin again with the new keyring */ keyring = key; goto descend_to_keyring; } /* We've dealt with all the slots in the current node, so now we need * to ascend to the parent and continue processing there. 
*/ ptr = ACCESS_ONCE(node->back_pointer); slot = node->parent_slot; if (ptr && assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->back_pointer); slot = shortcut->parent_slot; } if (!ptr) goto not_this_keyring; node = assoc_array_ptr_to_node(ptr); smp_read_barrier_depends(); slot++; /* If we've ascended to the root (zero backpointer), we must have just * finished processing the leftmost branch rather than the root slots - * so there can't be any more keyrings for us to find. */ if (node->back_pointer) { kdebug("ascend %d", slot); goto ascend_to_node; } /* The keyring we're looking at was disqualified or didn't contain a * matching key. */ not_this_keyring: kdebug("not_this_keyring %d", sp); if (sp <= 0) { kleave(" = false"); return false; } /* Resume the processing of a keyring higher up in the tree */ sp--; keyring = stack[sp].keyring; node = stack[sp].node; slot = stack[sp].slot + 1; kdebug("ascend to %d [%d]", keyring->serial, slot); goto ascend_to_node; /* We found a viable match */ found: key = key_ref_to_ptr(ctx->result); key_check(key); if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { key->last_used_at = ctx->now.tv_sec; keyring->last_used_at = ctx->now.tv_sec; while (sp > 0) stack[--sp].keyring->last_used_at = ctx->now.tv_sec; } kleave(" = true"); return true; } /** * keyring_search_aux - Search a keyring tree for a key matching some criteria * @keyring_ref: A pointer to the keyring with possession indicator. * @ctx: The keyring search context. * * Search the supplied keyring tree for a key that matches the criteria given. * The root keyring and any linked keyrings must grant Search permission to the * caller to be searchable and keys can only be found if they too grant Search * to the caller. The possession flag on the root keyring pointer controls use * of the possessor bits in permissions checking of the entire tree. In * addition, the LSM gets to forbid keyring searches and key matches. * * The search is performed as a breadth-then-depth search up to the prescribed * limit (KEYRING_SEARCH_MAX_DEPTH). * * Keys are matched to the type provided and are then filtered by the match * function, which is given the description to use in any way it sees fit. The * match function may use any attributes of a key that it wishes to to * determine the match. Normally the match function from the key type would be * used. * * RCU can be used to prevent the keyring key lists from disappearing without * the need to take lots of locks. * * Returns a pointer to the found key and increments the key usage count if * successful; -EAGAIN if no matching keys were found, or if expired or revoked * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the * specified keyring wasn't a keyring. * * In the case of a successful return, the possession attribute from * @keyring_ref is propagated to the returned key reference. 
*/ key_ref_t keyring_search_aux(key_ref_t keyring_ref, struct keyring_search_context *ctx) { struct key *keyring; long err; ctx->iterator = keyring_search_iterator; ctx->possessed = is_key_possessed(keyring_ref); ctx->result = ERR_PTR(-EAGAIN); keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); if (keyring->type != &key_type_keyring) return ERR_PTR(-ENOTDIR); if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH); if (err < 0) return ERR_PTR(err); } rcu_read_lock(); ctx->now = current_kernel_time(); if (search_nested_keyrings(keyring, ctx)) __key_get(key_ref_to_ptr(ctx->result)); rcu_read_unlock(); return ctx->result; } /** * keyring_search - Search the supplied keyring tree for a matching key * @keyring: The root of the keyring tree to be searched. * @type: The type of keyring we want to find. * @description: The name of the keyring we want to find. * * As keyring_search_aux() above, but using the current task's credentials and * type's default matching function and preferred search method. */ key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, const char *description) { struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_DO_STATE_CHECK, }; key_ref_t key; int ret; if (type->match_preparse) { ret = type->match_preparse(&ctx.match_data); if (ret < 0) return ERR_PTR(ret); } key = keyring_search_aux(keyring, &ctx); if (type->match_free) type->match_free(&ctx.match_data); return key; } EXPORT_SYMBOL(keyring_search); /* * Search the given keyring for a key that might be updated. * * The caller must guarantee that the keyring is a keyring and that the * permission is granted to modify the keyring as no check is made here. The * caller must also hold a lock on the keyring semaphore. * * Returns a pointer to the found key with usage count incremented if * successful and returns NULL if not found. Revoked and invalidated keys are * skipped over. * * If successful, the possession indicator is propagated from the keyring ref * to the returned key reference. */ key_ref_t find_key_to_update(key_ref_t keyring_ref, const struct keyring_index_key *index_key) { struct key *keyring, *key; const void *object; keyring = key_ref_to_ptr(keyring_ref); kenter("{%d},{%s,%s}", keyring->serial, index_key->type->name, index_key->description); object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, index_key); if (object) goto found; kleave(" = NULL"); return NULL; found: key = keyring_ptr_to_key(object); if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { kleave(" = NULL [x]"); return NULL; } __key_get(key); kleave(" = {%d}", key->serial); return make_key_ref(key, is_key_possessed(keyring_ref)); } /* * Find a keyring with the specified name. * * All named keyrings in the current user namespace are searched, provided they * grant Search permission directly to the caller (unless this check is * skipped). Keyrings whose usage points have reached zero or who have been * revoked are skipped. * * Returns a pointer to the keyring with the keyring's refcount having being * incremented on success. -ENOKEY is returned if a key could not be found. 
*/ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) { struct key *keyring; int bucket; if (!name) return ERR_PTR(-EINVAL); bucket = keyring_hash(name); read_lock(&keyring_name_lock); if (keyring_name_hash[bucket].next) { /* search this hash bucket for a keyring with a matching name * that's readable and that hasn't been revoked */ list_for_each_entry(keyring, &keyring_name_hash[bucket], type_data.link ) { if (!kuid_has_mapping(current_user_ns(), keyring->user->uid)) continue; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) continue; if (strcmp(keyring->description, name) != 0) continue; if (!skip_perm_check && key_permission(make_key_ref(keyring, 0), KEY_NEED_SEARCH) < 0) continue; /* we've got a match but we might end up racing with * key_cleanup() if the keyring is currently 'dead' * (ie. it has a zero usage count) */ if (!atomic_inc_not_zero(&keyring->usage)) continue; keyring->last_used_at = current_kernel_time().tv_sec; goto out; } } keyring = ERR_PTR(-ENOKEY); out: read_unlock(&keyring_name_lock); return keyring; } static int keyring_detect_cycle_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); kenter("{%d}", key->serial); /* We might get a keyring with matching index-key that is nonetheless a * different keyring. */ if (key != ctx->match_data.raw_data) return 0; ctx->result = ERR_PTR(-EDEADLK); return 1; } /* * See if a cycle will will be created by inserting acyclic tree B in acyclic * tree A at the topmost level (ie: as a direct child of A). * * Since we are adding B to A at the top level, checking for cycles should just * be a matter of seeing if node A is somewhere in tree B. */ static int keyring_detect_cycle(struct key *A, struct key *B) { struct keyring_search_context ctx = { .index_key = A->index_key, .match_data.raw_data = A, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .iterator = keyring_detect_cycle_iterator, .flags = (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_NO_UPDATE_TIME | KEYRING_SEARCH_NO_CHECK_PERM | KEYRING_SEARCH_DETECT_TOO_DEEP), }; rcu_read_lock(); search_nested_keyrings(B, &ctx); rcu_read_unlock(); return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); } /* * Preallocate memory so that a key can be linked into to a keyring. */ int __key_link_begin(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit **_edit) __acquires(&keyring->sem) __acquires(&keyring_serialise_link_sem) { struct assoc_array_edit *edit; int ret; kenter("%d,%s,%s,", keyring->serial, index_key->type->name, index_key->description); BUG_ON(index_key->desc_len == 0); if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) goto error_krsem; /* serialise link/link calls to prevent parallel calls causing a cycle * when linking two keyring in opposite orders */ if (index_key->type == &key_type_keyring) down_write(&keyring_serialise_link_sem); /* Create an edit script that will insert/replace the key in the * keyring tree. */ edit = assoc_array_insert(&keyring->keys, &keyring_assoc_array_ops, index_key, NULL); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error_sem; } /* If we're not replacing a link in-place then we're going to need some * extra quota. 
*/ if (!edit->dead_leaf) { ret = key_payload_reserve(keyring, keyring->datalen + KEYQUOTA_LINK_BYTES); if (ret < 0) goto error_cancel; } *_edit = edit; kleave(" = 0"); return 0; error_cancel: assoc_array_cancel_edit(edit); error_sem: if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); error_krsem: up_write(&keyring->sem); kleave(" = %d", ret); return ret; } /* * Check already instantiated keys aren't going to be a problem. * * The caller must have called __key_link_begin(). Don't need to call this for * keys that were created since __key_link_begin() was called. */ int __key_link_check_live_key(struct key *keyring, struct key *key) { if (key->type == &key_type_keyring) /* check that we aren't going to create a cycle by linking one * keyring to another */ return keyring_detect_cycle(keyring, key); return 0; } /* * Link a key into to a keyring. * * Must be called with __key_link_begin() having being called. Discards any * already extant link to matching key if there is one, so that each keyring * holds at most one link to any given key of a particular type+description * combination. */ void __key_link(struct key *key, struct assoc_array_edit **_edit) { __key_get(key); assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); assoc_array_apply_edit(*_edit); *_edit = NULL; } /* * Finish linking a key into to a keyring. * * Must be called with __key_link_begin() having being called. */ void __key_link_end(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit *edit) __releases(&keyring->sem) __releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit && !edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); assoc_array_cancel_edit(edit); } up_write(&keyring->sem); } /** * key_link - Link a key to a keyring * @keyring: The keyring to make the link in. * @key: The key to link to. * * Make a link in a keyring to a key, such that the keyring holds a reference * on that key and the key can potentially be found by searching that keyring. * * This function will write-lock the keyring's semaphore and will consume some * of the user's key data quota to hold the link. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is * full, -EDQUOT if there is insufficient key data quota remaining to add * another link or -ENOMEM if there's insufficient memory. * * It is assumed that the caller has checked that it is permitted for a link to * be made (the keyring should have Write permission and the key Link * permission). 
*/ int key_link(struct key *keyring, struct key *key) { struct assoc_array_edit *edit; int ret; kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage)); key_check(keyring); key_check(key); if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) && !test_bit(KEY_FLAG_TRUSTED, &key->flags)) return -EPERM; ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret == 0) { kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage)); ret = __key_link_check_live_key(keyring, key); if (ret == 0) __key_link(key, &edit); __key_link_end(keyring, &key->index_key, edit); } kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage)); return ret; } EXPORT_SYMBOL(key_link); /** * key_unlink - Unlink the first link to a key from a keyring. * @keyring: The keyring to remove the link from. * @key: The key the link is to. * * Remove a link from a keyring to a key. * * This function will write-lock the keyring's semaphore. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if * the key isn't linked to by the keyring or -ENOMEM if there's insufficient * memory. * * It is assumed that the caller has checked that it is permitted for a link to * be removed (the keyring should have Write permission; no permissions are * required on the key). */ int key_unlink(struct key *keyring, struct key *key) { struct assoc_array_edit *edit; int ret; key_check(keyring); key_check(key); if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, &key->index_key); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error; } ret = -ENOENT; if (edit == NULL) goto error; assoc_array_apply_edit(edit); key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); ret = 0; error: up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(key_unlink); /** * keyring_clear - Clear a keyring * @keyring: The keyring to clear. * * Clear the contents of the specified keyring. * * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring. */ int keyring_clear(struct key *keyring) { struct assoc_array_edit *edit; int ret; if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); if (IS_ERR(edit)) { ret = PTR_ERR(edit); } else { if (edit) assoc_array_apply_edit(edit); key_payload_reserve(keyring, 0); ret = 0; } up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(keyring_clear); /* * Dispose of the links from a revoked keyring. * * This is called with the key sem write-locked. */ static void keyring_revoke(struct key *keyring) { struct assoc_array_edit *edit; edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); if (!IS_ERR(edit)) { if (edit) assoc_array_apply_edit(edit); key_payload_reserve(keyring, 0); } } static bool keyring_gc_select_iterator(void *object, void *iterator_data) { struct key *key = keyring_ptr_to_key(object); time_t *limit = iterator_data; if (key_is_dead(key, *limit)) return false; key_get(key); return true; } static int keyring_gc_check_iterator(const void *object, void *iterator_data) { const struct key *key = keyring_ptr_to_key(object); time_t *limit = iterator_data; key_check(key); return key_is_dead(key, *limit); } /* * Garbage collect pointers from a keyring. * * Not called with any locks held. The keyring's key struct will not be * deallocated under us as only our caller may deallocate it. 
*/ void keyring_gc(struct key *keyring, time_t limit) { int result; kenter("%x{%s}", keyring->serial, keyring->description ?: ""); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto dont_gc; /* scan the keyring looking for dead keys */ rcu_read_lock(); result = assoc_array_iterate(&keyring->keys, keyring_gc_check_iterator, &limit); rcu_read_unlock(); if (result == true) goto do_gc; dont_gc: kleave(" [no gc]"); return; do_gc: down_write(&keyring->sem); assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops, keyring_gc_select_iterator, &limit); up_write(&keyring->sem); kleave(" [gc]"); }
/* Keyring handling * * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/seq_file.h> #include <linux/err.h> #include <keys/keyring-type.h> #include <keys/user-type.h> #include <linux/assoc_array_priv.h> #include <linux/uaccess.h> #include "internal.h" /* * When plumbing the depths of the key tree, this sets a hard limit * set on how deep we're willing to go. */ #define KEYRING_SEARCH_MAX_DEPTH 6 /* * We keep all named keyrings in a hash to speed looking them up. */ #define KEYRING_NAME_HASH_SIZE (1 << 5) /* * We mark pointers we pass to the associative array with bit 1 set if * they're keyrings and clear otherwise. */ #define KEYRING_PTR_SUBTYPE 0x2UL static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) { return (unsigned long)x & KEYRING_PTR_SUBTYPE; } static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) { void *object = assoc_array_ptr_to_leaf(x); return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); } static inline void *keyring_key_to_ptr(struct key *key) { if (key->type == &key_type_keyring) return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); return key; } static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; static DEFINE_RWLOCK(keyring_name_lock); static inline unsigned keyring_hash(const char *desc) { unsigned bucket = 0; for (; *desc; desc++) bucket += (unsigned char)*desc; return bucket & (KEYRING_NAME_HASH_SIZE - 1); } /* * The keyring key type definition. Keyrings are simply keys of this type and * can be treated as ordinary keys in addition to having their own special * operations. */ static int keyring_preparse(struct key_preparsed_payload *prep); static void keyring_free_preparse(struct key_preparsed_payload *prep); static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep); static void keyring_revoke(struct key *keyring); static void keyring_destroy(struct key *keyring); static void keyring_describe(const struct key *keyring, struct seq_file *m); static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen); struct key_type key_type_keyring = { .name = "keyring", .def_datalen = 0, .preparse = keyring_preparse, .free_preparse = keyring_free_preparse, .instantiate = keyring_instantiate, .revoke = keyring_revoke, .destroy = keyring_destroy, .describe = keyring_describe, .read = keyring_read, }; EXPORT_SYMBOL(key_type_keyring); /* * Semaphore to serialise link/link calls to prevent two link calls in parallel * introducing a cycle. */ static DECLARE_RWSEM(keyring_serialise_link_sem); /* * Publish the name of a keyring so that it can be found by name (if it has * one). 
*/ static void keyring_publish_name(struct key *keyring) { int bucket; if (keyring->description) { bucket = keyring_hash(keyring->description); write_lock(&keyring_name_lock); if (!keyring_name_hash[bucket].next) INIT_LIST_HEAD(&keyring_name_hash[bucket]); list_add_tail(&keyring->type_data.link, &keyring_name_hash[bucket]); write_unlock(&keyring_name_lock); } } /* * Preparse a keyring payload */ static int keyring_preparse(struct key_preparsed_payload *prep) { return prep->datalen != 0 ? -EINVAL : 0; } /* * Free a preparse of a user defined key payload */ static void keyring_free_preparse(struct key_preparsed_payload *prep) { } /* * Initialise a keyring. * * Returns 0 on success, -EINVAL if given any data. */ static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep) { assoc_array_init(&keyring->keys); /* make the keyring available by name if it has one */ keyring_publish_name(keyring); return 0; } /* * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd * fold the carry back too, but that requires inline asm. */ static u64 mult_64x32_and_fold(u64 x, u32 y) { u64 hi = (u64)(u32)(x >> 32) * y; u64 lo = (u64)(u32)(x) * y; return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); } /* * Hash a key type and description. */ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) { const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; const char *description = index_key->description; unsigned long hash, type; u32 piece; u64 acc; int n, desc_len = index_key->desc_len; type = (unsigned long)index_key->type; acc = mult_64x32_and_fold(type, desc_len + 13); acc = mult_64x32_and_fold(acc, 9207); for (;;) { n = desc_len; if (n <= 0) break; if (n > 4) n = 4; piece = 0; memcpy(&piece, description, n); description += n; desc_len -= n; acc = mult_64x32_and_fold(acc, piece); acc = mult_64x32_and_fold(acc, 9207); } /* Fold the hash down to 32 bits if need be. */ hash = acc; if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) hash ^= acc >> 32; /* Squidge all the keyrings into a separate part of the tree to * ordinary keys by making sure the lowest level segment in the hash is * zero for keyrings and non-zero otherwise. */ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) return (hash + (hash << level_shift)) & ~fan_mask; return hash; } /* * Build the next index key chunk. * * On 32-bit systems the index key is laid out as: * * 0 4 5 9... * hash desclen typeptr desc[] * * On 64-bit systems: * * 0 8 9 17... * hash desclen typeptr desc[] * * We return it one word-sized chunk at a time. 
*/ static unsigned long keyring_get_key_chunk(const void *data, int level) { const struct keyring_index_key *index_key = data; unsigned long chunk = 0; long offset = 0; int desc_len = index_key->desc_len, n = sizeof(chunk); level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; switch (level) { case 0: return hash_key_type_and_desc(index_key); case 1: return ((unsigned long)index_key->type << 8) | desc_len; case 2: if (desc_len == 0) return (u8)((unsigned long)index_key->type >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); n--; offset = 1; default: offset += sizeof(chunk) - 1; offset += (level - 3) * sizeof(chunk); if (offset >= desc_len) return 0; desc_len -= offset; if (desc_len > n) desc_len = n; offset += desc_len; do { chunk <<= 8; chunk |= ((u8*)index_key->description)[--offset]; } while (--desc_len > 0); if (level == 2) { chunk <<= 8; chunk |= (u8)((unsigned long)index_key->type >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); } return chunk; } } static unsigned long keyring_get_object_key_chunk(const void *object, int level) { const struct key *key = keyring_ptr_to_key(object); return keyring_get_key_chunk(&key->index_key, level); } static bool keyring_compare_object(const void *object, const void *data) { const struct keyring_index_key *index_key = data; const struct key *key = keyring_ptr_to_key(object); return key->index_key.type == index_key->type && key->index_key.desc_len == index_key->desc_len && memcmp(key->index_key.description, index_key->description, index_key->desc_len) == 0; } /* * Compare the index keys of a pair of objects and determine the bit position * at which they differ - if they differ. */ static int keyring_diff_objects(const void *object, const void *data) { const struct key *key_a = keyring_ptr_to_key(object); const struct keyring_index_key *a = &key_a->index_key; const struct keyring_index_key *b = data; unsigned long seg_a, seg_b; int level, i; level = 0; seg_a = hash_key_type_and_desc(a); seg_b = hash_key_type_and_desc(b); if ((seg_a ^ seg_b) != 0) goto differ; /* The number of bits contributed by the hash is controlled by a * constant in the assoc_array headers. Everything else thereafter we * can deal with as being machine word-size dependent. */ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; seg_a = a->desc_len; seg_b = b->desc_len; if ((seg_a ^ seg_b) != 0) goto differ; /* The next bit may not work on big endian */ level++; seg_a = (unsigned long)a->type; seg_b = (unsigned long)b->type; if ((seg_a ^ seg_b) != 0) goto differ; level += sizeof(unsigned long); if (a->desc_len == 0) goto same; i = 0; if (((unsigned long)a->description | (unsigned long)b->description) & (sizeof(unsigned long) - 1)) { do { seg_a = *(unsigned long *)(a->description + i); seg_b = *(unsigned long *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; i += sizeof(unsigned long); } while (i < (a->desc_len & (sizeof(unsigned long) - 1))); } for (; i < a->desc_len; i++) { seg_a = *(unsigned char *)(a->description + i); seg_b = *(unsigned char *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; } same: return -1; differ_plus_i: level += i; differ: i = level * 8 + __ffs(seg_a ^ seg_b); return i; } /* * Free an object after stripping the keyring flag off of the pointer. */ static void keyring_free_object(void *object) { key_put(keyring_ptr_to_key(object)); } /* * Operations for keyring management by the index-tree routines. 
*/ static const struct assoc_array_ops keyring_assoc_array_ops = { .get_key_chunk = keyring_get_key_chunk, .get_object_key_chunk = keyring_get_object_key_chunk, .compare_object = keyring_compare_object, .diff_objects = keyring_diff_objects, .free_object = keyring_free_object, }; /* * Clean up a keyring when it is destroyed. Unpublish its name if it had one * and dispose of its data. * * The garbage collector detects the final key_put(), removes the keyring from * the serial number tree and then does RCU synchronisation before coming here, * so we shouldn't need to worry about code poking around here with the RCU * readlock held by this time. */ static void keyring_destroy(struct key *keyring) { if (keyring->description) { write_lock(&keyring_name_lock); if (keyring->type_data.link.next != NULL && !list_empty(&keyring->type_data.link)) list_del(&keyring->type_data.link); write_unlock(&keyring_name_lock); } assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops); } /* * Describe a keyring for /proc. */ static void keyring_describe(const struct key *keyring, struct seq_file *m) { if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); if (key_is_instantiated(keyring)) { if (keyring->keys.nr_leaves_on_tree != 0) seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); else seq_puts(m, ": empty"); } } struct keyring_read_iterator_context { size_t qty; size_t count; key_serial_t __user *buffer; }; static int keyring_read_iterator(const void *object, void *data) { struct keyring_read_iterator_context *ctx = data; const struct key *key = keyring_ptr_to_key(object); int ret; kenter("{%s,%d},,{%zu/%zu}", key->type->name, key->serial, ctx->count, ctx->qty); if (ctx->count >= ctx->qty) return 1; ret = put_user(key->serial, ctx->buffer); if (ret < 0) return ret; ctx->buffer++; ctx->count += sizeof(key->serial); return 0; } /* * Read a list of key IDs from the keyring's contents in binary form * * The keyring's semaphore is read-locked by the caller. This prevents someone * from modifying it under us - which could cause us to read key IDs multiple * times. */ static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen) { struct keyring_read_iterator_context ctx; unsigned long nr_keys; int ret; kenter("{%d},,%zu", key_serial(keyring), buflen); if (buflen & (sizeof(key_serial_t) - 1)) return -EINVAL; nr_keys = keyring->keys.nr_leaves_on_tree; if (nr_keys == 0) return 0; /* Calculate how much data we could return */ ctx.qty = nr_keys * sizeof(key_serial_t); if (!buffer || !buflen) return ctx.qty; if (buflen > ctx.qty) ctx.qty = buflen; /* Copy the IDs of the subscribed keys into the buffer */ ctx.buffer = (key_serial_t __user *)buffer; ctx.count = 0; ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); if (ret < 0) { kleave(" = %d [iterate]", ret); return ret; } kleave(" = %zu [ok]", ctx.count); return ctx.count; } /* * Allocate a keyring and link into the destination keyring. */ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key *dest) { struct key *keyring; int ret; keyring = key_alloc(&key_type_keyring, description, uid, gid, cred, perm, flags); if (!IS_ERR(keyring)) { ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); if (ret < 0) { key_put(keyring); keyring = ERR_PTR(ret); } } return keyring; } EXPORT_SYMBOL(keyring_alloc); /* * By default, we keys found by getting an exact match on their descriptions. 
*/ bool key_default_cmp(const struct key *key, const struct key_match_data *match_data) { return strcmp(key->description, match_data->raw_data) == 0; } /* * Iteration function to consider each key found. */ static int keyring_search_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); unsigned long kflags = key->flags; kenter("{%d}", key->serial); /* ignore keys not of this type */ if (key->type != ctx->index_key.type) { kleave(" = 0 [!type]"); return 0; } /* skip invalidated, revoked and expired keys */ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { if (kflags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { ctx->result = ERR_PTR(-EKEYREVOKED); kleave(" = %d [invrev]", ctx->skipped_ret); goto skipped; } if (key->expiry && ctx->now.tv_sec >= key->expiry) { if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) ctx->result = ERR_PTR(-EKEYEXPIRED); kleave(" = %d [expire]", ctx->skipped_ret); goto skipped; } } /* keys that don't match */ if (!ctx->match_data.cmp(key, &ctx->match_data)) { kleave(" = 0 [!match]"); return 0; } /* key must have search permissions */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) { ctx->result = ERR_PTR(-EACCES); kleave(" = %d [!perm]", ctx->skipped_ret); goto skipped; } if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { /* we set a different error code if we pass a negative key */ if (kflags & (1 << KEY_FLAG_NEGATIVE)) { smp_rmb(); ctx->result = ERR_PTR(key->type_data.reject_error); kleave(" = %d [neg]", ctx->skipped_ret); goto skipped; } } /* Found */ ctx->result = make_key_ref(key, ctx->possessed); kleave(" = 1 [found]"); return 1; skipped: return ctx->skipped_ret; } /* * Search inside a keyring for a key. We can search by walking to it * directly based on its index-key or we can iterate over the entire * tree looking for it, based on the match function. */ static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) { if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) { const void *object; object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, &ctx->index_key); return object ? ctx->iterator(object, ctx) : 0; } return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); } /* * Search a tree of keyrings that point to other keyrings up to the maximum * depth. */ static bool search_nested_keyrings(struct key *keyring, struct keyring_search_context *ctx) { struct { struct key *keyring; struct assoc_array_node *node; int slot; } stack[KEYRING_SEARCH_MAX_DEPTH]; struct assoc_array_shortcut *shortcut; struct assoc_array_node *node; struct assoc_array_ptr *ptr; struct key *key; int sp = 0, slot; kenter("{%d},{%s,%s}", keyring->serial, ctx->index_key.type->name, ctx->index_key.description); #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK) BUG_ON((ctx->flags & STATE_CHECKS) == 0 || (ctx->flags & STATE_CHECKS) == STATE_CHECKS); if (ctx->index_key.description) ctx->index_key.desc_len = strlen(ctx->index_key.description); /* Check to see if this top-level keyring is what we are looking for * and whether it is valid or not. 
*/ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || keyring_compare_object(keyring, &ctx->index_key)) { ctx->skipped_ret = 2; switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { case 1: goto found; case 2: return false; default: break; } } ctx->skipped_ret = 0; /* Start processing a new keyring */ descend_to_keyring: kdebug("descend to %d", keyring->serial); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto not_this_keyring; /* Search through the keys in this keyring before its searching its * subtrees. */ if (search_keyring(keyring, ctx)) goto found; /* Then manually iterate through the keyrings nested in this one. * * Start from the root node of the index tree. Because of the way the * hash function has been set up, keyrings cluster on the leftmost * branch of the root node (root slot 0) or in the root node itself. * Non-keyrings avoid the leftmost branch of the root entirely (root * slots 1-15). */ ptr = ACCESS_ONCE(keyring->keys.root); if (!ptr) goto not_this_keyring; if (assoc_array_ptr_is_shortcut(ptr)) { /* If the root is a shortcut, either the keyring only contains * keyring pointers (everything clusters behind root slot 0) or * doesn't contain any keyring pointers. */ shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) goto not_this_keyring; ptr = ACCESS_ONCE(shortcut->next_node); node = assoc_array_ptr_to_node(ptr); goto begin_node; } node = assoc_array_ptr_to_node(ptr); smp_read_barrier_depends(); ptr = node->slots[0]; if (!assoc_array_ptr_is_meta(ptr)) goto begin_node; descend_to_node: /* Descend to a more distal node in this keyring's content tree and go * through that. */ kdebug("descend"); if (assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->next_node); BUG_ON(!assoc_array_ptr_is_node(ptr)); } node = assoc_array_ptr_to_node(ptr); begin_node: kdebug("begin_node"); smp_read_barrier_depends(); slot = 0; ascend_to_node: /* Go through the slots in a node */ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { ptr = ACCESS_ONCE(node->slots[slot]); if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) goto descend_to_node; if (!keyring_ptr_is_keyring(ptr)) continue; key = keyring_ptr_to_key(ptr); if (sp >= KEYRING_SEARCH_MAX_DEPTH) { if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { ctx->result = ERR_PTR(-ELOOP); return false; } goto not_this_keyring; } /* Search a nested keyring */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) continue; /* stack the current position */ stack[sp].keyring = keyring; stack[sp].node = node; stack[sp].slot = slot; sp++; /* begin again with the new keyring */ keyring = key; goto descend_to_keyring; } /* We've dealt with all the slots in the current node, so now we need * to ascend to the parent and continue processing there. 
*/ ptr = ACCESS_ONCE(node->back_pointer); slot = node->parent_slot; if (ptr && assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->back_pointer); slot = shortcut->parent_slot; } if (!ptr) goto not_this_keyring; node = assoc_array_ptr_to_node(ptr); smp_read_barrier_depends(); slot++; /* If we've ascended to the root (zero backpointer), we must have just * finished processing the leftmost branch rather than the root slots - * so there can't be any more keyrings for us to find. */ if (node->back_pointer) { kdebug("ascend %d", slot); goto ascend_to_node; } /* The keyring we're looking at was disqualified or didn't contain a * matching key. */ not_this_keyring: kdebug("not_this_keyring %d", sp); if (sp <= 0) { kleave(" = false"); return false; } /* Resume the processing of a keyring higher up in the tree */ sp--; keyring = stack[sp].keyring; node = stack[sp].node; slot = stack[sp].slot + 1; kdebug("ascend to %d [%d]", keyring->serial, slot); goto ascend_to_node; /* We found a viable match */ found: key = key_ref_to_ptr(ctx->result); key_check(key); if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { key->last_used_at = ctx->now.tv_sec; keyring->last_used_at = ctx->now.tv_sec; while (sp > 0) stack[--sp].keyring->last_used_at = ctx->now.tv_sec; } kleave(" = true"); return true; } /** * keyring_search_aux - Search a keyring tree for a key matching some criteria * @keyring_ref: A pointer to the keyring with possession indicator. * @ctx: The keyring search context. * * Search the supplied keyring tree for a key that matches the criteria given. * The root keyring and any linked keyrings must grant Search permission to the * caller to be searchable and keys can only be found if they too grant Search * to the caller. The possession flag on the root keyring pointer controls use * of the possessor bits in permissions checking of the entire tree. In * addition, the LSM gets to forbid keyring searches and key matches. * * The search is performed as a breadth-then-depth search up to the prescribed * limit (KEYRING_SEARCH_MAX_DEPTH). * * Keys are matched to the type provided and are then filtered by the match * function, which is given the description to use in any way it sees fit. The * match function may use any attributes of a key that it wishes to to * determine the match. Normally the match function from the key type would be * used. * * RCU can be used to prevent the keyring key lists from disappearing without * the need to take lots of locks. * * Returns a pointer to the found key and increments the key usage count if * successful; -EAGAIN if no matching keys were found, or if expired or revoked * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the * specified keyring wasn't a keyring. * * In the case of a successful return, the possession attribute from * @keyring_ref is propagated to the returned key reference. 
*/ key_ref_t keyring_search_aux(key_ref_t keyring_ref, struct keyring_search_context *ctx) { struct key *keyring; long err; ctx->iterator = keyring_search_iterator; ctx->possessed = is_key_possessed(keyring_ref); ctx->result = ERR_PTR(-EAGAIN); keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); if (keyring->type != &key_type_keyring) return ERR_PTR(-ENOTDIR); if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH); if (err < 0) return ERR_PTR(err); } rcu_read_lock(); ctx->now = current_kernel_time(); if (search_nested_keyrings(keyring, ctx)) __key_get(key_ref_to_ptr(ctx->result)); rcu_read_unlock(); return ctx->result; } /** * keyring_search - Search the supplied keyring tree for a matching key * @keyring: The root of the keyring tree to be searched. * @type: The type of keyring we want to find. * @description: The name of the keyring we want to find. * * As keyring_search_aux() above, but using the current task's credentials and * type's default matching function and preferred search method. */ key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, const char *description) { struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_DO_STATE_CHECK, }; key_ref_t key; int ret; if (type->match_preparse) { ret = type->match_preparse(&ctx.match_data); if (ret < 0) return ERR_PTR(ret); } key = keyring_search_aux(keyring, &ctx); if (type->match_free) type->match_free(&ctx.match_data); return key; } EXPORT_SYMBOL(keyring_search); /* * Search the given keyring for a key that might be updated. * * The caller must guarantee that the keyring is a keyring and that the * permission is granted to modify the keyring as no check is made here. The * caller must also hold a lock on the keyring semaphore. * * Returns a pointer to the found key with usage count incremented if * successful and returns NULL if not found. Revoked and invalidated keys are * skipped over. * * If successful, the possession indicator is propagated from the keyring ref * to the returned key reference. */ key_ref_t find_key_to_update(key_ref_t keyring_ref, const struct keyring_index_key *index_key) { struct key *keyring, *key; const void *object; keyring = key_ref_to_ptr(keyring_ref); kenter("{%d},{%s,%s}", keyring->serial, index_key->type->name, index_key->description); object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, index_key); if (object) goto found; kleave(" = NULL"); return NULL; found: key = keyring_ptr_to_key(object); if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { kleave(" = NULL [x]"); return NULL; } __key_get(key); kleave(" = {%d}", key->serial); return make_key_ref(key, is_key_possessed(keyring_ref)); } /* * Find a keyring with the specified name. * * All named keyrings in the current user namespace are searched, provided they * grant Search permission directly to the caller (unless this check is * skipped). Keyrings whose usage points have reached zero or who have been * revoked are skipped. * * Returns a pointer to the keyring with the keyring's refcount having being * incremented on success. -ENOKEY is returned if a key could not be found. 
*/ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) { struct key *keyring; int bucket; if (!name) return ERR_PTR(-EINVAL); bucket = keyring_hash(name); read_lock(&keyring_name_lock); if (keyring_name_hash[bucket].next) { /* search this hash bucket for a keyring with a matching name * that's readable and that hasn't been revoked */ list_for_each_entry(keyring, &keyring_name_hash[bucket], type_data.link ) { if (!kuid_has_mapping(current_user_ns(), keyring->user->uid)) continue; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) continue; if (strcmp(keyring->description, name) != 0) continue; if (!skip_perm_check && key_permission(make_key_ref(keyring, 0), KEY_NEED_SEARCH) < 0) continue; /* we've got a match but we might end up racing with * key_cleanup() if the keyring is currently 'dead' * (ie. it has a zero usage count) */ if (!atomic_inc_not_zero(&keyring->usage)) continue; keyring->last_used_at = current_kernel_time().tv_sec; goto out; } } keyring = ERR_PTR(-ENOKEY); out: read_unlock(&keyring_name_lock); return keyring; } static int keyring_detect_cycle_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); kenter("{%d}", key->serial); /* We might get a keyring with matching index-key that is nonetheless a * different keyring. */ if (key != ctx->match_data.raw_data) return 0; ctx->result = ERR_PTR(-EDEADLK); return 1; } /* * See if a cycle will will be created by inserting acyclic tree B in acyclic * tree A at the topmost level (ie: as a direct child of A). * * Since we are adding B to A at the top level, checking for cycles should just * be a matter of seeing if node A is somewhere in tree B. */ static int keyring_detect_cycle(struct key *A, struct key *B) { struct keyring_search_context ctx = { .index_key = A->index_key, .match_data.raw_data = A, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .iterator = keyring_detect_cycle_iterator, .flags = (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_NO_UPDATE_TIME | KEYRING_SEARCH_NO_CHECK_PERM | KEYRING_SEARCH_DETECT_TOO_DEEP), }; rcu_read_lock(); search_nested_keyrings(B, &ctx); rcu_read_unlock(); return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); } /* * Preallocate memory so that a key can be linked into to a keyring. */ int __key_link_begin(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit **_edit) __acquires(&keyring->sem) __acquires(&keyring_serialise_link_sem) { struct assoc_array_edit *edit; int ret; kenter("%d,%s,%s,", keyring->serial, index_key->type->name, index_key->description); BUG_ON(index_key->desc_len == 0); if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) goto error_krsem; /* serialise link/link calls to prevent parallel calls causing a cycle * when linking two keyring in opposite orders */ if (index_key->type == &key_type_keyring) down_write(&keyring_serialise_link_sem); /* Create an edit script that will insert/replace the key in the * keyring tree. */ edit = assoc_array_insert(&keyring->keys, &keyring_assoc_array_ops, index_key, NULL); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error_sem; } /* If we're not replacing a link in-place then we're going to need some * extra quota. 
*/ if (!edit->dead_leaf) { ret = key_payload_reserve(keyring, keyring->datalen + KEYQUOTA_LINK_BYTES); if (ret < 0) goto error_cancel; } *_edit = edit; kleave(" = 0"); return 0; error_cancel: assoc_array_cancel_edit(edit); error_sem: if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); error_krsem: up_write(&keyring->sem); kleave(" = %d", ret); return ret; } /* * Check already instantiated keys aren't going to be a problem. * * The caller must have called __key_link_begin(). Don't need to call this for * keys that were created since __key_link_begin() was called. */ int __key_link_check_live_key(struct key *keyring, struct key *key) { if (key->type == &key_type_keyring) /* check that we aren't going to create a cycle by linking one * keyring to another */ return keyring_detect_cycle(keyring, key); return 0; } /* * Link a key into to a keyring. * * Must be called with __key_link_begin() having being called. Discards any * already extant link to matching key if there is one, so that each keyring * holds at most one link to any given key of a particular type+description * combination. */ void __key_link(struct key *key, struct assoc_array_edit **_edit) { __key_get(key); assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); assoc_array_apply_edit(*_edit); *_edit = NULL; } /* * Finish linking a key into to a keyring. * * Must be called with __key_link_begin() having being called. */ void __key_link_end(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit *edit) __releases(&keyring->sem) __releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit) { if (!edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); } assoc_array_cancel_edit(edit); } up_write(&keyring->sem); } /** * key_link - Link a key to a keyring * @keyring: The keyring to make the link in. * @key: The key to link to. * * Make a link in a keyring to a key, such that the keyring holds a reference * on that key and the key can potentially be found by searching that keyring. * * This function will write-lock the keyring's semaphore and will consume some * of the user's key data quota to hold the link. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is * full, -EDQUOT if there is insufficient key data quota remaining to add * another link or -ENOMEM if there's insufficient memory. * * It is assumed that the caller has checked that it is permitted for a link to * be made (the keyring should have Write permission and the key Link * permission). 
*/ int key_link(struct key *keyring, struct key *key) { struct assoc_array_edit *edit; int ret; kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage)); key_check(keyring); key_check(key); if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) && !test_bit(KEY_FLAG_TRUSTED, &key->flags)) return -EPERM; ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret == 0) { kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage)); ret = __key_link_check_live_key(keyring, key); if (ret == 0) __key_link(key, &edit); __key_link_end(keyring, &key->index_key, edit); } kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage)); return ret; } EXPORT_SYMBOL(key_link); /** * key_unlink - Unlink the first link to a key from a keyring. * @keyring: The keyring to remove the link from. * @key: The key the link is to. * * Remove a link from a keyring to a key. * * This function will write-lock the keyring's semaphore. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if * the key isn't linked to by the keyring or -ENOMEM if there's insufficient * memory. * * It is assumed that the caller has checked that it is permitted for a link to * be removed (the keyring should have Write permission; no permissions are * required on the key). */ int key_unlink(struct key *keyring, struct key *key) { struct assoc_array_edit *edit; int ret; key_check(keyring); key_check(key); if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, &key->index_key); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error; } ret = -ENOENT; if (edit == NULL) goto error; assoc_array_apply_edit(edit); key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); ret = 0; error: up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(key_unlink); /** * keyring_clear - Clear a keyring * @keyring: The keyring to clear. * * Clear the contents of the specified keyring. * * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring. */ int keyring_clear(struct key *keyring) { struct assoc_array_edit *edit; int ret; if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); if (IS_ERR(edit)) { ret = PTR_ERR(edit); } else { if (edit) assoc_array_apply_edit(edit); key_payload_reserve(keyring, 0); ret = 0; } up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(keyring_clear); /* * Dispose of the links from a revoked keyring. * * This is called with the key sem write-locked. */ static void keyring_revoke(struct key *keyring) { struct assoc_array_edit *edit; edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); if (!IS_ERR(edit)) { if (edit) assoc_array_apply_edit(edit); key_payload_reserve(keyring, 0); } } static bool keyring_gc_select_iterator(void *object, void *iterator_data) { struct key *key = keyring_ptr_to_key(object); time_t *limit = iterator_data; if (key_is_dead(key, *limit)) return false; key_get(key); return true; } static int keyring_gc_check_iterator(const void *object, void *iterator_data) { const struct key *key = keyring_ptr_to_key(object); time_t *limit = iterator_data; key_check(key); return key_is_dead(key, *limit); } /* * Garbage collect pointers from a keyring. * * Not called with any locks held. The keyring's key struct will not be * deallocated under us as only our caller may deallocate it. 
*/ void keyring_gc(struct key *keyring, time_t limit) { int result; kenter("%x{%s}", keyring->serial, keyring->description ?: ""); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto dont_gc; /* scan the keyring looking for dead keys */ rcu_read_lock(); result = assoc_array_iterate(&keyring->keys, keyring_gc_check_iterator, &limit); rcu_read_unlock(); if (result == true) goto do_gc; dont_gc: kleave(" [no gc]"); return; do_gc: down_write(&keyring->sem); assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops, keyring_gc_select_iterator, &limit); up_write(&keyring->sem); kleave(" [gc]"); }
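As a standalone illustration of the multiply-and-fold helper that hash_key_type_and_desc() builds on in the file above, the following user-space rendering (an illustrative sketch, not part of the kernel sources) can be compiled and run to see how the top 32 bits of the 96-bit product are folded back into 64 bits:

/* Illustrative user-space copy of mult_64x32_and_fold(); not kernel code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mult_64x32_and_fold(uint64_t x, uint32_t y)
{
	uint64_t hi = (uint64_t)(uint32_t)(x >> 32) * y;
	uint64_t lo = (uint64_t)(uint32_t)x * y;

	/* Fold the top 32 bits of the 96-bit product back into 64 bits. */
	return lo + ((uint64_t)(uint32_t)hi << 32) + (uint32_t)(hi >> 32);
}

int main(void)
{
	/* The kernel hash repeatedly folds in the constant 9207; show two
	 * sample rounds with arbitrary input values. */
	uint64_t acc = mult_64x32_and_fold(0x0123456789abcdefULL, 13);

	acc = mult_64x32_and_fold(acc, 9207);
	printf("acc = %" PRIx64 "\n", acc);
	return 0;
}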
__releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit && !edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); assoc_array_cancel_edit(edit); } up_write(&keyring->sem); }
__releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit) { if (!edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); } assoc_array_cancel_edit(edit); } up_write(&keyring->sem); }
{'added': [(1184, '\tif (edit) {'), (1185, '\t\tif (!edit->dead_leaf) {'), (1186, '\t\t\tkey_payload_reserve(keyring,'), (1187, '\t\t\t\tkeyring->datalen - KEYQUOTA_LINK_BYTES);'), (1188, '\t\t}')], 'deleted': [(1184, '\tif (edit && !edit->dead_leaf) {'), (1185, '\t\tkey_payload_reserve(keyring,'), (1186, '\t\t\t\t keyring->datalen - KEYQUOTA_LINK_BYTES);')]}
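That diff is the substance of the CVE-2015-1333 fix recorded in this row: the old __key_link_end() (func_before) only ran assoc_array_cancel_edit() when the pending edit would not displace an existing link, so an update of an already-linked key left the edit script allocated and its memory leaked; the fixed version (func_after) always cancels a non-NULL edit and keeps only the quota refund conditional on !edit->dead_leaf. The sketch below is a rough, self-contained analogue of that "always release the resource, conditionally refund the quota" pattern; the names (link_end, quota) are invented for illustration and do not come from the kernel:

#include <stdlib.h>

struct edit {
	int dead_leaf;	/* non-zero: an existing link would be replaced in place */
};

/* Hypothetical analogue of __key_link_end(): the edit must be discarded on
 * every path, while the quota refund only applies when extra quota was
 * actually reserved (i.e. no in-place replacement). */
static void link_end(struct edit *edit, long *quota)
{
	if (edit) {
		if (!edit->dead_leaf)
			*quota -= 1;	/* give back the reserved link quota */
		free(edit);		/* always free the edit script */
	}
}

int main(void)
{
	long quota = 1;
	struct edit *e = calloc(1, sizeof(*e));

	if (e) {
		e->dead_leaf = 1;	/* in-place update: no refund, but still freed */
		link_end(e, &quota);
	}
	return 0;
}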
5
3
864
5,382
https://github.com/torvalds/linux
CVE-2015-1333
['CWE-119']
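For context on the CVE-2015-1333 label above: the leak could be driven entirely from user space, because every add_key(2) call that refers to an already-existing key goes through the in-place-replacement path that the old __key_link_end() failed to clean up. Below is a minimal sketch of that call pattern, assuming libkeyutils is installed (compile with -lkeyutils); it is meant only to illustrate the trigger, not as a polished reproducer, and the key name and loop count are arbitrary:

/* Illustrative trigger sketch for the keyring edit-script leak. */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		/* Re-adding the same type+description updates the existing key;
		 * on unpatched kernels each iteration leaked one edit script. */
		key_serial_t id = add_key("user", "cve-2015-1333-demo",
					  "payload", 7, KEY_SPEC_SESSION_KEYRING);
		if (id < 0) {
			perror("add_key");
			return 1;
		}
	}
	return 0;
}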
af_x25.c
x25_recvmsg
/* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, * randomly fail to work with new releases, misbehave and/or generally * screw up. It might even work. * * This code REQUIRES 2.1.15 or higher * * This module: * This module is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * History * X.25 001 Jonathan Naylor Started coding. * X.25 002 Jonathan Naylor Centralised disconnect handling. * New timer architecture. * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant. * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of * facilities negotiation and increased * the throughput upper limit. * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups * 2000-09-04 Henner Eisen Set sock->state in x25_accept(). * Fixed x25_output() related skb leakage. * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket. * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation. * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN * 2002-10-06 Arnaldo C. Melo Get rid of cli/sti, move proc stuff to * x25_proc.c, using seq_file * 2005-04-02 Shaun Pereira Selective sub address matching * with call user data * 2005-04-15 Shaun Pereira Fast select with no restriction on * response */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/notifier.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/ctype.h> #include <net/x25.h> #include <net/compat.h> int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20; int sysctl_x25_call_request_timeout = X25_DEFAULT_T21; int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22; int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23; int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2; int sysctl_x25_forward = 0; HLIST_HEAD(x25_list); DEFINE_RWLOCK(x25_list_lock); static const struct proto_ops x25_proto_ops; static struct x25_address null_x25_address = {" "}; #ifdef CONFIG_COMPAT struct compat_x25_subscrip_struct { char device[200-sizeof(compat_ulong_t)]; compat_ulong_t global_facil_mask; compat_uint_t extended; }; #endif int x25_parse_address_block(struct sk_buff *skb, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned char len; int needed; int rc; if (!pskb_may_pull(skb, 1)) { /* packet has no address block */ rc = 0; goto empty; } len = *skb->data; needed = 1 + (len >> 4) + (len & 0x0f); if (!pskb_may_pull(skb, needed)) { /* packet is too short to hold the addresses it claims to hold */ rc = -1; goto empty; } return x25_addr_ntoa(skb->data, called_addr, calling_addr); empty: *called_addr->x25_addr = 0; *calling_addr->x25_addr = 0; return rc; } int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; unsigned int i; called_len = (*p >> 0) & 0x0F; calling_len = (*p >> 4) & 0x0F; called = called_addr->x25_addr; calling = 
calling_addr->x25_addr; p++; for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *called++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *called++ = ((*p >> 4) & 0x0F) + '0'; } } else { if (i % 2 != 0) { *calling++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *calling++ = ((*p >> 4) & 0x0F) + '0'; } } } *called = *calling = '\0'; return 1 + (called_len + calling_len + 1) / 2; } int x25_addr_aton(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; int i; called = called_addr->x25_addr; calling = calling_addr->x25_addr; called_len = strlen(called); calling_len = strlen(calling); *p++ = (calling_len << 4) | (called_len << 0); for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *p |= (*called++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*called++ - '0') << 4; } } else { if (i % 2 != 0) { *p |= (*calling++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*calling++ - '0') << 4; } } } return 1 + (called_len + calling_len + 1) / 2; } /* * Socket removal during an interrupt is now safe. */ static void x25_remove_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_del_node_init(sk); write_unlock_bh(&x25_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void x25_kill_by_device(struct net_device *dev) { struct sock *s; write_lock_bh(&x25_list_lock); sk_for_each(s, &x25_list) if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) x25_disconnect(s, ENETUNREACH, 0, 0); write_unlock_bh(&x25_list_lock); } /* * Handle device status changes. */ static int x25_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type == ARPHRD_X25 #if IS_ENABLED(CONFIG_LLC) || dev->type == ARPHRD_ETHER #endif ) { switch (event) { case NETDEV_UP: x25_link_device_up(dev); break; case NETDEV_GOING_DOWN: nb = x25_get_neigh(dev); if (nb) { x25_terminate_link(nb); x25_neigh_put(nb); } break; case NETDEV_DOWN: x25_kill_by_device(dev); x25_route_device_down(dev); x25_link_device_down(dev); break; } } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. */ static void x25_insert_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_add_node(sk, &x25_list); write_unlock_bh(&x25_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. Check the full list for an address/cud match. * If no cuds match return the next_best thing, an address match. * Note: if a listening socket has cud set it must only get calls * with matching cud. 
*/ static struct sock *x25_find_listener(struct x25_address *addr, struct sk_buff *skb) { struct sock *s; struct sock *next_best; read_lock_bh(&x25_list_lock); next_best = NULL; sk_for_each(s, &x25_list) if ((!strcmp(addr->x25_addr, x25_sk(s)->source_addr.x25_addr) || !strcmp(addr->x25_addr, null_x25_address.x25_addr)) && s->sk_state == TCP_LISTEN) { /* * Found a listening socket, now check the incoming * call user data vs this sockets call user data */ if (x25_sk(s)->cudmatchlength > 0 && skb->len >= x25_sk(s)->cudmatchlength) { if((memcmp(x25_sk(s)->calluserdata.cuddata, skb->data, x25_sk(s)->cudmatchlength)) == 0) { sock_hold(s); goto found; } } else next_best = s; } if (next_best) { s = next_best; sock_hold(s); goto found; } s = NULL; found: read_unlock_bh(&x25_list_lock); return s; } /* * Find a connected X.25 socket given my LCI and neighbour. */ static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; sk_for_each(s, &x25_list) if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { sock_hold(s); goto found; } s = NULL; found: return s; } struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; read_lock_bh(&x25_list_lock); s = __x25_find_socket(lci, nb); read_unlock_bh(&x25_list_lock); return s; } /* * Find a unique LCI for a given device. */ static unsigned int x25_new_lci(struct x25_neigh *nb) { unsigned int lci = 1; struct sock *sk; read_lock_bh(&x25_list_lock); while ((sk = __x25_find_socket(lci, nb)) != NULL) { sock_put(sk); if (++lci == 4096) { lci = 0; break; } } read_unlock_bh(&x25_list_lock); return lci; } /* * Deferred destroy. */ static void __x25_destroy_socket(struct sock *); /* * handler for deferred kills. */ static void x25_destroy_timer(unsigned long data) { x25_destroy_socket_from_timer((struct sock *)data); } /* * This is called from user mode and the timers. Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. * Not static as it's used by the timer */ static void __x25_destroy_socket(struct sock *sk) { struct sk_buff *skb; x25_stop_heartbeat(sk); x25_stop_timer(sk); x25_remove_socket(sk); x25_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* * Queue the unaccepted socket for death */ skb->sk->sk_state = TCP_LISTEN; sock_set_flag(skb->sk, SOCK_DEAD); x25_start_heartbeat(skb->sk); x25_sk(skb->sk)->state = X25_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.expires = jiffies + 10 * HZ; sk->sk_timer.function = x25_destroy_timer; sk->sk_timer.data = (unsigned long)sk; add_timer(&sk->sk_timer); } else { /* drop last reference so sock_put will free */ __sock_put(sk); } } void x25_destroy_socket_from_timer(struct sock *sk) { sock_hold(sk); bh_lock_sock(sk); __x25_destroy_socket(sk); bh_unlock_sock(sk); sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * X.25 socket object. 
*/ static int x25_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { int opt; struct sock *sk = sock->sk; int rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EINVAL; if (optlen < sizeof(int)) goto out; rc = -EFAULT; if (get_user(opt, (int __user *)optval)) goto out; if (opt) set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); else clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = 0; out: return rc; } static int x25_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int val, len, rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EFAULT; if (get_user(len, optlen)) goto out; len = min_t(unsigned int, len, sizeof(int)); rc = -EINVAL; if (len < 0) goto out; rc = -EFAULT; if (put_user(len, optlen)) goto out; val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; out: return rc; } static int x25_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int rc = -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state != TCP_LISTEN) { memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; rc = 0; } release_sock(sk); return rc; } static struct proto x25_proto = { .name = "X25", .owner = THIS_MODULE, .obj_size = sizeof(struct x25_sock), }; static struct sock *x25_alloc_socket(struct net *net) { struct x25_sock *x25; struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto); if (!sk) goto out; sock_init_data(NULL, sk); x25 = x25_sk(sk); skb_queue_head_init(&x25->ack_queue); skb_queue_head_init(&x25->fragment_queue); skb_queue_head_init(&x25->interrupt_in_queue); skb_queue_head_init(&x25->interrupt_out_queue); out: return sk; } static int x25_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct x25_sock *x25; int rc = -EAFNOSUPPORT; if (!net_eq(net, &init_net)) goto out; rc = -ESOCKTNOSUPPORT; if (sock->type != SOCK_SEQPACKET) goto out; rc = -EINVAL; if (protocol) goto out; rc = -ENOBUFS; if ((sk = x25_alloc_socket(net)) == NULL) goto out; x25 = x25_sk(sk); sock_init_data(sock, sk); x25_init_timers(sk); sock->ops = &x25_proto_ops; sk->sk_protocol = protocol; sk->sk_backlog_rcv = x25_backlog_rcv; x25->t21 = sysctl_x25_call_request_timeout; x25->t22 = sysctl_x25_reset_request_timeout; x25->t23 = sysctl_x25_clear_request_timeout; x25->t2 = sysctl_x25_ack_holdback_timeout; x25->state = X25_STATE_0; x25->cudmatchlength = 0; set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */ /* on call accept */ x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE; x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; x25->facilities.throughput = 0; /* by default don't negotiate throughput */ x25->facilities.reverse = X25_DEFAULT_REVERSE; x25->dte_facilities.calling_len = 0; x25->dte_facilities.called_len = 0; memset(x25->dte_facilities.called_ae, '\0', sizeof(x25->dte_facilities.called_ae)); memset(x25->dte_facilities.calling_ae, '\0', sizeof(x25->dte_facilities.calling_ae)); rc = 0; out: return rc; } static struct sock *x25_make_new(struct sock *osk) { struct sock *sk = NULL; struct x25_sock *x25, *ox25; if (osk->sk_type != SOCK_SEQPACKET) goto out; if ((sk = x25_alloc_socket(sock_net(osk))) == NULL) goto out; x25 = x25_sk(sk); sk->sk_type = osk->sk_type; 
sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sk->sk_backlog_rcv = osk->sk_backlog_rcv; sock_copy_flags(sk, osk); ox25 = x25_sk(osk); x25->t21 = ox25->t21; x25->t22 = ox25->t22; x25->t23 = ox25->t23; x25->t2 = ox25->t2; x25->flags = ox25->flags; x25->facilities = ox25->facilities; x25->dte_facilities = ox25->dte_facilities; x25->cudmatchlength = ox25->cudmatchlength; clear_bit(X25_INTERRUPT_FLAG, &x25->flags); x25_init_timers(sk); out: return sk; } static int x25_release(struct socket *sock) { struct sock *sk = sock->sk; struct x25_sock *x25; if (!sk) return 0; x25 = x25_sk(sk); sock_hold(sk); lock_sock(sk); switch (x25->state) { case X25_STATE_0: case X25_STATE_2: x25_disconnect(sk, 0, 0, 0); __x25_destroy_socket(sk); goto out; case X25_STATE_1: case X25_STATE_3: case X25_STATE_4: x25_clear_queues(sk); x25_write_internal(sk, X25_CLEAR_REQUEST); x25_start_t23timer(sk); x25->state = X25_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; } sock_orphan(sk); out: release_sock(sk); sock_put(sk); return 0; } static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; int len, i, rc = 0; if (!sock_flag(sk, SOCK_ZAPPED) || addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) { rc = -EINVAL; goto out; } len = strlen(addr->sx25_addr.x25_addr); for (i = 0; i < len; i++) { if (!isdigit(addr->sx25_addr.x25_addr[i])) { rc = -EINVAL; goto out; } } lock_sock(sk); x25_sk(sk)->source_addr = addr->sx25_addr; x25_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); release_sock(sk); SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); out: return rc; } static int x25_wait_for_connection_establishment(struct sock *sk) { DECLARE_WAITQUEUE(wait, current); int rc; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = sock_error(sk); if (rc) { sk->sk_socket->state = SS_UNCONNECTED; break; } rc = 0; if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); schedule(); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int x25_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; struct x25_route *rt; int rc = 0; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; goto out; /* Connect completed during a ERESTARTSYS event */ } rc = -ECONNREFUSED; if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; goto out; } rc = -EISCONN; /* No reconnect on a seqpacket socket */ if (sk->sk_state == TCP_ESTABLISHED) goto out; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rc = -EINVAL; if (addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) goto out; rc = -ENETUNREACH; rt = x25_get_route(&addr->sx25_addr); if (!rt) goto out; x25->neighbour = x25_get_neigh(rt->dev); if (!x25->neighbour) goto out_put_route; x25_limit_facilities(&x25->facilities, x25->neighbour); x25->lci = x25_new_lci(x25->neighbour); if (!x25->lci) goto out_put_neigh; 
rc = -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ goto out_put_neigh; if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr)) memset(&x25->source_addr, '\0', X25_ADDR_LEN); x25->dest_addr = addr->sx25_addr; /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; x25->state = X25_STATE_1; x25_write_internal(sk, X25_CALL_REQUEST); x25_start_heartbeat(sk); x25_start_t21timer(sk); /* Now the loop */ rc = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) goto out_put_neigh; rc = x25_wait_for_connection_establishment(sk); if (rc) goto out_put_neigh; sock->state = SS_CONNECTED; rc = 0; out_put_neigh: if (rc) x25_neigh_put(x25->neighbour); out_put_route: x25_route_put(rt); out: release_sock(sk); return rc; } static int x25_wait_for_data(struct sock *sk, long timeout) { DECLARE_WAITQUEUE(wait, current); int rc = 0; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; rc = 0; if (skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int x25_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct sock *newsk; struct sk_buff *skb; int rc = -EINVAL; if (!sk) goto out; rc = -EOPNOTSUPP; if (sk->sk_type != SOCK_SEQPACKET) goto out; lock_sock(sk); rc = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out2; rc = x25_wait_for_data(sk, sk->sk_rcvtimeo); if (rc) goto out2; skb = skb_dequeue(&sk->sk_receive_queue); rc = -EINVAL; if (!skb->sk) goto out2; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; rc = 0; out2: release_sock(sk); out: return rc; } static int x25_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr; struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); int rc = 0; if (peer) { if (sk->sk_state != TCP_ESTABLISHED) { rc = -ENOTCONN; goto out; } sx25->sx25_addr = x25->dest_addr; } else sx25->sx25_addr = x25->source_addr; sx25->sx25_family = AF_X25; *uaddr_len = sizeof(*sx25); out: return rc; } int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, unsigned int lci) { struct sock *sk; struct sock *make; struct x25_sock *makex25; struct x25_address source_addr, dest_addr; struct x25_facilities facilities; struct x25_dte_facilities dte_facilities; int len, addr_len, rc; /* * Remove the LCI and frame type. */ skb_pull(skb, X25_STD_MIN_LEN); /* * Extract the X.25 addresses and convert them to ASCII strings, * and remove them. 
* * Address block is mandatory in call request packets */ addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); if (addr_len <= 0) goto out_clear_request; skb_pull(skb, addr_len); /* * Get the length of the facilities, skip past them for the moment * get the call user data because this is needed to determine * the correct listener * * Facilities length is mandatory in call request packets */ if (!pskb_may_pull(skb, 1)) goto out_clear_request; len = skb->data[0] + 1; if (!pskb_may_pull(skb, len)) goto out_clear_request; skb_pull(skb,len); /* * Ensure that the amount of call user data is valid. */ if (skb->len > X25_MAX_CUD_LEN) goto out_clear_request; /* * Get all the call user data so it can be used in * x25_find_listener and skb_copy_from_linear_data up ahead. */ if (!pskb_may_pull(skb, skb->len)) goto out_clear_request; /* * Find a listener for the particular address/cud pair. */ sk = x25_find_listener(&source_addr,skb); skb_push(skb,len); if (sk != NULL && sk_acceptq_is_full(sk)) { goto out_sock_put; } /* * We dont have any listeners for this incoming call. * Try forwarding it. */ if (sk == NULL) { skb_push(skb, addr_len + X25_STD_MIN_LEN); if (sysctl_x25_forward && x25_forward_call(&dest_addr, nb, skb, lci) > 0) { /* Call was forwarded, dont process it any more */ kfree_skb(skb); rc = 1; goto out; } else { /* No listeners, can't forward, clear the call */ goto out_clear_request; } } /* * Try to reach a compromise on the requested facilities. */ len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities); if (len == -1) goto out_sock_put; /* * current neighbour/link might impose additional limits * on certain facilties */ x25_limit_facilities(&facilities, nb); /* * Try to create a new socket. */ make = x25_make_new(sk); if (!make) goto out_sock_put; /* * Remove the facilities */ skb_pull(skb, len); skb->sk = make; make->sk_state = TCP_ESTABLISHED; makex25 = x25_sk(make); makex25->lci = lci; makex25->dest_addr = dest_addr; makex25->source_addr = source_addr; makex25->neighbour = nb; makex25->facilities = facilities; makex25->dte_facilities= dte_facilities; makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask; /* ensure no reverse facil on accept */ makex25->vc_facil_mask &= ~X25_MASK_REVERSE; /* ensure no calling address extension on accept */ makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE; makex25->cudmatchlength = x25_sk(sk)->cudmatchlength; /* Normally all calls are accepted immediately */ if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) { x25_write_internal(make, X25_CALL_ACCEPTED); makex25->state = X25_STATE_3; } /* * Incoming Call User Data. 
*/ skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len); makex25->calluserdata.cudlength = skb->len; sk->sk_ack_backlog++; x25_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); x25_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); rc = 1; sock_put(sk); out: return rc; out_sock_put: sock_put(sk); out_clear_request: rc = 0; x25_transmit_clear_request(nb, lci, 0x01); goto out; } static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name; struct sockaddr_x25 sx25; struct sk_buff *skb; unsigned char *asmptr; int noblock = msg->msg_flags & MSG_DONTWAIT; size_t size; int qbit = 0, rc = -EINVAL; lock_sock(sk); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT)) goto out; /* we currently don't support segmented records at the user interface */ if (!(msg->msg_flags & (MSG_EOR|MSG_OOB))) goto out; rc = -EADDRNOTAVAIL; if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); goto out; } rc = -ENETUNREACH; if (!x25->neighbour) goto out; if (usx25) { rc = -EINVAL; if (msg->msg_namelen < sizeof(sx25)) goto out; memcpy(&sx25, usx25, sizeof(sx25)); rc = -EISCONN; if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr)) goto out; rc = -EINVAL; if (sx25.sx25_family != AF_X25) goto out; } else { /* * FIXME 1003.1g - if the socket is like this because * it has become closed (not started closed) we ought * to SIGPIPE, EPIPE; */ rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; sx25.sx25_family = AF_X25; sx25.sx25_addr = x25->dest_addr; } /* Sanity check the packet size */ if (len > 65535) { rc = -EMSGSIZE; goto out; } SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n"); /* Build a packet */ SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n"); if ((msg->msg_flags & MSG_OOB) && len > 32) len = 32; size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); if (!skb) goto out; X25_SKB_CB(skb)->flags = msg->msg_flags; skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN); /* * Put the data on the end */ SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n"); skb_reset_transport_header(skb); skb_put(skb, len); rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (rc) goto out_kfree_skb; /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. 
*/ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { if (!pskb_may_pull(skb, 1)) goto out_kfree_skb; qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the X.25 header */ SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n"); if (msg->msg_flags & MSG_OOB) { if (x25->neighbour->extended) { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } else { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } } else { if (x25->neighbour->extended) { /* Build an Extended X.25 header */ asmptr = skb_push(skb, X25_EXT_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; *asmptr++ = X25_DATA; } else { /* Build an Standard X.25 header */ asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; } if (qbit) skb->data[0] |= X25_Q_BIT; } SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n"); SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n"); rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out_kfree_skb; if (msg->msg_flags & MSG_OOB) skb_queue_tail(&x25->interrupt_out_queue, skb); else { rc = x25_output(sk, skb); len = rc; if (rc < 0) kfree_skb(skb); else if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) len++; } x25_kick(sk); rc = len; out: release_sock(sk); return rc; out_kfree_skb: kfree_skb(skb); goto out; } static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; int qbit, header_len; struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; lock_sock(sk); if (x25->neighbour == NULL) goto out; header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN : X25_STD_MIN_LEN; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) goto out; if (flags & MSG_OOB) { rc = -EINVAL; if (sock_flag(sk, SOCK_URGINLINE) || !skb_peek(&x25->interrupt_in_queue)) goto out; skb = skb_dequeue(&x25->interrupt_in_queue); if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) goto out_free_dgram; skb_pull(skb, X25_STD_MIN_LEN); /* * No Q bit information on Interrupt data. 
*/ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = 0x00; } msg->msg_flags |= MSG_OOB; } else { /* Now we can treat all alike */ release_sock(sk); skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); lock_sock(sk); if (!skb) goto out; if (!pskb_may_pull(skb, header_len)) goto out_free_dgram; qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; skb_pull(skb, header_len); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = qbit; } } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (rc) goto out_free_dgram; if (sx25) { sx25->sx25_family = AF_X25; sx25->sx25_addr = x25->dest_addr; } msg->msg_namelen = sizeof(struct sockaddr_x25); x25_check_rbuf(sk); rc = copied; out_free_dgram: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; } static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); void __user *argp = (void __user *)arg; int rc; switch (cmd) { case TIOCOUTQ: { int amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; rc = put_user(amount, (unsigned int __user *)argp); break; } case TIOCINQ: { struct sk_buff *skb; int amount = 0; /* * These two are safe on a single CPU system as * only user tasks fiddle here */ lock_sock(sk); if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; release_sock(sk); rc = put_user(amount, (unsigned int __user *)argp); break; } case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = sock_get_timestamp(sk, (struct timeval __user *)argp); break; case SIOCGSTAMPNS: rc = -EINVAL; if (sk) rc = sock_get_timestampns(sk, (struct timespec __user *)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->facilities, sizeof(x25->facilities)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SFACILITIES: { struct x25_facilities facilities; rc = -EFAULT; if (copy_from_user(&facilities, argp, sizeof(facilities))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_fac_release; if (facilities.pacsize_in < X25_PS16 || facilities.pacsize_in > X25_PS4096) goto out_fac_release; if (facilities.pacsize_out < X25_PS16 || facilities.pacsize_out > X25_PS4096) goto out_fac_release; if (facilities.winsize_in < 1 || facilities.winsize_in > 127) goto out_fac_release; if (facilities.throughput) { int out = facilities.throughput & 0xf0; int in = facilities.throughput & 0x0f; if (!out) facilities.throughput |= X25_DEFAULT_THROUGHPUT << 4; else if (out < 0x30 || out > 0xD0) goto out_fac_release; if (!in) facilities.throughput |= X25_DEFAULT_THROUGHPUT; else if (in < 0x03 || in > 0x0D) goto out_fac_release; } if (facilities.reverse && (facilities.reverse & 0x81) != 0x81) goto out_fac_release; x25->facilities = facilities; rc = 0; out_fac_release: release_sock(sk); break; } case SIOCX25GDTEFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->dte_facilities, sizeof(x25->dte_facilities)); release_sock(sk); if (rc) rc = -EFAULT; break; } case SIOCX25SDTEFACILITIES: { struct x25_dte_facilities dtefacs; rc = -EFAULT; if (copy_from_user(&dtefacs, argp, sizeof(dtefacs))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_dtefac_release; if (dtefacs.calling_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.calling_ae == NULL) goto out_dtefac_release; if (dtefacs.called_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.called_ae == NULL) goto out_dtefac_release; x25->dte_facilities = dtefacs; rc = 0; out_dtefac_release: release_sock(sk); break; } case SIOCX25GCALLUSERDATA: { lock_sock(sk); rc = copy_to_user(argp, &x25->calluserdata, sizeof(x25->calluserdata)) ? -EFAULT : 0; release_sock(sk); break; } case SIOCX25SCALLUSERDATA: { struct x25_calluserdata calluserdata; rc = -EFAULT; if (copy_from_user(&calluserdata, argp, sizeof(calluserdata))) break; rc = -EINVAL; if (calluserdata.cudlength > X25_MAX_CUD_LEN) break; lock_sock(sk); x25->calluserdata = calluserdata; release_sock(sk); rc = 0; break; } case SIOCX25GCAUSEDIAG: { lock_sock(sk); rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SCAUSEDIAG: { struct x25_causediag causediag; rc = -EFAULT; if (copy_from_user(&causediag, argp, sizeof(causediag))) break; lock_sock(sk); x25->causediag = causediag; release_sock(sk); rc = 0; break; } case SIOCX25SCUDMATCHLEN: { struct x25_subaddr sub_addr; rc = -EINVAL; lock_sock(sk); if(sk->sk_state != TCP_CLOSE) goto out_cud_release; rc = -EFAULT; if (copy_from_user(&sub_addr, argp, sizeof(sub_addr))) goto out_cud_release; rc = -EINVAL; if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN) goto out_cud_release; x25->cudmatchlength = sub_addr.cudmatchlength; rc = 0; out_cud_release: release_sock(sk); break; } case SIOCX25CALLACCPTAPPRV: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_CLOSE) { clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); rc = 0; } release_sock(sk); break; } case SIOCX25SENDCALLACCPT: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) goto out_sendcallaccpt_release; /* must call accptapprv above */ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags)) goto out_sendcallaccpt_release; x25_write_internal(sk, X25_CALL_ACCEPTED); x25->state = X25_STATE_3; rc = 0; out_sendcallaccpt_release: release_sock(sk); break; } default: rc = -ENOIOCTLCMD; break; } return rc; } static const struct net_proto_family x25_family_ops = { .family = AF_X25, .create = x25_create, .owner = THIS_MODULE, }; #ifdef CONFIG_COMPAT static int compat_x25_subscr_ioctl(unsigned int cmd, struct compat_x25_subscrip_struct __user *x25_subscr32) { struct compat_x25_subscrip_struct x25_subscr; struct x25_neigh *nb; struct net_device *dev; int rc = -EINVAL; rc = -EFAULT; if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32))) goto out; rc = -EINVAL; dev = x25_dev_get(x25_subscr.device); if (dev == NULL) goto out; nb = x25_get_neigh(dev); if (nb == NULL) goto out_dev_put; dev_put(dev); if (cmd == SIOCX25GSUBSCRIP) { read_lock_bh(&x25_neigh_list_lock); x25_subscr.extended = nb->extended; x25_subscr.global_facil_mask = nb->global_facil_mask; read_unlock_bh(&x25_neigh_list_lock); rc = copy_to_user(x25_subscr32, &x25_subscr, sizeof(*x25_subscr32)) ? 
-EFAULT : 0; } else { rc = -EINVAL; if (x25_subscr.extended == 0 || x25_subscr.extended == 1) { rc = 0; write_lock_bh(&x25_neigh_list_lock); nb->extended = x25_subscr.extended; nb->global_facil_mask = x25_subscr.global_facil_mask; write_unlock_bh(&x25_neigh_list_lock); } } x25_neigh_put(nb); out: return rc; out_dev_put: dev_put(dev); goto out; } static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; int rc = -ENOIOCTLCMD; switch(cmd) { case TIOCOUTQ: case TIOCINQ: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = compat_sock_get_timestamp(sk, (struct timeval __user*)argp); break; case SIOCGSTAMPNS: rc = -EINVAL; if (sk) rc = compat_sock_get_timestampns(sk, (struct timespec __user*)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: case SIOCX25SFACILITIES: case SIOCX25GDTEFACILITIES: case SIOCX25SDTEFACILITIES: case SIOCX25GCALLUSERDATA: case SIOCX25SCALLUSERDATA: case SIOCX25GCAUSEDIAG: case SIOCX25SCAUSEDIAG: case SIOCX25SCUDMATCHLEN: case SIOCX25CALLACCPTAPPRV: case SIOCX25SENDCALLACCPT: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; default: rc = -ENOIOCTLCMD; break; } return rc; } #endif static const struct proto_ops x25_proto_ops = { .family = AF_X25, .owner = THIS_MODULE, .release = x25_release, .bind = x25_bind, .connect = x25_connect, .socketpair = sock_no_socketpair, .accept = x25_accept, .getname = x25_getname, .poll = datagram_poll, .ioctl = x25_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_x25_ioctl, #endif .listen = x25_listen, .shutdown = sock_no_shutdown, .setsockopt = x25_setsockopt, .getsockopt = x25_getsockopt, .sendmsg = x25_sendmsg, .recvmsg = x25_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct packet_type x25_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_X25), .func = x25_lapb_receive_frame, }; static struct notifier_block x25_dev_notifier = { .notifier_call = x25_device_event, }; void x25_kill_by_neigh(struct x25_neigh *nb) { struct sock *s; write_lock_bh(&x25_list_lock); sk_for_each(s, &x25_list) if (x25_sk(s)->neighbour == nb) x25_disconnect(s, ENETUNREACH, 0, 0); write_unlock_bh(&x25_list_lock); /* Remove any related forwards */ x25_clear_forward_by_dev(nb->dev); } static int __init x25_init(void) { int rc = proto_register(&x25_proto, 0); if (rc != 0) goto out; rc = sock_register(&x25_family_ops); if (rc != 0) goto out_proto; dev_add_pack(&x25_packet_type); rc = register_netdevice_notifier(&x25_dev_notifier); if (rc != 0) goto out_sock; printk(KERN_INFO "X.25 for Linux Version 0.2\n"); x25_register_sysctl(); rc = x25_proc_init(); if (rc != 0) goto out_dev; out: return rc; out_dev: unregister_netdevice_notifier(&x25_dev_notifier); out_sock: sock_unregister(AF_X25); out_proto: proto_unregister(&x25_proto); goto out; } module_init(x25_init); static void __exit x25_exit(void) { x25_proc_exit(); x25_link_free(); x25_route_free(); x25_unregister_sysctl(); 
unregister_netdevice_notifier(&x25_dev_notifier); dev_remove_pack(&x25_packet_type); sock_unregister(AF_X25); proto_unregister(&x25_proto); } module_exit(x25_exit); MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_X25);
/* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, * randomly fail to work with new releases, misbehave and/or generally * screw up. It might even work. * * This code REQUIRES 2.1.15 or higher * * This module: * This module is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * History * X.25 001 Jonathan Naylor Started coding. * X.25 002 Jonathan Naylor Centralised disconnect handling. * New timer architecture. * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant. * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of * facilities negotiation and increased * the throughput upper limit. * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups * 2000-09-04 Henner Eisen Set sock->state in x25_accept(). * Fixed x25_output() related skb leakage. * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket. * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation. * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN * 2002-10-06 Arnaldo C. Melo Get rid of cli/sti, move proc stuff to * x25_proc.c, using seq_file * 2005-04-02 Shaun Pereira Selective sub address matching * with call user data * 2005-04-15 Shaun Pereira Fast select with no restriction on * response */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/notifier.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/ctype.h> #include <net/x25.h> #include <net/compat.h> int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20; int sysctl_x25_call_request_timeout = X25_DEFAULT_T21; int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22; int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23; int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2; int sysctl_x25_forward = 0; HLIST_HEAD(x25_list); DEFINE_RWLOCK(x25_list_lock); static const struct proto_ops x25_proto_ops; static struct x25_address null_x25_address = {" "}; #ifdef CONFIG_COMPAT struct compat_x25_subscrip_struct { char device[200-sizeof(compat_ulong_t)]; compat_ulong_t global_facil_mask; compat_uint_t extended; }; #endif int x25_parse_address_block(struct sk_buff *skb, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned char len; int needed; int rc; if (!pskb_may_pull(skb, 1)) { /* packet has no address block */ rc = 0; goto empty; } len = *skb->data; needed = 1 + (len >> 4) + (len & 0x0f); if (!pskb_may_pull(skb, needed)) { /* packet is too short to hold the addresses it claims to hold */ rc = -1; goto empty; } return x25_addr_ntoa(skb->data, called_addr, calling_addr); empty: *called_addr->x25_addr = 0; *calling_addr->x25_addr = 0; return rc; } int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; unsigned int i; called_len = (*p >> 0) & 0x0F; calling_len = (*p >> 4) & 0x0F; called = called_addr->x25_addr; calling = 
calling_addr->x25_addr; p++; for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *called++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *called++ = ((*p >> 4) & 0x0F) + '0'; } } else { if (i % 2 != 0) { *calling++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *calling++ = ((*p >> 4) & 0x0F) + '0'; } } } *called = *calling = '\0'; return 1 + (called_len + calling_len + 1) / 2; } int x25_addr_aton(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; int i; called = called_addr->x25_addr; calling = calling_addr->x25_addr; called_len = strlen(called); calling_len = strlen(calling); *p++ = (calling_len << 4) | (called_len << 0); for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *p |= (*called++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*called++ - '0') << 4; } } else { if (i % 2 != 0) { *p |= (*calling++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*calling++ - '0') << 4; } } } return 1 + (called_len + calling_len + 1) / 2; } /* * Socket removal during an interrupt is now safe. */ static void x25_remove_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_del_node_init(sk); write_unlock_bh(&x25_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void x25_kill_by_device(struct net_device *dev) { struct sock *s; write_lock_bh(&x25_list_lock); sk_for_each(s, &x25_list) if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) x25_disconnect(s, ENETUNREACH, 0, 0); write_unlock_bh(&x25_list_lock); } /* * Handle device status changes. */ static int x25_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type == ARPHRD_X25 #if IS_ENABLED(CONFIG_LLC) || dev->type == ARPHRD_ETHER #endif ) { switch (event) { case NETDEV_UP: x25_link_device_up(dev); break; case NETDEV_GOING_DOWN: nb = x25_get_neigh(dev); if (nb) { x25_terminate_link(nb); x25_neigh_put(nb); } break; case NETDEV_DOWN: x25_kill_by_device(dev); x25_route_device_down(dev); x25_link_device_down(dev); break; } } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. */ static void x25_insert_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_add_node(sk, &x25_list); write_unlock_bh(&x25_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. Check the full list for an address/cud match. * If no cuds match return the next_best thing, an address match. * Note: if a listening socket has cud set it must only get calls * with matching cud. 
*/ static struct sock *x25_find_listener(struct x25_address *addr, struct sk_buff *skb) { struct sock *s; struct sock *next_best; read_lock_bh(&x25_list_lock); next_best = NULL; sk_for_each(s, &x25_list) if ((!strcmp(addr->x25_addr, x25_sk(s)->source_addr.x25_addr) || !strcmp(addr->x25_addr, null_x25_address.x25_addr)) && s->sk_state == TCP_LISTEN) { /* * Found a listening socket, now check the incoming * call user data vs this sockets call user data */ if (x25_sk(s)->cudmatchlength > 0 && skb->len >= x25_sk(s)->cudmatchlength) { if((memcmp(x25_sk(s)->calluserdata.cuddata, skb->data, x25_sk(s)->cudmatchlength)) == 0) { sock_hold(s); goto found; } } else next_best = s; } if (next_best) { s = next_best; sock_hold(s); goto found; } s = NULL; found: read_unlock_bh(&x25_list_lock); return s; } /* * Find a connected X.25 socket given my LCI and neighbour. */ static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; sk_for_each(s, &x25_list) if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { sock_hold(s); goto found; } s = NULL; found: return s; } struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; read_lock_bh(&x25_list_lock); s = __x25_find_socket(lci, nb); read_unlock_bh(&x25_list_lock); return s; } /* * Find a unique LCI for a given device. */ static unsigned int x25_new_lci(struct x25_neigh *nb) { unsigned int lci = 1; struct sock *sk; read_lock_bh(&x25_list_lock); while ((sk = __x25_find_socket(lci, nb)) != NULL) { sock_put(sk); if (++lci == 4096) { lci = 0; break; } } read_unlock_bh(&x25_list_lock); return lci; } /* * Deferred destroy. */ static void __x25_destroy_socket(struct sock *); /* * handler for deferred kills. */ static void x25_destroy_timer(unsigned long data) { x25_destroy_socket_from_timer((struct sock *)data); } /* * This is called from user mode and the timers. Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. * Not static as it's used by the timer */ static void __x25_destroy_socket(struct sock *sk) { struct sk_buff *skb; x25_stop_heartbeat(sk); x25_stop_timer(sk); x25_remove_socket(sk); x25_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* * Queue the unaccepted socket for death */ skb->sk->sk_state = TCP_LISTEN; sock_set_flag(skb->sk, SOCK_DEAD); x25_start_heartbeat(skb->sk); x25_sk(skb->sk)->state = X25_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.expires = jiffies + 10 * HZ; sk->sk_timer.function = x25_destroy_timer; sk->sk_timer.data = (unsigned long)sk; add_timer(&sk->sk_timer); } else { /* drop last reference so sock_put will free */ __sock_put(sk); } } void x25_destroy_socket_from_timer(struct sock *sk) { sock_hold(sk); bh_lock_sock(sk); __x25_destroy_socket(sk); bh_unlock_sock(sk); sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * X.25 socket object. 
*/ static int x25_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { int opt; struct sock *sk = sock->sk; int rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EINVAL; if (optlen < sizeof(int)) goto out; rc = -EFAULT; if (get_user(opt, (int __user *)optval)) goto out; if (opt) set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); else clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = 0; out: return rc; } static int x25_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int val, len, rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EFAULT; if (get_user(len, optlen)) goto out; len = min_t(unsigned int, len, sizeof(int)); rc = -EINVAL; if (len < 0) goto out; rc = -EFAULT; if (put_user(len, optlen)) goto out; val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; out: return rc; } static int x25_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int rc = -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state != TCP_LISTEN) { memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; rc = 0; } release_sock(sk); return rc; } static struct proto x25_proto = { .name = "X25", .owner = THIS_MODULE, .obj_size = sizeof(struct x25_sock), }; static struct sock *x25_alloc_socket(struct net *net) { struct x25_sock *x25; struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto); if (!sk) goto out; sock_init_data(NULL, sk); x25 = x25_sk(sk); skb_queue_head_init(&x25->ack_queue); skb_queue_head_init(&x25->fragment_queue); skb_queue_head_init(&x25->interrupt_in_queue); skb_queue_head_init(&x25->interrupt_out_queue); out: return sk; } static int x25_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct x25_sock *x25; int rc = -EAFNOSUPPORT; if (!net_eq(net, &init_net)) goto out; rc = -ESOCKTNOSUPPORT; if (sock->type != SOCK_SEQPACKET) goto out; rc = -EINVAL; if (protocol) goto out; rc = -ENOBUFS; if ((sk = x25_alloc_socket(net)) == NULL) goto out; x25 = x25_sk(sk); sock_init_data(sock, sk); x25_init_timers(sk); sock->ops = &x25_proto_ops; sk->sk_protocol = protocol; sk->sk_backlog_rcv = x25_backlog_rcv; x25->t21 = sysctl_x25_call_request_timeout; x25->t22 = sysctl_x25_reset_request_timeout; x25->t23 = sysctl_x25_clear_request_timeout; x25->t2 = sysctl_x25_ack_holdback_timeout; x25->state = X25_STATE_0; x25->cudmatchlength = 0; set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */ /* on call accept */ x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE; x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; x25->facilities.throughput = 0; /* by default don't negotiate throughput */ x25->facilities.reverse = X25_DEFAULT_REVERSE; x25->dte_facilities.calling_len = 0; x25->dte_facilities.called_len = 0; memset(x25->dte_facilities.called_ae, '\0', sizeof(x25->dte_facilities.called_ae)); memset(x25->dte_facilities.calling_ae, '\0', sizeof(x25->dte_facilities.calling_ae)); rc = 0; out: return rc; } static struct sock *x25_make_new(struct sock *osk) { struct sock *sk = NULL; struct x25_sock *x25, *ox25; if (osk->sk_type != SOCK_SEQPACKET) goto out; if ((sk = x25_alloc_socket(sock_net(osk))) == NULL) goto out; x25 = x25_sk(sk); sk->sk_type = osk->sk_type; 
sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sk->sk_backlog_rcv = osk->sk_backlog_rcv; sock_copy_flags(sk, osk); ox25 = x25_sk(osk); x25->t21 = ox25->t21; x25->t22 = ox25->t22; x25->t23 = ox25->t23; x25->t2 = ox25->t2; x25->flags = ox25->flags; x25->facilities = ox25->facilities; x25->dte_facilities = ox25->dte_facilities; x25->cudmatchlength = ox25->cudmatchlength; clear_bit(X25_INTERRUPT_FLAG, &x25->flags); x25_init_timers(sk); out: return sk; } static int x25_release(struct socket *sock) { struct sock *sk = sock->sk; struct x25_sock *x25; if (!sk) return 0; x25 = x25_sk(sk); sock_hold(sk); lock_sock(sk); switch (x25->state) { case X25_STATE_0: case X25_STATE_2: x25_disconnect(sk, 0, 0, 0); __x25_destroy_socket(sk); goto out; case X25_STATE_1: case X25_STATE_3: case X25_STATE_4: x25_clear_queues(sk); x25_write_internal(sk, X25_CLEAR_REQUEST); x25_start_t23timer(sk); x25->state = X25_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; } sock_orphan(sk); out: release_sock(sk); sock_put(sk); return 0; } static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; int len, i, rc = 0; if (!sock_flag(sk, SOCK_ZAPPED) || addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) { rc = -EINVAL; goto out; } len = strlen(addr->sx25_addr.x25_addr); for (i = 0; i < len; i++) { if (!isdigit(addr->sx25_addr.x25_addr[i])) { rc = -EINVAL; goto out; } } lock_sock(sk); x25_sk(sk)->source_addr = addr->sx25_addr; x25_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); release_sock(sk); SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); out: return rc; } static int x25_wait_for_connection_establishment(struct sock *sk) { DECLARE_WAITQUEUE(wait, current); int rc; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = sock_error(sk); if (rc) { sk->sk_socket->state = SS_UNCONNECTED; break; } rc = 0; if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); schedule(); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int x25_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; struct x25_route *rt; int rc = 0; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; goto out; /* Connect completed during a ERESTARTSYS event */ } rc = -ECONNREFUSED; if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; goto out; } rc = -EISCONN; /* No reconnect on a seqpacket socket */ if (sk->sk_state == TCP_ESTABLISHED) goto out; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rc = -EINVAL; if (addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) goto out; rc = -ENETUNREACH; rt = x25_get_route(&addr->sx25_addr); if (!rt) goto out; x25->neighbour = x25_get_neigh(rt->dev); if (!x25->neighbour) goto out_put_route; x25_limit_facilities(&x25->facilities, x25->neighbour); x25->lci = x25_new_lci(x25->neighbour); if (!x25->lci) goto out_put_neigh; 
rc = -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ goto out_put_neigh; if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr)) memset(&x25->source_addr, '\0', X25_ADDR_LEN); x25->dest_addr = addr->sx25_addr; /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; x25->state = X25_STATE_1; x25_write_internal(sk, X25_CALL_REQUEST); x25_start_heartbeat(sk); x25_start_t21timer(sk); /* Now the loop */ rc = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) goto out_put_neigh; rc = x25_wait_for_connection_establishment(sk); if (rc) goto out_put_neigh; sock->state = SS_CONNECTED; rc = 0; out_put_neigh: if (rc) x25_neigh_put(x25->neighbour); out_put_route: x25_route_put(rt); out: release_sock(sk); return rc; } static int x25_wait_for_data(struct sock *sk, long timeout) { DECLARE_WAITQUEUE(wait, current); int rc = 0; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; rc = 0; if (skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int x25_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct sock *newsk; struct sk_buff *skb; int rc = -EINVAL; if (!sk) goto out; rc = -EOPNOTSUPP; if (sk->sk_type != SOCK_SEQPACKET) goto out; lock_sock(sk); rc = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out2; rc = x25_wait_for_data(sk, sk->sk_rcvtimeo); if (rc) goto out2; skb = skb_dequeue(&sk->sk_receive_queue); rc = -EINVAL; if (!skb->sk) goto out2; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; rc = 0; out2: release_sock(sk); out: return rc; } static int x25_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr; struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); int rc = 0; if (peer) { if (sk->sk_state != TCP_ESTABLISHED) { rc = -ENOTCONN; goto out; } sx25->sx25_addr = x25->dest_addr; } else sx25->sx25_addr = x25->source_addr; sx25->sx25_family = AF_X25; *uaddr_len = sizeof(*sx25); out: return rc; } int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, unsigned int lci) { struct sock *sk; struct sock *make; struct x25_sock *makex25; struct x25_address source_addr, dest_addr; struct x25_facilities facilities; struct x25_dte_facilities dte_facilities; int len, addr_len, rc; /* * Remove the LCI and frame type. */ skb_pull(skb, X25_STD_MIN_LEN); /* * Extract the X.25 addresses and convert them to ASCII strings, * and remove them. 
* * Address block is mandatory in call request packets */ addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); if (addr_len <= 0) goto out_clear_request; skb_pull(skb, addr_len); /* * Get the length of the facilities, skip past them for the moment * get the call user data because this is needed to determine * the correct listener * * Facilities length is mandatory in call request packets */ if (!pskb_may_pull(skb, 1)) goto out_clear_request; len = skb->data[0] + 1; if (!pskb_may_pull(skb, len)) goto out_clear_request; skb_pull(skb,len); /* * Ensure that the amount of call user data is valid. */ if (skb->len > X25_MAX_CUD_LEN) goto out_clear_request; /* * Get all the call user data so it can be used in * x25_find_listener and skb_copy_from_linear_data up ahead. */ if (!pskb_may_pull(skb, skb->len)) goto out_clear_request; /* * Find a listener for the particular address/cud pair. */ sk = x25_find_listener(&source_addr,skb); skb_push(skb,len); if (sk != NULL && sk_acceptq_is_full(sk)) { goto out_sock_put; } /* * We dont have any listeners for this incoming call. * Try forwarding it. */ if (sk == NULL) { skb_push(skb, addr_len + X25_STD_MIN_LEN); if (sysctl_x25_forward && x25_forward_call(&dest_addr, nb, skb, lci) > 0) { /* Call was forwarded, dont process it any more */ kfree_skb(skb); rc = 1; goto out; } else { /* No listeners, can't forward, clear the call */ goto out_clear_request; } } /* * Try to reach a compromise on the requested facilities. */ len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities); if (len == -1) goto out_sock_put; /* * current neighbour/link might impose additional limits * on certain facilties */ x25_limit_facilities(&facilities, nb); /* * Try to create a new socket. */ make = x25_make_new(sk); if (!make) goto out_sock_put; /* * Remove the facilities */ skb_pull(skb, len); skb->sk = make; make->sk_state = TCP_ESTABLISHED; makex25 = x25_sk(make); makex25->lci = lci; makex25->dest_addr = dest_addr; makex25->source_addr = source_addr; makex25->neighbour = nb; makex25->facilities = facilities; makex25->dte_facilities= dte_facilities; makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask; /* ensure no reverse facil on accept */ makex25->vc_facil_mask &= ~X25_MASK_REVERSE; /* ensure no calling address extension on accept */ makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE; makex25->cudmatchlength = x25_sk(sk)->cudmatchlength; /* Normally all calls are accepted immediately */ if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) { x25_write_internal(make, X25_CALL_ACCEPTED); makex25->state = X25_STATE_3; } /* * Incoming Call User Data. 
*/ skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len); makex25->calluserdata.cudlength = skb->len; sk->sk_ack_backlog++; x25_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); x25_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); rc = 1; sock_put(sk); out: return rc; out_sock_put: sock_put(sk); out_clear_request: rc = 0; x25_transmit_clear_request(nb, lci, 0x01); goto out; } static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name; struct sockaddr_x25 sx25; struct sk_buff *skb; unsigned char *asmptr; int noblock = msg->msg_flags & MSG_DONTWAIT; size_t size; int qbit = 0, rc = -EINVAL; lock_sock(sk); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT)) goto out; /* we currently don't support segmented records at the user interface */ if (!(msg->msg_flags & (MSG_EOR|MSG_OOB))) goto out; rc = -EADDRNOTAVAIL; if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); goto out; } rc = -ENETUNREACH; if (!x25->neighbour) goto out; if (usx25) { rc = -EINVAL; if (msg->msg_namelen < sizeof(sx25)) goto out; memcpy(&sx25, usx25, sizeof(sx25)); rc = -EISCONN; if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr)) goto out; rc = -EINVAL; if (sx25.sx25_family != AF_X25) goto out; } else { /* * FIXME 1003.1g - if the socket is like this because * it has become closed (not started closed) we ought * to SIGPIPE, EPIPE; */ rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; sx25.sx25_family = AF_X25; sx25.sx25_addr = x25->dest_addr; } /* Sanity check the packet size */ if (len > 65535) { rc = -EMSGSIZE; goto out; } SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n"); /* Build a packet */ SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n"); if ((msg->msg_flags & MSG_OOB) && len > 32) len = 32; size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); if (!skb) goto out; X25_SKB_CB(skb)->flags = msg->msg_flags; skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN); /* * Put the data on the end */ SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n"); skb_reset_transport_header(skb); skb_put(skb, len); rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (rc) goto out_kfree_skb; /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. 
*/ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { if (!pskb_may_pull(skb, 1)) goto out_kfree_skb; qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the X.25 header */ SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n"); if (msg->msg_flags & MSG_OOB) { if (x25->neighbour->extended) { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } else { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } } else { if (x25->neighbour->extended) { /* Build an Extended X.25 header */ asmptr = skb_push(skb, X25_EXT_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; *asmptr++ = X25_DATA; } else { /* Build an Standard X.25 header */ asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; } if (qbit) skb->data[0] |= X25_Q_BIT; } SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n"); SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n"); rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out_kfree_skb; if (msg->msg_flags & MSG_OOB) skb_queue_tail(&x25->interrupt_out_queue, skb); else { rc = x25_output(sk, skb); len = rc; if (rc < 0) kfree_skb(skb); else if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) len++; } x25_kick(sk); rc = len; out: release_sock(sk); return rc; out_kfree_skb: kfree_skb(skb); goto out; } static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; int qbit, header_len; struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; lock_sock(sk); if (x25->neighbour == NULL) goto out; header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN : X25_STD_MIN_LEN; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) goto out; if (flags & MSG_OOB) { rc = -EINVAL; if (sock_flag(sk, SOCK_URGINLINE) || !skb_peek(&x25->interrupt_in_queue)) goto out; skb = skb_dequeue(&x25->interrupt_in_queue); if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) goto out_free_dgram; skb_pull(skb, X25_STD_MIN_LEN); /* * No Q bit information on Interrupt data. 
*/ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = 0x00; } msg->msg_flags |= MSG_OOB; } else { /* Now we can treat all alike */ release_sock(sk); skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); lock_sock(sk); if (!skb) goto out; if (!pskb_may_pull(skb, header_len)) goto out_free_dgram; qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; skb_pull(skb, header_len); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = qbit; } } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (rc) goto out_free_dgram; if (sx25) { sx25->sx25_family = AF_X25; sx25->sx25_addr = x25->dest_addr; msg->msg_namelen = sizeof(*sx25); } x25_check_rbuf(sk); rc = copied; out_free_dgram: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; } static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); void __user *argp = (void __user *)arg; int rc; switch (cmd) { case TIOCOUTQ: { int amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; rc = put_user(amount, (unsigned int __user *)argp); break; } case TIOCINQ: { struct sk_buff *skb; int amount = 0; /* * These two are safe on a single CPU system as * only user tasks fiddle here */ lock_sock(sk); if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; release_sock(sk); rc = put_user(amount, (unsigned int __user *)argp); break; } case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = sock_get_timestamp(sk, (struct timeval __user *)argp); break; case SIOCGSTAMPNS: rc = -EINVAL; if (sk) rc = sock_get_timestampns(sk, (struct timespec __user *)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->facilities, sizeof(x25->facilities)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SFACILITIES: { struct x25_facilities facilities; rc = -EFAULT; if (copy_from_user(&facilities, argp, sizeof(facilities))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_fac_release; if (facilities.pacsize_in < X25_PS16 || facilities.pacsize_in > X25_PS4096) goto out_fac_release; if (facilities.pacsize_out < X25_PS16 || facilities.pacsize_out > X25_PS4096) goto out_fac_release; if (facilities.winsize_in < 1 || facilities.winsize_in > 127) goto out_fac_release; if (facilities.throughput) { int out = facilities.throughput & 0xf0; int in = facilities.throughput & 0x0f; if (!out) facilities.throughput |= X25_DEFAULT_THROUGHPUT << 4; else if (out < 0x30 || out > 0xD0) goto out_fac_release; if (!in) facilities.throughput |= X25_DEFAULT_THROUGHPUT; else if (in < 0x03 || in > 0x0D) goto out_fac_release; } if (facilities.reverse && (facilities.reverse & 0x81) != 0x81) goto out_fac_release; x25->facilities = facilities; rc = 0; out_fac_release: release_sock(sk); break; } case SIOCX25GDTEFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->dte_facilities, sizeof(x25->dte_facilities)); release_sock(sk); if (rc) rc = -EFAULT; break; } case SIOCX25SDTEFACILITIES: { struct x25_dte_facilities dtefacs; rc = -EFAULT; if (copy_from_user(&dtefacs, argp, sizeof(dtefacs))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_dtefac_release; if (dtefacs.calling_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.calling_ae == NULL) goto out_dtefac_release; if (dtefacs.called_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.called_ae == NULL) goto out_dtefac_release; x25->dte_facilities = dtefacs; rc = 0; out_dtefac_release: release_sock(sk); break; } case SIOCX25GCALLUSERDATA: { lock_sock(sk); rc = copy_to_user(argp, &x25->calluserdata, sizeof(x25->calluserdata)) ? -EFAULT : 0; release_sock(sk); break; } case SIOCX25SCALLUSERDATA: { struct x25_calluserdata calluserdata; rc = -EFAULT; if (copy_from_user(&calluserdata, argp, sizeof(calluserdata))) break; rc = -EINVAL; if (calluserdata.cudlength > X25_MAX_CUD_LEN) break; lock_sock(sk); x25->calluserdata = calluserdata; release_sock(sk); rc = 0; break; } case SIOCX25GCAUSEDIAG: { lock_sock(sk); rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SCAUSEDIAG: { struct x25_causediag causediag; rc = -EFAULT; if (copy_from_user(&causediag, argp, sizeof(causediag))) break; lock_sock(sk); x25->causediag = causediag; release_sock(sk); rc = 0; break; } case SIOCX25SCUDMATCHLEN: { struct x25_subaddr sub_addr; rc = -EINVAL; lock_sock(sk); if(sk->sk_state != TCP_CLOSE) goto out_cud_release; rc = -EFAULT; if (copy_from_user(&sub_addr, argp, sizeof(sub_addr))) goto out_cud_release; rc = -EINVAL; if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN) goto out_cud_release; x25->cudmatchlength = sub_addr.cudmatchlength; rc = 0; out_cud_release: release_sock(sk); break; } case SIOCX25CALLACCPTAPPRV: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_CLOSE) { clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); rc = 0; } release_sock(sk); break; } case SIOCX25SENDCALLACCPT: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) goto out_sendcallaccpt_release; /* must call accptapprv above */ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags)) goto out_sendcallaccpt_release; x25_write_internal(sk, X25_CALL_ACCEPTED); x25->state = X25_STATE_3; rc = 0; out_sendcallaccpt_release: release_sock(sk); break; } default: rc = -ENOIOCTLCMD; break; } return rc; } static const struct net_proto_family x25_family_ops = { .family = AF_X25, .create = x25_create, .owner = THIS_MODULE, }; #ifdef CONFIG_COMPAT static int compat_x25_subscr_ioctl(unsigned int cmd, struct compat_x25_subscrip_struct __user *x25_subscr32) { struct compat_x25_subscrip_struct x25_subscr; struct x25_neigh *nb; struct net_device *dev; int rc = -EINVAL; rc = -EFAULT; if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32))) goto out; rc = -EINVAL; dev = x25_dev_get(x25_subscr.device); if (dev == NULL) goto out; nb = x25_get_neigh(dev); if (nb == NULL) goto out_dev_put; dev_put(dev); if (cmd == SIOCX25GSUBSCRIP) { read_lock_bh(&x25_neigh_list_lock); x25_subscr.extended = nb->extended; x25_subscr.global_facil_mask = nb->global_facil_mask; read_unlock_bh(&x25_neigh_list_lock); rc = copy_to_user(x25_subscr32, &x25_subscr, sizeof(*x25_subscr32)) ? 
-EFAULT : 0; } else { rc = -EINVAL; if (x25_subscr.extended == 0 || x25_subscr.extended == 1) { rc = 0; write_lock_bh(&x25_neigh_list_lock); nb->extended = x25_subscr.extended; nb->global_facil_mask = x25_subscr.global_facil_mask; write_unlock_bh(&x25_neigh_list_lock); } } x25_neigh_put(nb); out: return rc; out_dev_put: dev_put(dev); goto out; } static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; int rc = -ENOIOCTLCMD; switch(cmd) { case TIOCOUTQ: case TIOCINQ: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = compat_sock_get_timestamp(sk, (struct timeval __user*)argp); break; case SIOCGSTAMPNS: rc = -EINVAL; if (sk) rc = compat_sock_get_timestampns(sk, (struct timespec __user*)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: case SIOCX25SFACILITIES: case SIOCX25GDTEFACILITIES: case SIOCX25SDTEFACILITIES: case SIOCX25GCALLUSERDATA: case SIOCX25SCALLUSERDATA: case SIOCX25GCAUSEDIAG: case SIOCX25SCAUSEDIAG: case SIOCX25SCUDMATCHLEN: case SIOCX25CALLACCPTAPPRV: case SIOCX25SENDCALLACCPT: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; default: rc = -ENOIOCTLCMD; break; } return rc; } #endif static const struct proto_ops x25_proto_ops = { .family = AF_X25, .owner = THIS_MODULE, .release = x25_release, .bind = x25_bind, .connect = x25_connect, .socketpair = sock_no_socketpair, .accept = x25_accept, .getname = x25_getname, .poll = datagram_poll, .ioctl = x25_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_x25_ioctl, #endif .listen = x25_listen, .shutdown = sock_no_shutdown, .setsockopt = x25_setsockopt, .getsockopt = x25_getsockopt, .sendmsg = x25_sendmsg, .recvmsg = x25_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct packet_type x25_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_X25), .func = x25_lapb_receive_frame, }; static struct notifier_block x25_dev_notifier = { .notifier_call = x25_device_event, }; void x25_kill_by_neigh(struct x25_neigh *nb) { struct sock *s; write_lock_bh(&x25_list_lock); sk_for_each(s, &x25_list) if (x25_sk(s)->neighbour == nb) x25_disconnect(s, ENETUNREACH, 0, 0); write_unlock_bh(&x25_list_lock); /* Remove any related forwards */ x25_clear_forward_by_dev(nb->dev); } static int __init x25_init(void) { int rc = proto_register(&x25_proto, 0); if (rc != 0) goto out; rc = sock_register(&x25_family_ops); if (rc != 0) goto out_proto; dev_add_pack(&x25_packet_type); rc = register_netdevice_notifier(&x25_dev_notifier); if (rc != 0) goto out_sock; printk(KERN_INFO "X.25 for Linux Version 0.2\n"); x25_register_sysctl(); rc = x25_proc_init(); if (rc != 0) goto out_dev; out: return rc; out_dev: unregister_netdevice_notifier(&x25_dev_notifier); out_sock: sock_unregister(AF_X25); out_proto: proto_unregister(&x25_proto); goto out; } module_init(x25_init); static void __exit x25_exit(void) { x25_proc_exit(); x25_link_free(); x25_route_free(); x25_unregister_sysctl(); 
unregister_netdevice_notifier(&x25_dev_notifier); dev_remove_pack(&x25_packet_type); sock_unregister(AF_X25); proto_unregister(&x25_proto); } module_exit(x25_exit); MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_X25);
static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; int qbit, header_len; struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; lock_sock(sk); if (x25->neighbour == NULL) goto out; header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN : X25_STD_MIN_LEN; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) goto out; if (flags & MSG_OOB) { rc = -EINVAL; if (sock_flag(sk, SOCK_URGINLINE) || !skb_peek(&x25->interrupt_in_queue)) goto out; skb = skb_dequeue(&x25->interrupt_in_queue); if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) goto out_free_dgram; skb_pull(skb, X25_STD_MIN_LEN); /* * No Q bit information on Interrupt data. */ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = 0x00; } msg->msg_flags |= MSG_OOB; } else { /* Now we can treat all alike */ release_sock(sk); skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); lock_sock(sk); if (!skb) goto out; if (!pskb_may_pull(skb, header_len)) goto out_free_dgram; qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; skb_pull(skb, header_len); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = qbit; } } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (rc) goto out_free_dgram; if (sx25) { sx25->sx25_family = AF_X25; sx25->sx25_addr = x25->dest_addr; } msg->msg_namelen = sizeof(struct sockaddr_x25); x25_check_rbuf(sk); rc = copied; out_free_dgram: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; }
static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; int qbit, header_len; struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; lock_sock(sk); if (x25->neighbour == NULL) goto out; header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN : X25_STD_MIN_LEN; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) goto out; if (flags & MSG_OOB) { rc = -EINVAL; if (sock_flag(sk, SOCK_URGINLINE) || !skb_peek(&x25->interrupt_in_queue)) goto out; skb = skb_dequeue(&x25->interrupt_in_queue); if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) goto out_free_dgram; skb_pull(skb, X25_STD_MIN_LEN); /* * No Q bit information on Interrupt data. */ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = 0x00; } msg->msg_flags |= MSG_OOB; } else { /* Now we can treat all alike */ release_sock(sk); skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); lock_sock(sk); if (!skb) goto out; if (!pskb_may_pull(skb, header_len)) goto out_free_dgram; qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; skb_pull(skb, header_len); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = qbit; } } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (rc) goto out_free_dgram; if (sx25) { sx25->sx25_family = AF_X25; sx25->sx25_addr = x25->dest_addr; msg->msg_namelen = sizeof(*sx25); } x25_check_rbuf(sk); rc = copied; out_free_dgram: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; }
{'added': [(1343, '\t\tmsg->msg_namelen = sizeof(*sx25);')], 'deleted': [(1345, '\tmsg->msg_namelen = sizeof(struct sockaddr_x25);'), (1346, '')]}
1
2
1,395
7,920
https://github.com/torvalds/linux
CVE-2013-7266
['CWE-20']
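The CVE-2013-7266 diff recorded above moves the msg->msg_namelen assignment in x25_recvmsg() inside the if (sx25) branch, so the caller is told a name length only when the sockaddr_x25 structure was actually filled in. A minimal user-space C sketch of that pattern follows; it is an illustration only, not the kernel code, and the struct and function names in it are hypothetical.

/*
 * Sketch of the "set the reported length only after filling the buffer"
 * pattern applied by the diff above. Names here are invented for the
 * example; only the length-after-fill idea is taken from the record.
 */
#include <stddef.h>
#include <string.h>

struct peer_name { unsigned short family; char digits[16]; };

/* 'namelen' plays the role of msg->msg_namelen in the fixed function. */
static void report_peer_name(struct peer_name *name, size_t *namelen,
                             const struct peer_name *peer)
{
	if (name != NULL) {
		memcpy(name, peer, sizeof(*name));
		*namelen = sizeof(*name);	/* length set only after the copy */
	}
	/*
	 * Setting *namelen unconditionally (as the pre-fix code did with
	 * msg_namelen) would claim that address bytes are valid even when
	 * nothing was copied into the caller's buffer.
	 */
}

int main(void)
{
	struct peer_name peer = { 9 /* AF_X25 */, "1234567890" };
	struct peer_name out;
	size_t out_len = 0;

	report_peer_name(&out, &out_len, &peer);   /* out_len becomes sizeof(out) */
	report_peer_name(NULL, &out_len, &peer);   /* out_len is left untouched */
	return 0;
}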
tls.c
processClientServerHello
/* * tls.c - TLS/TLS/DTLS dissector * * Copyright (C) 2016-21 - ntop.org * * nDPI is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * nDPI is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with nDPI. If not, see <http://www.gnu.org/licenses/>. * */ #include "ndpi_protocol_ids.h" #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_TLS #include "ndpi_api.h" #include "ndpi_md5.h" #include "ndpi_sha1.h" #include "ndpi_encryption.h" extern char *strptime(const char *s, const char *format, struct tm *tm); extern int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version); extern int http_process_user_agent(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, const u_int8_t *ua_ptr, u_int16_t ua_ptr_len); /* QUIC/GQUIC stuff */ extern int quic_len(const uint8_t *buf, uint64_t *value); extern int quic_len_buffer_still_required(uint8_t value); extern int is_version_with_var_int_transport_params(uint32_t version); // #define DEBUG_TLS_MEMORY 1 // #define DEBUG_TLS 1 // #define DEBUG_TLS_BLOCKS 1 // #define DEBUG_CERTIFICATE_HASH // #define DEBUG_JA3C 1 /* #define DEBUG_FINGERPRINT 1 */ /* #define DEBUG_ENCRYPTED_SNI 1 */ /* **************************************** */ /* https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ #define JA3_STR_LEN 1024 #define MAX_NUM_JA3 512 #define MAX_JA3_STRLEN 256 union ja3_info { struct { u_int16_t tls_handshake_version; u_int16_t num_cipher, cipher[MAX_NUM_JA3]; u_int16_t num_tls_extension, tls_extension[MAX_NUM_JA3]; u_int16_t num_elliptic_curve, elliptic_curve[MAX_NUM_JA3]; u_int16_t num_elliptic_curve_point_format, elliptic_curve_point_format[MAX_NUM_JA3]; char signature_algorithms[MAX_JA3_STRLEN], supported_versions[MAX_JA3_STRLEN], alpn[MAX_JA3_STRLEN]; } client; struct { u_int16_t tls_handshake_version; u_int16_t num_cipher, cipher[MAX_NUM_JA3]; u_int16_t num_tls_extension, tls_extension[MAX_NUM_JA3]; u_int16_t tls_supported_version; u_int16_t num_elliptic_curve_point_format, elliptic_curve_point_format[MAX_NUM_JA3]; char alpn[MAX_JA3_STRLEN]; } server; /* Used for JA3+ */ }; /* NOTE How to view the certificate fingerprint 1. Using wireshark save the certificate on certificate.bin file as explained in https://security.stackexchange.com/questions/123851/how-can-i-extract-the-certificate-from-this-pcap-file 2. openssl x509 -inform der -in certificate.bin -text > certificate.der 3. openssl x509 -noout -fingerprint -sha1 -inform pem -in certificate.der SHA1 Fingerprint=15:9A:76.... $ shasum -a 1 www.grc.com.bin 159a76..... 
*/ #define NDPI_MAX_TLS_REQUEST_SIZE 10000 /* skype.c */ extern u_int8_t is_skype_flow(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow); /* stun.c */ extern u_int32_t get_stun_lru_key(struct ndpi_flow_struct *flow, u_int8_t rev); static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol); /* **************************************** */ static u_int32_t ndpi_tls_refine_master_protocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol) { struct ndpi_packet_struct *packet = &flow->packet; // protocol = NDPI_PROTOCOL_TLS; if(packet->tcp != NULL) { switch(protocol) { case NDPI_PROTOCOL_TLS: { /* In case of TLS there are probably sub-protocols such as IMAPS that can be otherwise detected */ u_int16_t sport = ntohs(packet->tcp->source); u_int16_t dport = ntohs(packet->tcp->dest); if((sport == 465) || (dport == 465) || (sport == 587) || (dport == 587)) protocol = NDPI_PROTOCOL_MAIL_SMTPS; else if((sport == 993) || (dport == 993) || (flow->l4.tcp.mail_imap_starttls) ) protocol = NDPI_PROTOCOL_MAIL_IMAPS; else if((sport == 995) || (dport == 995)) protocol = NDPI_PROTOCOL_MAIL_POPS; } break; } } return(protocol); } /* **************************************** */ void ndpi_search_tls_tcp_memory(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int avail_bytes; /* TCP */ #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Handling TCP/TLS flow [payload_len: %u][buffer_len: %u][direction: %u]\n", packet->payload_packet_len, flow->l4.tcp.tls.message.buffer_len, packet->packet_direction); #endif if(flow->l4.tcp.tls.message.buffer == NULL) { /* Allocate buffer */ flow->l4.tcp.tls.message.buffer_len = 2048, flow->l4.tcp.tls.message.buffer_used = 0; flow->l4.tcp.tls.message.buffer = (u_int8_t*)ndpi_malloc(flow->l4.tcp.tls.message.buffer_len); if(flow->l4.tcp.tls.message.buffer == NULL) return; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Allocating %u buffer\n", flow->l4.tcp.tls.message.buffer_len); #endif } avail_bytes = flow->l4.tcp.tls.message.buffer_len - flow->l4.tcp.tls.message.buffer_used; if(avail_bytes < packet->payload_packet_len) { u_int new_len = flow->l4.tcp.tls.message.buffer_len + packet->payload_packet_len - avail_bytes + 1; void *newbuf = ndpi_realloc(flow->l4.tcp.tls.message.buffer, flow->l4.tcp.tls.message.buffer_len, new_len); if(!newbuf) return; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Enlarging %u -> %u buffer\n", flow->l4.tcp.tls.message.buffer_len, new_len); #endif flow->l4.tcp.tls.message.buffer = (u_int8_t*)newbuf; flow->l4.tcp.tls.message.buffer_len = new_len; avail_bytes = flow->l4.tcp.tls.message.buffer_len - flow->l4.tcp.tls.message.buffer_used; } if(packet->payload_packet_len > 0 && avail_bytes >= packet->payload_packet_len) { u_int8_t ok = 0; if(flow->l4.tcp.tls.message.next_seq[packet->packet_direction] != 0) { if(ntohl(packet->tcp->seq) == flow->l4.tcp.tls.message.next_seq[packet->packet_direction]) ok = 1; } else ok = 1; if(ok) { memcpy(&flow->l4.tcp.tls.message.buffer[flow->l4.tcp.tls.message.buffer_used], packet->payload, packet->payload_packet_len); flow->l4.tcp.tls.message.buffer_used += packet->payload_packet_len; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Copied data to buffer [%u/%u bytes][direction: %u][tcp_seq: %u][next: %u]\n", flow->l4.tcp.tls.message.buffer_used, flow->l4.tcp.tls.message.buffer_len, packet->packet_direction, 
ntohl(packet->tcp->seq), ntohl(packet->tcp->seq)+packet->payload_packet_len); #endif flow->l4.tcp.tls.message.next_seq[packet->packet_direction] = ntohl(packet->tcp->seq)+packet->payload_packet_len; } else { #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Skipping packet [%u bytes][direction: %u][tcp_seq: %u][expected next: %u]\n", flow->l4.tcp.tls.message.buffer_len, packet->packet_direction, ntohl(packet->tcp->seq), ntohl(packet->tcp->seq)+packet->payload_packet_len); #endif } } } /* **************************************** */ /* Can't call libc functions from kernel space, define some stub instead */ #define ndpi_isalpha(ch) (((ch) >= 'a' && (ch) <= 'z') || ((ch) >= 'A' && (ch) <= 'Z')) #define ndpi_isdigit(ch) ((ch) >= '0' && (ch) <= '9') #define ndpi_isspace(ch) (((ch) >= '\t' && (ch) <= '\r') || ((ch) == ' ')) #define ndpi_isprint(ch) ((ch) >= 0x20 && (ch) <= 0x7e) #define ndpi_ispunct(ch) (((ch) >= '!' && (ch) <= '/') || \ ((ch) >= ':' && (ch) <= '@') || \ ((ch) >= '[' && (ch) <= '`') || \ ((ch) >= '{' && (ch) <= '~')) /* **************************************** */ static void cleanupServerName(char *buffer, int buffer_len) { u_int i; /* Now all lowecase */ for(i=0; i<buffer_len; i++) buffer[i] = tolower(buffer[i]); } /* **************************************** */ /* Return code -1: error (buffer too short) 0: OK but buffer is not human readeable (so something went wrong) 1: OK */ static int extractRDNSequence(struct ndpi_packet_struct *packet, u_int offset, char *buffer, u_int buffer_len, char *rdnSeqBuf, u_int *rdnSeqBuf_offset, u_int rdnSeqBuf_len, const char *label) { u_int8_t str_len = packet->payload[offset+4], is_printable = 1; char *str; u_int len, j; if (*rdnSeqBuf_offset >= rdnSeqBuf_len) { #ifdef DEBUG_TLS printf("[TLS] %s() [buffer capacity reached][%u]\n", __FUNCTION__, rdnSeqBuf_len); #endif return -1; } // packet is truncated... further inspection is not needed if((offset+4+str_len) >= packet->payload_packet_len) return(-1); str = (char*)&packet->payload[offset+5]; len = (u_int)ndpi_min(str_len, buffer_len-1); strncpy(buffer, str, len); buffer[len] = '\0'; // check string is printable for(j = 0; j < len; j++) { if(!ndpi_isprint(buffer[j])) { is_printable = 0; break; } } if(is_printable) { int rc = snprintf(&rdnSeqBuf[*rdnSeqBuf_offset], rdnSeqBuf_len-(*rdnSeqBuf_offset), "%s%s=%s", (*rdnSeqBuf_offset > 0) ? 
", " : "", label, buffer); if(rc > 0) (*rdnSeqBuf_offset) += rc; } return(is_printable); } /* **************************************** */ static void checkTLSSubprotocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { if(flow->detected_protocol_stack[1] == NDPI_PROTOCOL_UNKNOWN) { /* Subprotocol not yet set */ if(ndpi_struct->tls_cert_cache && flow->packet.iph) { u_int32_t key = flow->packet.iph->daddr + flow->packet.tcp->dest; u_int16_t cached_proto; if(ndpi_lru_find_cache(ndpi_struct->tls_cert_cache, key, &cached_proto, 0 /* Don't remove it as it can be used for other connections */)) { ndpi_protocol ret = { NDPI_PROTOCOL_TLS, cached_proto, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED }; flow->detected_protocol_stack[0] = cached_proto, flow->detected_protocol_stack[1] = NDPI_PROTOCOL_TLS; flow->category = ndpi_get_proto_category(ndpi_struct, ret); ndpi_check_subprotocol_risk(flow, cached_proto); } } } } /* **************************************** */ /* See https://blog.catchpoint.com/2017/05/12/dissecting-tls-using-wireshark/ */ static void processCertificateElements(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int16_t p_offset, u_int16_t certificate_len) { struct ndpi_packet_struct *packet = &flow->packet; u_int num_found = 0, i; char buffer[64] = { '\0' }, rdnSeqBuf[2048] = { '\0' }; u_int rdn_len = 0; #ifdef DEBUG_TLS printf("[TLS] %s() [offset: %u][certificate_len: %u]\n", __FUNCTION__, p_offset, certificate_len); #endif /* Check after handshake protocol header (5 bytes) and message header (4 bytes) */ for(i = p_offset; i < certificate_len; i++) { /* See https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.sec.doc/q009860_.htm for X.509 certificate labels */ if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x03)) { /* Common Name */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "CN"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Common Name", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x06)) { /* Country */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "C"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Country", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x07)) { /* Locality */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "L"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Locality", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x08)) { /* State or Province */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "ST"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? 
"Subject" : "Issuer", "State or Province", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0a)) { /* Organization Name */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "O"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Name", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0b)) { /* Organization Unit */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "OU"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Unit", buffer); #endif } else if((packet->payload[i] == 0x30) && (packet->payload[i+1] == 0x1e) && (packet->payload[i+2] == 0x17)) { /* Certificate Validity */ u_int8_t len = packet->payload[i+3]; u_int offset = i+4; if(num_found == 0) { num_found++; #ifdef DEBUG_TLS printf("[TLS] %s() IssuerDN [%s]\n", __FUNCTION__, rdnSeqBuf); #endif if(rdn_len && (flow->protos.tls_quic_stun.tls_quic.issuerDN == NULL)) flow->protos.tls_quic_stun.tls_quic.issuerDN = ndpi_strdup(rdnSeqBuf); rdn_len = 0; /* Reset buffer */ } if((offset+len) < packet->payload_packet_len) { char utcDate[32]; #ifdef DEBUG_TLS u_int j; printf("[CERTIFICATE] notBefore [len: %u][", len); for(j=0; j<len; j++) printf("%c", packet->payload[i+4+j]); printf("]\n"); #endif if(len < (sizeof(utcDate)-1)) { struct tm utc; utc.tm_isdst = -1; /* Not set by strptime */ strncpy(utcDate, (const char*)&packet->payload[i+4], len); utcDate[len] = '\0'; /* 141021000000Z */ if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) { flow->protos.tls_quic_stun.tls_quic.notBefore = timegm(&utc); #ifdef DEBUG_TLS printf("[CERTIFICATE] notBefore %u [%s]\n", flow->protos.tls_quic_stun.tls_quic.notBefore, utcDate); #endif } } offset += len; if((offset+1) < packet->payload_packet_len) { len = packet->payload[offset+1]; offset += 2; if((offset+len) < packet->payload_packet_len) { u_int32_t time_sec = flow->packet.current_time_ms / 1000; #ifdef DEBUG_TLS u_int j; printf("[CERTIFICATE] notAfter [len: %u][", len); for(j=0; j<len; j++) printf("%c", packet->payload[offset+j]); printf("]\n"); #endif if(len < (sizeof(utcDate)-1)) { struct tm utc; utc.tm_isdst = -1; /* Not set by strptime */ strncpy(utcDate, (const char*)&packet->payload[offset], len); utcDate[len] = '\0'; /* 141021000000Z */ if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) { flow->protos.tls_quic_stun.tls_quic.notAfter = timegm(&utc); #ifdef DEBUG_TLS printf("[CERTIFICATE] notAfter %u [%s]\n", flow->protos.tls_quic_stun.tls_quic.notAfter, utcDate); #endif } } if((time_sec < flow->protos.tls_quic_stun.tls_quic.notBefore) || (time_sec > flow->protos.tls_quic_stun.tls_quic.notAfter)) ndpi_set_risk(flow, NDPI_TLS_CERTIFICATE_EXPIRED); /* Certificate expired */ } } } } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x1d) && (packet->payload[i+2] == 0x11)) { /* Organization OID: 2.5.29.17 (subjectAltName) */ u_int8_t matched_name = 0; #ifdef DEBUG_TLS printf("******* [TLS] Found subjectAltName\n"); #endif i += 3 /* skip the initial patten 55 1D 11 */; i++; /* skip the first type, 0x04 == BIT STRING, and jump to it's length */ if(i < packet->payload_packet_len) { i += (packet->payload[i] & 0x80) ? 
(packet->payload[i] & 0x7F) : 0; /* skip BIT STRING length */ if(i < packet->payload_packet_len) { i += 2; /* skip the second type, 0x30 == SEQUENCE, and jump to it's length */ if(i < packet->payload_packet_len) { i += (packet->payload[i] & 0x80) ? (packet->payload[i] & 0x7F) : 0; /* skip SEQUENCE length */ i++; while(i < packet->payload_packet_len) { if(packet->payload[i] == 0x82) { if((i < (packet->payload_packet_len - 1)) && ((i + packet->payload[i + 1] + 2) < packet->payload_packet_len)) { u_int8_t len = packet->payload[i + 1]; char dNSName[256]; i += 2; /* The check "len > sizeof(dNSName) - 1" will be always false. If we add it, the compiler is smart enough to detect it and throws a warning */ if((len == 0 /* Looks something went wrong */) || ((i+len) > packet->payload_packet_len)) break; strncpy(dNSName, (const char*)&packet->payload[i], len); dNSName[len] = '\0'; cleanupServerName(dNSName, len); #if DEBUG_TLS printf("[TLS] dNSName %s [%s][len: %u][leftover: %d]\n", dNSName, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, len, packet->payload_packet_len-i-len); #endif if(matched_name == 0) { if(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == '\0') matched_name = 1; /* No SNI */ else if (dNSName[0] == '*') { char * label = strstr(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, &dNSName[1]); if (label != NULL) { char * first_dot = strchr(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, '.'); if (first_dot == NULL || first_dot >= label) { matched_name = 1; } } } else if(strcmp(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, dNSName) == 0) matched_name = 1; } if(flow->protos.tls_quic_stun.tls_quic.server_names == NULL) flow->protos.tls_quic_stun.tls_quic.server_names = ndpi_strdup(dNSName), flow->protos.tls_quic_stun.tls_quic.server_names_len = strlen(dNSName); else { u_int16_t dNSName_len = strlen(dNSName); u_int16_t newstr_len = flow->protos.tls_quic_stun.tls_quic.server_names_len + dNSName_len + 1; char *newstr = (char*)ndpi_realloc(flow->protos.tls_quic_stun.tls_quic.server_names, flow->protos.tls_quic_stun.tls_quic.server_names_len+1, newstr_len+1); if(newstr) { flow->protos.tls_quic_stun.tls_quic.server_names = newstr; flow->protos.tls_quic_stun.tls_quic.server_names[flow->protos.tls_quic_stun.tls_quic.server_names_len] = ','; strncpy(&flow->protos.tls_quic_stun.tls_quic.server_names[flow->protos.tls_quic_stun.tls_quic.server_names_len+1], dNSName, dNSName_len+1); flow->protos.tls_quic_stun.tls_quic.server_names[newstr_len] = '\0'; flow->protos.tls_quic_stun.tls_quic.server_names_len = newstr_len; } } if(!flow->l4.tcp.tls.subprotocol_detected) if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, dNSName, len)) flow->l4.tcp.tls.subprotocol_detected = 1; i += len; } else { #if DEBUG_TLS printf("[TLS] Leftover %u bytes", packet->payload_packet_len - i); #endif break; } } else { break; } } /* while */ if(!matched_name) ndpi_set_risk(flow, NDPI_TLS_CERTIFICATE_MISMATCH); /* Certificate mismatch */ } } } } } if(rdn_len && (flow->protos.tls_quic_stun.tls_quic.subjectDN == NULL)) { flow->protos.tls_quic_stun.tls_quic.subjectDN = ndpi_strdup(rdnSeqBuf); if(flow->detected_protocol_stack[1] == NDPI_PROTOCOL_UNKNOWN) { /* No idea what is happening behind the scenes: let's check the certificate */ u_int32_t proto_id; int rc = ndpi_match_string_value(ndpi_struct->tls_cert_subject_automa.ac_automa, rdnSeqBuf, strlen(rdnSeqBuf),&proto_id); if(rc == 0) { /* Match found */ ndpi_protocol ret 
= { NDPI_PROTOCOL_TLS, proto_id, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; flow->detected_protocol_stack[0] = proto_id, flow->detected_protocol_stack[1] = NDPI_PROTOCOL_TLS; flow->category = ndpi_get_proto_category(ndpi_struct, ret); ndpi_check_subprotocol_risk(flow, proto_id); if(ndpi_struct->tls_cert_cache == NULL) ndpi_struct->tls_cert_cache = ndpi_lru_cache_init(1024); if(ndpi_struct->tls_cert_cache && flow->packet.iph) { u_int32_t key = flow->packet.iph->daddr + flow->packet.tcp->dest; ndpi_lru_add_to_cache(ndpi_struct->tls_cert_cache, key, proto_id); } } } } if(flow->protos.tls_quic_stun.tls_quic.subjectDN && flow->protos.tls_quic_stun.tls_quic.issuerDN && (!strcmp(flow->protos.tls_quic_stun.tls_quic.subjectDN, flow->protos.tls_quic_stun.tls_quic.issuerDN))) ndpi_set_risk(flow, NDPI_TLS_SELFSIGNED_CERTIFICATE); #if DEBUG_TLS printf("[TLS] %s() SubjectDN [%s]\n", __FUNCTION__, rdnSeqBuf); #endif } /* **************************************** */ /* See https://blog.catchpoint.com/2017/05/12/dissecting-tls-using-wireshark/ */ int processCertificate(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; int is_dtls = packet->udp ? 1 : 0; u_int32_t certificates_length, length = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; u_int32_t certificates_offset = 7 + (is_dtls ? 8 : 0); u_int8_t num_certificates_found = 0; SHA1_CTX srv_cert_fingerprint_ctx ; #ifdef DEBUG_TLS printf("[TLS] %s() [payload_packet_len=%u][direction: %u][%02X %02X %02X %02X %02X %02X...]\n", __FUNCTION__, packet->payload_packet_len, packet->packet_direction, packet->payload[0], packet->payload[1], packet->payload[2], packet->payload[3], packet->payload[4], packet->payload[5]); #endif if((packet->payload_packet_len != (length + 4 + (is_dtls ? 8 : 0))) || (packet->payload[1] != 0x0)) { ndpi_set_risk(flow, NDPI_MALFORMED_PACKET); return(-1); /* Invalid length */ } certificates_length = (packet->payload[certificates_offset - 3] << 16) + (packet->payload[certificates_offset - 2] << 8) + packet->payload[certificates_offset - 1]; if((packet->payload[certificates_offset - 3] != 0x0) || ((certificates_length+3) != length)) { ndpi_set_risk(flow, NDPI_MALFORMED_PACKET); return(-2); /* Invalid length */ } /* Now let's process each individual certificates */ while(certificates_offset < certificates_length) { u_int32_t certificate_len = (packet->payload[certificates_offset] << 16) + (packet->payload[certificates_offset+1] << 8) + packet->payload[certificates_offset+2]; /* Invalid lenght */ if((certificate_len == 0) || (packet->payload[certificates_offset] != 0x0) || ((certificates_offset+certificate_len) > (4+certificates_length+(is_dtls ? 
8 : 0)))) { #ifdef DEBUG_TLS printf("[TLS] Invalid length [certificate_len: %u][certificates_offset: %u][%u vs %u]\n", certificate_len, certificates_offset, (certificates_offset+certificate_len), certificates_length); #endif break; } certificates_offset += 3; #ifdef DEBUG_TLS printf("[TLS] Processing %u bytes certificate [%02X %02X %02X]\n", certificate_len, packet->payload[certificates_offset], packet->payload[certificates_offset+1], packet->payload[certificates_offset+2]); #endif if(num_certificates_found++ == 0) /* Dissect only the first certificate that is the one we care */ { /* For SHA-1 we take into account only the first certificate and not all of them */ SHA1Init(&srv_cert_fingerprint_ctx); #ifdef DEBUG_CERTIFICATE_HASH { int i; for(i=0;i<certificate_len;i++) printf("%02X ", packet->payload[certificates_offset+i]); printf("\n"); } #endif SHA1Update(&srv_cert_fingerprint_ctx, &packet->payload[certificates_offset], certificate_len); SHA1Final(flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint, &srv_cert_fingerprint_ctx); flow->l4.tcp.tls.fingerprint_set = 1; uint8_t * sha1 = flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint; const size_t sha1_siz = sizeof(flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint); char sha1_str[20 /* sha1_siz */ * 2 + 1]; static const char hexalnum[] = "0123456789ABCDEF"; for (size_t i = 0; i < sha1_siz; ++i) { u_int8_t lower = (sha1[i] & 0x0F); u_int8_t upper = (sha1[i] & 0xF0) >> 4; sha1_str[i*2] = hexalnum[upper]; sha1_str[i*2 + 1] = hexalnum[lower]; } sha1_str[sha1_siz * 2] = '\0'; #ifdef DEBUG_TLS printf("[TLS] SHA-1: %s\n", sha1_str); #endif if (ndpi_struct->malicious_sha1_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_sha1_automa.ac_automa, sha1_str); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_SHA1_CERTIFICATE); } processCertificateElements(ndpi_struct, flow, certificates_offset, certificate_len); } certificates_offset += certificate_len; } if((ndpi_struct->num_tls_blocks_to_follow != 0) && (flow->l4.tcp.tls.num_tls_blocks >= ndpi_struct->num_tls_blocks_to_follow)) { #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] Enough blocks dissected\n"); #endif flow->extra_packets_func = NULL; /* We're good now */ } return(1); } /* **************************************** */ static int processTLSBlock(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; int ret; switch(packet->payload[0] /* block type */) { case 0x01: /* Client Hello */ case 0x02: /* Server Hello */ processClientServerHello(ndpi_struct, flow, 0); flow->l4.tcp.tls.hello_processed = 1; ndpi_int_tls_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_TLS); #ifdef DEBUG_TLS printf("*** TLS [version: %02X][%s Hello]\n", flow->protos.tls_quic_stun.tls_quic.ssl_version, (packet->payload[0] == 0x01) ? 
"Client" : "Server"); #endif if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0304 /* TLS 1.3 */) && (packet->payload[0] == 0x02 /* Server Hello */)) { flow->l4.tcp.tls.certificate_processed = 1; /* No Certificate with TLS 1.3+ */ } checkTLSSubprotocol(ndpi_struct, flow); break; case 0x0b: /* Certificate */ /* Important: populate the tls union fields only after * ndpi_int_tls_add_connection has been called */ if(flow->l4.tcp.tls.hello_processed) { ret = processCertificate(ndpi_struct, flow); if (ret != 1) { #ifdef DEBUG_TLS printf("[TLS] Error processing certificate: %d\n", ret); #endif } flow->l4.tcp.tls.certificate_processed = 1; } break; default: return(-1); } return(0); } /* **************************************** */ static void ndpi_looks_like_tls(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { // ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, NDPI_PROTOCOL_UNKNOWN); if(flow->guessed_protocol_id == NDPI_PROTOCOL_UNKNOWN) flow->guessed_protocol_id = NDPI_PROTOCOL_TLS; } /* **************************************** */ static int ndpi_search_tls_tcp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int8_t something_went_wrong = 0; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] ndpi_search_tls_tcp() Processing new packet [payload_packet_len: %u]\n", packet->payload_packet_len); #endif if(packet->payload_packet_len == 0) return(1); /* Keep working */ ndpi_search_tls_tcp_memory(ndpi_struct, flow); while(!something_went_wrong) { u_int16_t len, p_len; const u_int8_t *p; u_int8_t content_type; if(flow->l4.tcp.tls.message.buffer_used < 5) return(1); /* Keep working */ len = (flow->l4.tcp.tls.message.buffer[3] << 8) + flow->l4.tcp.tls.message.buffer[4] + 5; if(len > flow->l4.tcp.tls.message.buffer_used) { #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Not enough TLS data [%u < %u][%02X %02X %02X %02X %02X]\n", len, flow->l4.tcp.tls.message.buffer_used, flow->l4.tcp.tls.message.buffer[0], flow->l4.tcp.tls.message.buffer[1], flow->l4.tcp.tls.message.buffer[2], flow->l4.tcp.tls.message.buffer[3], flow->l4.tcp.tls.message.buffer[4]); #endif break; } if(len == 0) { something_went_wrong = 1; break; } #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Processing %u bytes message\n", len); #endif content_type = flow->l4.tcp.tls.message.buffer[0]; /* Overwriting packet payload */ p = packet->payload; p_len = packet->payload_packet_len; /* Backup */ if(content_type == 0x14 /* Change Cipher Spec */) { if(ndpi_struct->skip_tls_blocks_until_change_cipher) { /* Ignore Application Data up until change cipher so in this case we reset the number of observed TLS blocks */ flow->l4.tcp.tls.num_tls_blocks = 0; } } if((len > 9) && (content_type != 0x17 /* Application Data */) && (!flow->l4.tcp.tls.certificate_processed)) { /* Split the element in blocks */ u_int16_t processed = 5; while((processed+4) <= len) { const u_int8_t *block = (const u_int8_t *)&flow->l4.tcp.tls.message.buffer[processed]; u_int32_t block_len = (block[1] << 16) + (block[2] << 8) + block[3]; if(/* (block_len == 0) || */ /* Note blocks can have zero lenght */ (block_len > len) || ((block[1] != 0x0))) { something_went_wrong = 1; break; } packet->payload = block; packet->payload_packet_len = ndpi_min(block_len+4, flow->l4.tcp.tls.message.buffer_used); if((processed+packet->payload_packet_len) > len) { something_went_wrong = 1; break; } processTLSBlock(ndpi_struct, flow); ndpi_looks_like_tls(ndpi_struct, flow); processed 
+= packet->payload_packet_len; } } else { /* Process element as a whole */ if(content_type == 0x17 /* Application Data */) { ndpi_looks_like_tls(ndpi_struct, flow); if(flow->l4.tcp.tls.certificate_processed) { if(flow->l4.tcp.tls.num_tls_blocks < ndpi_struct->num_tls_blocks_to_follow) flow->l4.tcp.tls.tls_application_blocks_len[flow->l4.tcp.tls.num_tls_blocks++] = (packet->packet_direction == 0) ? (len-5) : -(len-5); #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] [len: %u][num_tls_blocks: %u/%u]\n", len-5, flow->l4.tcp.tls.num_tls_blocks, ndpi_struct->num_tls_blocks_to_follow); #endif } } } packet->payload = p; packet->payload_packet_len = p_len; /* Restore */ flow->l4.tcp.tls.message.buffer_used -= len; if(flow->l4.tcp.tls.message.buffer_used > 0) memmove(flow->l4.tcp.tls.message.buffer, &flow->l4.tcp.tls.message.buffer[len], flow->l4.tcp.tls.message.buffer_used); else break; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Left memory buffer %u bytes\n", flow->l4.tcp.tls.message.buffer_used); #endif } if(something_went_wrong || ((ndpi_struct->num_tls_blocks_to_follow > 0) && (flow->l4.tcp.tls.num_tls_blocks == ndpi_struct->num_tls_blocks_to_follow)) ) { #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] No more blocks\n"); #endif flow->check_extra_packets = 0; flow->extra_packets_func = NULL; return(0); /* That's all */ } else return(1); } /* **************************************** */ static int ndpi_search_tls_udp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int32_t handshake_len; u_int16_t p_len, processed; const u_int8_t *p; u_int8_t no_dtls = 0, change_cipher_found = 0; #ifdef DEBUG_TLS printf("[TLS] %s()\n", __FUNCTION__); #endif /* Overwriting packet payload */ p = packet->payload, p_len = packet->payload_packet_len; /* Backup */ /* Split the element in blocks */ processed = 0; while(processed + 13 < p_len) { u_int32_t block_len; const u_int8_t *block = (const u_int8_t *)&p[processed]; if((block[0] != 0x16 && block[0] != 0x14) || /* Handshake, change-cipher-spec */ (block[1] != 0xfe) || /* We ignore old DTLS versions */ ((block[2] != 0xff) && (block[2] != 0xfd))) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid block 0x%x or old version 0x%x-0x%x-0x%x\n", block[0], block[1], block[2], block[3]); #endif no_dtls = 1; break; } block_len = ntohs(*((u_int16_t*)&block[11])); #ifdef DEBUG_TLS printf("[TLS] DTLS block len: %d\n", block_len); #endif if (block_len == 0 || (processed + block_len + 12 >= p_len)) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid block len %d (processed %d, p_len %d)\n", block_len, processed, p_len); #endif no_dtls = 1; break; } /* We process only handshake msgs */ if(block[0] == 0x16) { if (processed + block_len + 13 > p_len) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid len %d %d %d\n", processed, block_len, p_len); #endif no_dtls = 1; break; } /* TODO: handle (certificate) fragments */ handshake_len = (block[14] << 16) + (block[15] << 8) + block[16]; if((handshake_len + 12) != block_len) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid handshake_len %d, %d)\n", handshake_len, block_len); #endif no_dtls = 1; break; } packet->payload = &block[13]; packet->payload_packet_len = block_len; processTLSBlock(ndpi_struct, flow); } else { /* Change-cipher-spec: any subsequent block might be encrypted */ #ifdef DEBUG_TLS printf("[TLS] Change-cipher-spec\n"); #endif change_cipher_found = 1; processed += block_len + 13; break; } processed += block_len + 13; } if(processed != p_len) { #ifdef DEBUG_TLS 
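/* Leftover bytes after walking all DTLS records in this datagram: unless a change-cipher-spec was seen (subsequent records may be encrypted), the flow is treated as not DTLS below. */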
printf("[TLS] DTLS invalid processed len %d/%d (%d)\n", processed, p_len, change_cipher_found); #endif if(!change_cipher_found) no_dtls = 1; } packet->payload = p; packet->payload_packet_len = p_len; /* Restore */ if(no_dtls || change_cipher_found) { NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return(0); /* That's all */ } else { return(1); /* Keep working */ } } /* **************************************** */ static void tlsInitExtraPacketProcessing(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { flow->check_extra_packets = 1; /* At most 12 packets should almost always be enough to find the server certificate if it's there */ flow->max_extra_packets_to_check = 12 + (ndpi_struct->num_tls_blocks_to_follow*4); flow->extra_packets_func = (flow->packet.udp != NULL) ? ndpi_search_tls_udp : ndpi_search_tls_tcp; } /* **************************************** */ static void tlsCheckUncommonALPN(struct ndpi_flow_struct *flow) { /* see: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml */ static char const * const common_alpns[] = { "http/0.9", "http/1.0", "http/1.1", "spdy/1", "spdy/2", "spdy/3", "spdy/3.1", "stun.turn", "stun.nat-discovery", "h2", "h2c", "h2-16", "h2-15", "h2-14", "webrtc", "c-webrtc", "ftp", "imap", "pop3", "managesieve", "coap", "xmpp-client", "xmpp-server", "acme-tls/1", "mqtt", "dot", "ntske/1", "sunrpc", "h3", "smb", "irc", /* QUIC ALPNs */ "h3-T051", "h3-T050", "h3-32", "h3-30", "h3-29", "h3-28", "h3-27", "h3-24", "h3-22", "hq-30", "hq-29", "hq-28", "hq-27", "h3-fb-05", "h1q-fb", "doq-i00" }; /* * If the ALPN list increases in size, iterating over all items for every incoming ALPN may * have a performance impact. A hash map could solve this issue. */ char * alpn_start = flow->protos.tls_quic_stun.tls_quic.alpn; char * comma_or_nul = alpn_start; do { comma_or_nul = strchr(comma_or_nul, ','); if (comma_or_nul == NULL) { comma_or_nul = alpn_start + strlen(alpn_start); } int alpn_found = 0; int alpn_len = comma_or_nul - alpn_start; char const * const alpn = alpn_start; for (size_t i = 0; i < sizeof(common_alpns)/sizeof(common_alpns[0]); ++i) { if (strlen(common_alpns[i]) == alpn_len && strncmp(alpn, common_alpns[i], alpn_len) == 0) { alpn_found = 1; break; } } if (alpn_found == 0) { #ifdef DEBUG_TLS printf("TLS uncommon ALPN found: %.*s\n", alpn_len, alpn); #endif ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } alpn_start = comma_or_nul + 1; } while (*(comma_or_nul++) != '\0'); } /* **************************************** */ static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol) { #if DEBUG_TLS printf("[TLS] %s()\n", __FUNCTION__); #endif if((flow->packet.udp != NULL) && (protocol == NDPI_PROTOCOL_TLS)) protocol = NDPI_PROTOCOL_DTLS; if((flow->detected_protocol_stack[0] == protocol) || (flow->detected_protocol_stack[1] == protocol)) { if(!flow->check_extra_packets) tlsInitExtraPacketProcessing(ndpi_struct, flow); return; } if(protocol != NDPI_PROTOCOL_TLS) ; else protocol = ndpi_tls_refine_master_protocol(ndpi_struct, flow, protocol); ndpi_set_detected_protocol(ndpi_struct, flow, protocol, protocol); tlsInitExtraPacketProcessing(ndpi_struct, flow); } /* **************************************** */ int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version) { struct ndpi_packet_struct *packet = &flow->packet; union ja3_info ja3; u_int8_t invalid_ja3 = 
0; u_int16_t tls_version, ja3_str_len; char ja3_str[JA3_STR_LEN]; ndpi_MD5_CTX ctx; u_char md5_hash[16]; int i; u_int16_t total_len; u_int8_t handshake_type; char buffer[64] = { '\0' }; int is_quic = (quic_version != 0); int is_dtls = packet->udp && (!is_quic); #ifdef DEBUG_TLS printf("TLS %s() called\n", __FUNCTION__); #endif memset(&ja3, 0, sizeof(ja3)); handshake_type = packet->payload[0]; total_len = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; if((total_len > packet->payload_packet_len) || (packet->payload[1] != 0x0)) return(0); /* Not found */ total_len = packet->payload_packet_len; /* At least "magic" 3 bytes, null for string end, otherwise no need to waste cpu cycles */ if(total_len > 4) { u_int16_t base_offset = (!is_dtls) ? 38 : 46; u_int16_t version_offset = (!is_dtls) ? 4 : 12; u_int16_t offset = (!is_dtls) ? 38 : 46, extension_len, j; u_int8_t session_id_len = 0; if((base_offset >= total_len) || (version_offset + 1) >= total_len) return 0; /* Not found */ session_id_len = packet->payload[base_offset]; #ifdef DEBUG_TLS printf("TLS [len: %u][handshake_type: %02X]\n", packet->payload_packet_len, handshake_type); #endif tls_version = ntohs(*((u_int16_t*)&packet->payload[version_offset])); if(handshake_type == 0x02 /* Server Hello */) { int i, rc; ja3.server.tls_handshake_version = tls_version; #ifdef DEBUG_TLS printf("TLS Server Hello [version: 0x%04X]\n", tls_version); #endif /* The server hello decides about the TLS version of this flow https://networkengineering.stackexchange.com/questions/55752/why-does-wireshark-show-version-tls-1-2-here-instead-of-tls-1-3 */ if(packet->udp) offset += session_id_len + 1; else { if(tls_version < 0x7F15 /* TLS 1.3 lacks of session id */) offset += session_id_len+1; } if((offset+3) > packet->payload_packet_len) return(0); /* Not found */ ja3.server.num_cipher = 1, ja3.server.cipher[0] = ntohs(*((u_int16_t*)&packet->payload[offset])); if((flow->protos.tls_quic_stun.tls_quic.server_unsafe_cipher = ndpi_is_safe_ssl_cipher(ja3.server.cipher[0])) == 1) ndpi_set_risk(flow, NDPI_TLS_WEAK_CIPHER); flow->protos.tls_quic_stun.tls_quic.server_cipher = ja3.server.cipher[0]; #ifdef DEBUG_TLS printf("TLS [server][session_id_len: %u][cipher: %04X]\n", session_id_len, ja3.server.cipher[0]); #endif offset += 2 + 1; if((offset + 1) < packet->payload_packet_len) /* +1 because we are goint to read 2 bytes */ extension_len = ntohs(*((u_int16_t*)&packet->payload[offset])); else extension_len = 0; #ifdef DEBUG_TLS printf("TLS [server][extension_len: %u]\n", extension_len); #endif offset += 2; for(i=0; i<extension_len; ) { u_int16_t extension_id, extension_len; if((offset+4) > packet->payload_packet_len) break; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset])); extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+2])); if(ja3.server.num_tls_extension < MAX_NUM_JA3) ja3.server.tls_extension[ja3.server.num_tls_extension++] = extension_id; #ifdef DEBUG_TLS printf("TLS [server][extension_id: %u/0x%04X][len: %u]\n", extension_id, extension_id, extension_len); #endif if(extension_id == 43 /* supported versions */) { if(extension_len >= 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[offset+4])); #ifdef DEBUG_TLS printf("TLS [server] [TLS version: 0x%04X]\n", tls_version); #endif flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.server.tls_supported_version = tls_version; } } else if(extension_id == 16 /* application_layer_protocol_negotiation (ALPN) */) { u_int16_t s_offset = offset+4; u_int16_t 
tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Server TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len) { #ifdef DEBUG_TLS printf("Server TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) { alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; } s_offset += alpn_len, alpn_str_len += alpn_len;; } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Server TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); if(flow->protos.tls_quic_stun.tls_quic.alpn != NULL) tlsCheckUncommonALPN(flow); snprintf(ja3.server.alpn, sizeof(ja3.server.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.server.alpn[i] != '\0'; i++) if(ja3.server.alpn[i] == ',') ja3.server.alpn[i] = '-'; } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+4 + 1; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.server.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.server.elliptic_curve_point_format[ja3.server.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid num elliptic %u\n", ja3.server.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } i += 4 + extension_len, offset += 4 + extension_len; } /* for */ ja3_str_len = snprintf(ja3_str, sizeof(ja3_str), "%u,", ja3.server.tls_handshake_version); for(i=0; i<ja3.server.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.cipher[i]); if(rc <= 0) break; else ja3_str_len += rc; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if(rc > 0 && ja3_str_len + rc < JA3_STR_LEN) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.server.num_tls_extension; i++) { int rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.tls_extension[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(ndpi_struct->enable_ja3_plus) { for(i=0; i<ja3.server.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.server.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ja3.server.alpn[0] != '\0') { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",%s", ja3.server.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_TLS printf("[JA3+] Server: %s \n", ja3_str); #endif } else { #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", ja3_str); #endif } ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { int rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_server[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_server)-j, "%02x", md5_hash[i]); if(rc <= 0) break; else j += rc; } #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_server); #endif } else if(handshake_type == 0x01 /* Client Hello */) { u_int16_t cipher_len, cipher_offset; u_int8_t cookie_len = 0; flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.client.tls_handshake_version = tls_version; if(flow->protos.tls_quic_stun.tls_quic.ssl_version < 0x0302) /* TLSv1.1 */ ndpi_set_risk(flow, NDPI_TLS_OBSOLETE_VERSION); if((session_id_len+base_offset+3) > packet->payload_packet_len) return(0); /* Not found */ if(!is_dtls) { cipher_len = packet->payload[session_id_len+base_offset+2] + (packet->payload[session_id_len+base_offset+1] << 8); cipher_offset = base_offset + session_id_len + 3; } else { cookie_len = packet->payload[base_offset+session_id_len+1]; #ifdef DEBUG_TLS printf("[JA3] Client: DTLS cookie len %d\n", cookie_len); #endif if((session_id_len+base_offset+cookie_len+4) > packet->payload_packet_len) return(0); /* Not found */ cipher_len = ntohs(*((u_int16_t*)&packet->payload[base_offset+session_id_len+cookie_len+2])); cipher_offset = base_offset + session_id_len + cookie_len + 4; } #ifdef DEBUG_TLS printf("Client TLS [client cipher_len: %u][tls_version: 0x%04X]\n", cipher_len, tls_version); #endif if((cipher_offset+cipher_len) <= total_len) { u_int8_t safari_ciphers = 0, chrome_ciphers = 0; for(i=0; i<cipher_len;) { u_int16_t *id = (u_int16_t*)&packet->payload[cipher_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [cipher suite: %u/0x%04X] [%d/%u]\n", ntohs(*id), ntohs(*id), i, cipher_len); #endif if((*id == 0) || (packet->payload[cipher_offset+i] != packet->payload[cipher_offset+i+1])) { u_int16_t cipher_id = ntohs(*id); /* Skip GREASE [https://tools.ietf.org/id/draft-ietf-tls-grease-01.html] https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ if(ja3.client.num_cipher < MAX_NUM_JA3) ja3.client.cipher[ja3.client.num_cipher++] = cipher_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid cipher %u\n", ja3.client.num_cipher); #endif } switch(cipher_id) { case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: safari_ciphers++; break; case TLS_CIPHER_GREASE_RESERVED_0: case TLS_AES_128_GCM_SHA256: case TLS_AES_256_GCM_SHA384: case TLS_CHACHA20_POLY1305_SHA256: chrome_ciphers++; break; case TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_GCM_SHA256: case 
TLS_RSA_WITH_AES_256_GCM_SHA384: safari_ciphers++, chrome_ciphers++; break; } } i += 2; } /* for */ if(chrome_ciphers == 13) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 1; else if(safari_ciphers == 12) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 1; } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (cipher_offset+cipher_len), total_len); #endif } offset = base_offset + session_id_len + cookie_len + cipher_len + 2; offset += (!is_dtls) ? 1 : 2; if(offset < total_len) { u_int16_t compression_len; u_int16_t extensions_len; compression_len = packet->payload[offset]; offset++; #ifdef DEBUG_TLS printf("Client TLS [compression_len: %u]\n", compression_len); #endif // offset += compression_len + 3; offset += compression_len; if(offset+1 < total_len) { extensions_len = ntohs(*((u_int16_t*)&packet->payload[offset])); offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extensions_len: %u]\n", extensions_len); #endif if((extensions_len+offset) <= total_len) { /* Move to the first extension Type is u_int to avoid possible overflow on extension_len addition */ u_int extension_offset = 0; u_int32_t j; while(extension_offset < extensions_len && offset+extension_offset+4 <= total_len) { u_int16_t extension_id, extension_len, extn_off = offset+extension_offset; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extension_id: %u][extension_len: %u]\n", extension_id, extension_len); #endif if((extension_id == 0) || (packet->payload[extn_off] != packet->payload[extn_off+1])) { /* Skip GREASE */ if(ja3.client.num_tls_extension < MAX_NUM_JA3) ja3.client.tls_extension[ja3.client.num_tls_extension++] = extension_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid extensions %u\n", ja3.client.num_tls_extension); #endif } } if(extension_id == 0 /* server name */) { u_int16_t len; #ifdef DEBUG_TLS printf("[TLS] Extensions: found server name\n"); #endif if((offset+extension_offset+4) < packet->payload_packet_len) { len = (packet->payload[offset+extension_offset+3] << 8) + packet->payload[offset+extension_offset+4]; len = (u_int)ndpi_min(len, sizeof(buffer)-1); if((offset+extension_offset+5+len) <= packet->payload_packet_len) { strncpy(buffer, (char*)&packet->payload[offset+extension_offset+5], len); buffer[len] = '\0'; cleanupServerName(buffer, sizeof(buffer)); snprintf(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, sizeof(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name), "%s", buffer); #ifdef DEBUG_TLS printf("[TLS] SNI: [%s]\n", buffer); #endif if(!is_quic) { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } else { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_QUIC, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } if(ndpi_check_dga_name(ndpi_struct, flow, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, 1)) { char *sni = flow->protos.tls_quic_stun.tls_quic.client_requested_server_name; int len = strlen(sni); #ifdef DEBUG_TLS printf("[TLS] SNI: (DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif if((len >= 4) /* Check if it ends in .com or .net */ && ((strcmp(&sni[len-4], ".com") == 0) || 
(strcmp(&sni[len-4], ".net") == 0)) && (strncmp(sni, "www.", 4) == 0)) /* Not starting with www.... */ ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TOR, NDPI_PROTOCOL_TLS); } else { #ifdef DEBUG_TLS printf("[TLS] SNI: (NO DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif } } else { #ifdef DEBUG_TLS printf("[TLS] Extensions server len too short: %u vs %u\n", offset+extension_offset+5+len, packet->payload_packet_len); #endif } } } else if(extension_id == 10 /* supported groups */) { u_int16_t s_offset = offset+extension_offset + 2; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveGroups: len=%u]\n", extension_len); #endif if((s_offset+extension_len-2) <= total_len) { for(i=0; i<extension_len-2;) { u_int16_t s_group = ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); #ifdef DEBUG_TLS printf("Client TLS [EllipticCurve: %u/0x%04X]\n", s_group, s_group); #endif if((s_group == 0) || (packet->payload[s_offset+i] != packet->payload[s_offset+i+1])) { /* Skip GREASE */ if(ja3.client.num_elliptic_curve < MAX_NUM_JA3) ja3.client.elliptic_curve[ja3.client.num_elliptic_curve++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve); #endif } } i += 2; } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (s_offset+extension_len-1), total_len); #endif } } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+extension_offset + 1; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.client.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.client.elliptic_curve_point_format[ja3.client.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } else if(extension_id == 13 /* signature algorithms */) { u_int16_t s_offset = offset+extension_offset, safari_signature_algorithms = 0, chrome_signature_algorithms = 0; u_int16_t tot_signature_algorithms_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: block_len=%u/len=%u]\n", extension_len, tot_signature_algorithms_len); #endif s_offset += 2; tot_signature_algorithms_len = ndpi_min((sizeof(ja3.client.signature_algorithms) / 2) - 1, tot_signature_algorithms_len); #ifdef TLS_HANDLE_SIGNATURE_ALGORITMS flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms = ndpi_min(tot_signature_algorithms_len / 2, MAX_NUM_TLS_SIGNATURE_ALGORITHMS); memcpy(flow->protos.tls_quic_stun.tls_quic.client_signature_algorithms, &packet->payload[s_offset], 2 /* 16 bit */*flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms); #endif for(i=0; i<tot_signature_algorithms_len; i++) { int rc = snprintf(&ja3.client.signature_algorithms[i*2], sizeof(ja3.client.signature_algorithms)-i*2, "%02X", packet->payload[s_offset+i]); if(rc < 0) break; } for(i=0; i<tot_signature_algorithms_len; i+=2) { u_int16_t cipher_id = (u_int16_t)ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); // printf("=>> %04X\n", 
cipher_id); switch(cipher_id) { case ECDSA_SECP521R1_SHA512: flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls = 1; break; case ECDSA_SECP256R1_SHA256: case ECDSA_SECP384R1_SHA384: case RSA_PKCS1_SHA256: case RSA_PKCS1_SHA384: case RSA_PKCS1_SHA512: case RSA_PSS_RSAE_SHA256: case RSA_PSS_RSAE_SHA384: case RSA_PSS_RSAE_SHA512: chrome_signature_algorithms++, safari_signature_algorithms++; break; } } if(flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0, flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; if(safari_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0; if(chrome_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; ja3.client.signature_algorithms[i*2] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: %s]\n", ja3.client.signature_algorithms); #endif } else if(extension_id == 16 /* application_layer_protocol_negotiation */) { u_int16_t s_offset = offset+extension_offset; u_int16_t tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Client TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len && (s_offset + alpn_len) <= total_len) { #ifdef DEBUG_TLS printf("Client TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; s_offset += alpn_len, alpn_str_len += alpn_len;; } else break; } else break; } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); snprintf(ja3.client.alpn, sizeof(ja3.client.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.client.alpn[i] != '\0'; i++) if(ja3.client.alpn[i] == ',') ja3.client.alpn[i] = '-'; } else if(extension_id == 43 /* supported versions */) { u_int16_t s_offset = offset+extension_offset; u_int8_t version_len = packet->payload[s_offset]; char version_str[256]; u_int8_t version_str_len = 0; version_str[0] = 0; #ifdef DEBUG_TLS printf("Client TLS [TLS version len: %u]\n", version_len); #endif if(version_len == (extension_len-1)) { u_int8_t j; u_int16_t supported_versions_offset = 0; s_offset++; // careful not to overflow and loop forever with u_int8_t for(j=0; j+1<version_len; j += 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[s_offset+j])); u_int8_t unknown_tls_version; #ifdef DEBUG_TLS printf("Client TLS [TLS version: %s/0x%04X]\n", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version), tls_version); #endif if((version_str_len+8) < sizeof(version_str)) { int rc = snprintf(&version_str[version_str_len], sizeof(version_str) - version_str_len, "%s%s", (version_str_len > 0) ? 
"," : "", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version)); if(rc <= 0) break; else version_str_len += rc; rc = snprintf(&ja3.client.supported_versions[supported_versions_offset], sizeof(ja3.client.supported_versions)-supported_versions_offset, "%s%04X", (j > 0) ? "-" : "", tls_version); if(rc > 0) supported_versions_offset += rc; } } #ifdef DEBUG_TLS printf("Client TLS [SUPPORTED_VERSIONS: %s]\n", ja3.client.supported_versions); #endif if(flow->protos.tls_quic_stun.tls_quic.tls_supported_versions == NULL) flow->protos.tls_quic_stun.tls_quic.tls_supported_versions = ndpi_strdup(version_str); } } else if(extension_id == 65486 /* encrypted server name */) { /* - https://tools.ietf.org/html/draft-ietf-tls-esni-06 - https://blog.cloudflare.com/encrypted-sni/ */ u_int16_t e_offset = offset+extension_offset; u_int16_t initial_offset = e_offset; u_int16_t e_sni_len, cipher_suite = ntohs(*((u_int16_t*)&packet->payload[e_offset])); flow->protos.tls_quic_stun.tls_quic.encrypted_sni.cipher_suite = cipher_suite; e_offset += 2; /* Cipher suite len */ /* Key Share Entry */ e_offset += 2; /* Group */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { /* Record Digest */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { e_sni_len = ntohs(*((u_int16_t*)&packet->payload[e_offset])); e_offset += 2; if((e_offset+e_sni_len-extension_len-initial_offset) >= 0 && e_offset+e_sni_len < packet->payload_packet_len) { #ifdef DEBUG_ENCRYPTED_SNI printf("Client TLS [Encrypted Server Name len: %u]\n", e_sni_len); #endif if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni = (char*)ndpi_malloc(e_sni_len*2+1); if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni) { u_int16_t i, off; for(i=e_offset, off=0; i<(e_offset+e_sni_len); i++) { int rc = sprintf(&flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off], "%02X", packet->payload[i] & 0XFF); if(rc <= 0) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off] = '\0'; break; } else off += rc; } } } } } } } else if(extension_id == 65445 || /* QUIC transport parameters (drafts version) */ extension_id == 57) { /* QUIC transport parameters (final version) */ u_int16_t s_offset = offset+extension_offset; uint16_t final_offset; int using_var_int = is_version_with_var_int_transport_params(quic_version); if(!using_var_int) { if(s_offset+1 >= total_len) { final_offset = 0; /* Force skipping extension */ } else { u_int16_t seq_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); s_offset += 2; final_offset = MIN(total_len, s_offset + seq_len); } } else { final_offset = MIN(total_len, s_offset + extension_len); } while(s_offset < final_offset) { u_int64_t param_type, param_len; if(!using_var_int) { if(s_offset+3 >= final_offset) break; param_type = ntohs(*((u_int16_t*)&packet->payload[s_offset])); param_len = ntohs(*((u_int16_t*)&packet->payload[s_offset + 2])); s_offset += 4; } else { if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_type); if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_len); } #ifdef DEBUG_TLS printf("Client TLS [QUIC TP: Param 0x%x Len %d]\n", 
(int)param_type, (int)param_len); #endif if(s_offset+param_len > final_offset) break; if(param_type==0x3129) { #ifdef DEBUG_TLS printf("UA [%.*s]\n", (int)param_len, &packet->payload[s_offset]); #endif http_process_user_agent(ndpi_struct, flow, &packet->payload[s_offset], param_len); break; } s_offset += param_len; } } extension_offset += extension_len; /* Move to the next extension */ #ifdef DEBUG_TLS printf("Client TLS [extension_offset/len: %u/%u]\n", extension_offset, extension_len); #endif } /* while */ if(!invalid_ja3) { int rc; compute_ja3c: ja3_str_len = snprintf(ja3_str, sizeof(ja3_str), "%u,", ja3.client.tls_handshake_version); for(i=0; i<ja3.client.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.cipher[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_tls_extension; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.tls_extension[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_elliptic_curve; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.elliptic_curve[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; for(i=0; i<ja3.client.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.client.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ndpi_struct->enable_ja3_plus) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",%s,%s,%s", ja3.client.signature_algorithms, ja3.client.supported_versions, ja3.client.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_JA3C printf("[JA3+] Client: %s \n", ja3_str); #endif ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_client[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_client)-j, "%02x", md5_hash[i]); if(rc > 0) j += rc; else break; } #ifdef DEBUG_JA3C printf("[JA3] Client: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_client); #endif if(ndpi_struct->malicious_ja3_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_ja3_automa.ac_automa, flow->protos.tls_quic_stun.tls_quic.ja3_client); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_JA3); } } /* Before returning to the caller we need to make a final check */ if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0303) /* >= TLSv1.2 */ && (flow->protos.tls_quic_stun.tls_quic.alpn == NULL) /* No ALPN */) { ndpi_set_risk(flow, NDPI_TLS_NOT_CARRYING_HTTPS); } /* Suspicious Domain Fronting: https://github.com/SixGenInc/Noctilucent/blob/master/docs/ */ if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni && flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] != '\0') { ndpi_set_risk(flow, NDPI_TLS_SUSPICIOUS_ESNI_USAGE); } /* Add check for missing SNI */ if((flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == 0) && (flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0302) /* TLSv1.1 */ && (flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) /* No ESNI */ ) { /* This is a bit suspicious */ ndpi_set_risk(flow, NDPI_TLS_MISSING_SNI); } return(2 /* Client Certificate */); } else { #ifdef DEBUG_TLS printf("[TLS] Client: too short [%u vs %u]\n", (extensions_len+offset), total_len); #endif } } else if(offset == total_len) { /* TLS does not have extensions etc */ goto compute_ja3c; } } else { #ifdef DEBUG_TLS printf("[JA3] Client: invalid length detected\n"); #endif } } } return(0); /* Not found */ } /* **************************************** */ static void ndpi_search_tls_wrapper(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; #ifdef DEBUG_TLS printf("==>> %s() %u [len: %u][version: %u]\n", __FUNCTION__, flow->guessed_host_protocol_id, packet->payload_packet_len, flow->protos.tls_quic_stun.tls_quic.ssl_version); #endif if(packet->udp != NULL) ndpi_search_tls_udp(ndpi_struct, flow); else ndpi_search_tls_tcp(ndpi_struct, flow); } /* **************************************** */ void init_tls_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id, NDPI_PROTOCOL_BITMASK *detection_bitmask) { ndpi_set_bitmask_protocol_detection("TLS", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_TLS, ndpi_search_tls_wrapper, NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_TCP_WITH_PAYLOAD_WITHOUT_RETRANSMISSION, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; /* *************************************************** */ ndpi_set_bitmask_protocol_detection("DTLS", ndpi_struct, detection_bitmask, *id, 
NDPI_PROTOCOL_DTLS, ndpi_search_tls_wrapper, NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_UDP_WITH_PAYLOAD, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; }
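/*
 * Illustrative sketch only (not part of the nDPI dissector above): it shows how the
 * JA3 client string is assembled from the collected ClientHello fields before being
 * MD5-hashed, mirroring the snprintf loops used in processClientServerHello().
 * All values, the helper append_u16_list() and SKETCH_JA3_STR_LEN are made-up
 * examples/assumptions; the real code hashes the resulting string with ndpi_MD5*().
 */
#include <stdio.h>
#include <string.h>

#define SKETCH_JA3_STR_LEN 1024

/* Append "v1-v2-...-vN" to buf starting at offset 'used'; return the new offset. */
static int append_u16_list(char *buf, int used, size_t buf_len,
                           const unsigned short *vals, int num_vals) {
  for(int i = 0; i < num_vals; i++) {
    int rc = snprintf(&buf[used], buf_len - used, "%s%u",
                      (i > 0) ? "-" : "", vals[i]);
    if(rc <= 0 || used + rc >= (int)buf_len) break;
    used += rc;
  }
  return used;
}

int main(void) {
  /* Example ClientHello fields (fabricated for illustration only). */
  unsigned short version = 0x0303;                   /* TLS 1.2 -> printed as 771 */
  unsigned short ciphers[] = { 4865, 4866, 49195 };
  unsigned short extensions[] = { 0, 10, 11, 13, 16, 43 };
  unsigned short curves[] = { 29, 23, 24 };
  unsigned short point_formats[] = { 0 };

  char ja3[SKETCH_JA3_STR_LEN];
  int used = snprintf(ja3, sizeof(ja3), "%u,", version);

  /* JA3 layout: Version,Ciphers,Extensions,EllipticCurves,PointFormats */
  used = append_u16_list(ja3, used, sizeof(ja3), ciphers, 3);
  used += snprintf(&ja3[used], sizeof(ja3) - used, ",");
  used = append_u16_list(ja3, used, sizeof(ja3), extensions, 6);
  used += snprintf(&ja3[used], sizeof(ja3) - used, ",");
  used = append_u16_list(ja3, used, sizeof(ja3), curves, 3);
  used += snprintf(&ja3[used], sizeof(ja3) - used, ",");
  used = append_u16_list(ja3, used, sizeof(ja3), point_formats, 1);

  /* The dissector would now MD5-hash this string (ndpi_MD5Init/Update/Final)
     and hex-encode the 16-byte digest to obtain the JA3 fingerprint. */
  printf("JA3 string: %s\n", ja3);
  return 0;
}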
/* * tls.c - TLS/TLS/DTLS dissector * * Copyright (C) 2016-21 - ntop.org * * nDPI is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * nDPI is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with nDPI. If not, see <http://www.gnu.org/licenses/>. * */ #include "ndpi_protocol_ids.h" #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_TLS #include "ndpi_api.h" #include "ndpi_md5.h" #include "ndpi_sha1.h" #include "ndpi_encryption.h" extern char *strptime(const char *s, const char *format, struct tm *tm); extern int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version); extern int http_process_user_agent(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, const u_int8_t *ua_ptr, u_int16_t ua_ptr_len); /* QUIC/GQUIC stuff */ extern int quic_len(const uint8_t *buf, uint64_t *value); extern int quic_len_buffer_still_required(uint8_t value); extern int is_version_with_var_int_transport_params(uint32_t version); // #define DEBUG_TLS_MEMORY 1 // #define DEBUG_TLS 1 // #define DEBUG_TLS_BLOCKS 1 // #define DEBUG_CERTIFICATE_HASH // #define DEBUG_JA3C 1 /* #define DEBUG_FINGERPRINT 1 */ /* #define DEBUG_ENCRYPTED_SNI 1 */ /* **************************************** */ /* https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ #define JA3_STR_LEN 1024 #define MAX_NUM_JA3 512 #define MAX_JA3_STRLEN 256 union ja3_info { struct { u_int16_t tls_handshake_version; u_int16_t num_cipher, cipher[MAX_NUM_JA3]; u_int16_t num_tls_extension, tls_extension[MAX_NUM_JA3]; u_int16_t num_elliptic_curve, elliptic_curve[MAX_NUM_JA3]; u_int16_t num_elliptic_curve_point_format, elliptic_curve_point_format[MAX_NUM_JA3]; char signature_algorithms[MAX_JA3_STRLEN], supported_versions[MAX_JA3_STRLEN], alpn[MAX_JA3_STRLEN]; } client; struct { u_int16_t tls_handshake_version; u_int16_t num_cipher, cipher[MAX_NUM_JA3]; u_int16_t num_tls_extension, tls_extension[MAX_NUM_JA3]; u_int16_t tls_supported_version; u_int16_t num_elliptic_curve_point_format, elliptic_curve_point_format[MAX_NUM_JA3]; char alpn[MAX_JA3_STRLEN]; } server; /* Used for JA3+ */ }; /* NOTE How to view the certificate fingerprint 1. Using wireshark save the certificate on certificate.bin file as explained in https://security.stackexchange.com/questions/123851/how-can-i-extract-the-certificate-from-this-pcap-file 2. openssl x509 -inform der -in certificate.bin -text > certificate.der 3. openssl x509 -noout -fingerprint -sha1 -inform pem -in certificate.der SHA1 Fingerprint=15:9A:76.... $ shasum -a 1 www.grc.com.bin 159a76..... 
*/ #define NDPI_MAX_TLS_REQUEST_SIZE 10000 /* skype.c */ extern u_int8_t is_skype_flow(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow); /* stun.c */ extern u_int32_t get_stun_lru_key(struct ndpi_flow_struct *flow, u_int8_t rev); static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol); /* **************************************** */ static u_int32_t ndpi_tls_refine_master_protocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol) { struct ndpi_packet_struct *packet = &flow->packet; // protocol = NDPI_PROTOCOL_TLS; if(packet->tcp != NULL) { switch(protocol) { case NDPI_PROTOCOL_TLS: { /* In case of TLS there are probably sub-protocols such as IMAPS that can be otherwise detected */ u_int16_t sport = ntohs(packet->tcp->source); u_int16_t dport = ntohs(packet->tcp->dest); if((sport == 465) || (dport == 465) || (sport == 587) || (dport == 587)) protocol = NDPI_PROTOCOL_MAIL_SMTPS; else if((sport == 993) || (dport == 993) || (flow->l4.tcp.mail_imap_starttls) ) protocol = NDPI_PROTOCOL_MAIL_IMAPS; else if((sport == 995) || (dport == 995)) protocol = NDPI_PROTOCOL_MAIL_POPS; } break; } } return(protocol); } /* **************************************** */ void ndpi_search_tls_tcp_memory(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int avail_bytes; /* TCP */ #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Handling TCP/TLS flow [payload_len: %u][buffer_len: %u][direction: %u]\n", packet->payload_packet_len, flow->l4.tcp.tls.message.buffer_len, packet->packet_direction); #endif if(flow->l4.tcp.tls.message.buffer == NULL) { /* Allocate buffer */ flow->l4.tcp.tls.message.buffer_len = 2048, flow->l4.tcp.tls.message.buffer_used = 0; flow->l4.tcp.tls.message.buffer = (u_int8_t*)ndpi_malloc(flow->l4.tcp.tls.message.buffer_len); if(flow->l4.tcp.tls.message.buffer == NULL) return; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Allocating %u buffer\n", flow->l4.tcp.tls.message.buffer_len); #endif } avail_bytes = flow->l4.tcp.tls.message.buffer_len - flow->l4.tcp.tls.message.buffer_used; if(avail_bytes < packet->payload_packet_len) { u_int new_len = flow->l4.tcp.tls.message.buffer_len + packet->payload_packet_len - avail_bytes + 1; void *newbuf = ndpi_realloc(flow->l4.tcp.tls.message.buffer, flow->l4.tcp.tls.message.buffer_len, new_len); if(!newbuf) return; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Enlarging %u -> %u buffer\n", flow->l4.tcp.tls.message.buffer_len, new_len); #endif flow->l4.tcp.tls.message.buffer = (u_int8_t*)newbuf; flow->l4.tcp.tls.message.buffer_len = new_len; avail_bytes = flow->l4.tcp.tls.message.buffer_len - flow->l4.tcp.tls.message.buffer_used; } if(packet->payload_packet_len > 0 && avail_bytes >= packet->payload_packet_len) { u_int8_t ok = 0; if(flow->l4.tcp.tls.message.next_seq[packet->packet_direction] != 0) { if(ntohl(packet->tcp->seq) == flow->l4.tcp.tls.message.next_seq[packet->packet_direction]) ok = 1; } else ok = 1; if(ok) { memcpy(&flow->l4.tcp.tls.message.buffer[flow->l4.tcp.tls.message.buffer_used], packet->payload, packet->payload_packet_len); flow->l4.tcp.tls.message.buffer_used += packet->payload_packet_len; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Copied data to buffer [%u/%u bytes][direction: %u][tcp_seq: %u][next: %u]\n", flow->l4.tcp.tls.message.buffer_used, flow->l4.tcp.tls.message.buffer_len, packet->packet_direction, 
ntohl(packet->tcp->seq), ntohl(packet->tcp->seq)+packet->payload_packet_len); #endif flow->l4.tcp.tls.message.next_seq[packet->packet_direction] = ntohl(packet->tcp->seq)+packet->payload_packet_len; } else { #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Skipping packet [%u bytes][direction: %u][tcp_seq: %u][expected next: %u]\n", flow->l4.tcp.tls.message.buffer_len, packet->packet_direction, ntohl(packet->tcp->seq), ntohl(packet->tcp->seq)+packet->payload_packet_len); #endif } } } /* **************************************** */ /* Can't call libc functions from kernel space, define some stub instead */ #define ndpi_isalpha(ch) (((ch) >= 'a' && (ch) <= 'z') || ((ch) >= 'A' && (ch) <= 'Z')) #define ndpi_isdigit(ch) ((ch) >= '0' && (ch) <= '9') #define ndpi_isspace(ch) (((ch) >= '\t' && (ch) <= '\r') || ((ch) == ' ')) #define ndpi_isprint(ch) ((ch) >= 0x20 && (ch) <= 0x7e) #define ndpi_ispunct(ch) (((ch) >= '!' && (ch) <= '/') || \ ((ch) >= ':' && (ch) <= '@') || \ ((ch) >= '[' && (ch) <= '`') || \ ((ch) >= '{' && (ch) <= '~')) /* **************************************** */ static void cleanupServerName(char *buffer, int buffer_len) { u_int i; /* Now all lowecase */ for(i=0; i<buffer_len; i++) buffer[i] = tolower(buffer[i]); } /* **************************************** */ /* Return code -1: error (buffer too short) 0: OK but buffer is not human readeable (so something went wrong) 1: OK */ static int extractRDNSequence(struct ndpi_packet_struct *packet, u_int offset, char *buffer, u_int buffer_len, char *rdnSeqBuf, u_int *rdnSeqBuf_offset, u_int rdnSeqBuf_len, const char *label) { u_int8_t str_len = packet->payload[offset+4], is_printable = 1; char *str; u_int len, j; if (*rdnSeqBuf_offset >= rdnSeqBuf_len) { #ifdef DEBUG_TLS printf("[TLS] %s() [buffer capacity reached][%u]\n", __FUNCTION__, rdnSeqBuf_len); #endif return -1; } // packet is truncated... further inspection is not needed if((offset+4+str_len) >= packet->payload_packet_len) return(-1); str = (char*)&packet->payload[offset+5]; len = (u_int)ndpi_min(str_len, buffer_len-1); strncpy(buffer, str, len); buffer[len] = '\0'; // check string is printable for(j = 0; j < len; j++) { if(!ndpi_isprint(buffer[j])) { is_printable = 0; break; } } if(is_printable) { int rc = snprintf(&rdnSeqBuf[*rdnSeqBuf_offset], rdnSeqBuf_len-(*rdnSeqBuf_offset), "%s%s=%s", (*rdnSeqBuf_offset > 0) ? 
", " : "", label, buffer); if(rc > 0) (*rdnSeqBuf_offset) += rc; } return(is_printable); } /* **************************************** */ static void checkTLSSubprotocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { if(flow->detected_protocol_stack[1] == NDPI_PROTOCOL_UNKNOWN) { /* Subprotocol not yet set */ if(ndpi_struct->tls_cert_cache && flow->packet.iph) { u_int32_t key = flow->packet.iph->daddr + flow->packet.tcp->dest; u_int16_t cached_proto; if(ndpi_lru_find_cache(ndpi_struct->tls_cert_cache, key, &cached_proto, 0 /* Don't remove it as it can be used for other connections */)) { ndpi_protocol ret = { NDPI_PROTOCOL_TLS, cached_proto, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED }; flow->detected_protocol_stack[0] = cached_proto, flow->detected_protocol_stack[1] = NDPI_PROTOCOL_TLS; flow->category = ndpi_get_proto_category(ndpi_struct, ret); ndpi_check_subprotocol_risk(flow, cached_proto); } } } } /* **************************************** */ /* See https://blog.catchpoint.com/2017/05/12/dissecting-tls-using-wireshark/ */ static void processCertificateElements(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int16_t p_offset, u_int16_t certificate_len) { struct ndpi_packet_struct *packet = &flow->packet; u_int num_found = 0, i; char buffer[64] = { '\0' }, rdnSeqBuf[2048] = { '\0' }; u_int rdn_len = 0; #ifdef DEBUG_TLS printf("[TLS] %s() [offset: %u][certificate_len: %u]\n", __FUNCTION__, p_offset, certificate_len); #endif /* Check after handshake protocol header (5 bytes) and message header (4 bytes) */ for(i = p_offset; i < certificate_len; i++) { /* See https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.sec.doc/q009860_.htm for X.509 certificate labels */ if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x03)) { /* Common Name */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "CN"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Common Name", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x06)) { /* Country */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "C"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Country", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x07)) { /* Locality */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "L"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Locality", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x08)) { /* State or Province */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "ST"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? 
"Subject" : "Issuer", "State or Province", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0a)) { /* Organization Name */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "O"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Name", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0b)) { /* Organization Unit */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "OU"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Unit", buffer); #endif } else if((packet->payload[i] == 0x30) && (packet->payload[i+1] == 0x1e) && (packet->payload[i+2] == 0x17)) { /* Certificate Validity */ u_int8_t len = packet->payload[i+3]; u_int offset = i+4; if(num_found == 0) { num_found++; #ifdef DEBUG_TLS printf("[TLS] %s() IssuerDN [%s]\n", __FUNCTION__, rdnSeqBuf); #endif if(rdn_len && (flow->protos.tls_quic_stun.tls_quic.issuerDN == NULL)) flow->protos.tls_quic_stun.tls_quic.issuerDN = ndpi_strdup(rdnSeqBuf); rdn_len = 0; /* Reset buffer */ } if((offset+len) < packet->payload_packet_len) { char utcDate[32]; #ifdef DEBUG_TLS u_int j; printf("[CERTIFICATE] notBefore [len: %u][", len); for(j=0; j<len; j++) printf("%c", packet->payload[i+4+j]); printf("]\n"); #endif if(len < (sizeof(utcDate)-1)) { struct tm utc; utc.tm_isdst = -1; /* Not set by strptime */ strncpy(utcDate, (const char*)&packet->payload[i+4], len); utcDate[len] = '\0'; /* 141021000000Z */ if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) { flow->protos.tls_quic_stun.tls_quic.notBefore = timegm(&utc); #ifdef DEBUG_TLS printf("[CERTIFICATE] notBefore %u [%s]\n", flow->protos.tls_quic_stun.tls_quic.notBefore, utcDate); #endif } } offset += len; if((offset+1) < packet->payload_packet_len) { len = packet->payload[offset+1]; offset += 2; if((offset+len) < packet->payload_packet_len) { u_int32_t time_sec = flow->packet.current_time_ms / 1000; #ifdef DEBUG_TLS u_int j; printf("[CERTIFICATE] notAfter [len: %u][", len); for(j=0; j<len; j++) printf("%c", packet->payload[offset+j]); printf("]\n"); #endif if(len < (sizeof(utcDate)-1)) { struct tm utc; utc.tm_isdst = -1; /* Not set by strptime */ strncpy(utcDate, (const char*)&packet->payload[offset], len); utcDate[len] = '\0'; /* 141021000000Z */ if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) { flow->protos.tls_quic_stun.tls_quic.notAfter = timegm(&utc); #ifdef DEBUG_TLS printf("[CERTIFICATE] notAfter %u [%s]\n", flow->protos.tls_quic_stun.tls_quic.notAfter, utcDate); #endif } } if((time_sec < flow->protos.tls_quic_stun.tls_quic.notBefore) || (time_sec > flow->protos.tls_quic_stun.tls_quic.notAfter)) ndpi_set_risk(flow, NDPI_TLS_CERTIFICATE_EXPIRED); /* Certificate expired */ } } } } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x1d) && (packet->payload[i+2] == 0x11)) { /* Organization OID: 2.5.29.17 (subjectAltName) */ u_int8_t matched_name = 0; #ifdef DEBUG_TLS printf("******* [TLS] Found subjectAltName\n"); #endif i += 3 /* skip the initial patten 55 1D 11 */; i++; /* skip the first type, 0x04 == BIT STRING, and jump to it's length */ if(i < packet->payload_packet_len) { i += (packet->payload[i] & 0x80) ? 
(packet->payload[i] & 0x7F) : 0; /* skip BIT STRING length */ if(i < packet->payload_packet_len) { i += 2; /* skip the second type, 0x30 == SEQUENCE, and jump to it's length */ if(i < packet->payload_packet_len) { i += (packet->payload[i] & 0x80) ? (packet->payload[i] & 0x7F) : 0; /* skip SEQUENCE length */ i++; while(i < packet->payload_packet_len) { if(packet->payload[i] == 0x82) { if((i < (packet->payload_packet_len - 1)) && ((i + packet->payload[i + 1] + 2) < packet->payload_packet_len)) { u_int8_t len = packet->payload[i + 1]; char dNSName[256]; i += 2; /* The check "len > sizeof(dNSName) - 1" will be always false. If we add it, the compiler is smart enough to detect it and throws a warning */ if((len == 0 /* Looks something went wrong */) || ((i+len) > packet->payload_packet_len)) break; strncpy(dNSName, (const char*)&packet->payload[i], len); dNSName[len] = '\0'; cleanupServerName(dNSName, len); #if DEBUG_TLS printf("[TLS] dNSName %s [%s][len: %u][leftover: %d]\n", dNSName, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, len, packet->payload_packet_len-i-len); #endif if(matched_name == 0) { if(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == '\0') matched_name = 1; /* No SNI */ else if (dNSName[0] == '*') { char * label = strstr(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, &dNSName[1]); if (label != NULL) { char * first_dot = strchr(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, '.'); if (first_dot == NULL || first_dot >= label) { matched_name = 1; } } } else if(strcmp(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, dNSName) == 0) matched_name = 1; } if(flow->protos.tls_quic_stun.tls_quic.server_names == NULL) flow->protos.tls_quic_stun.tls_quic.server_names = ndpi_strdup(dNSName), flow->protos.tls_quic_stun.tls_quic.server_names_len = strlen(dNSName); else { u_int16_t dNSName_len = strlen(dNSName); u_int16_t newstr_len = flow->protos.tls_quic_stun.tls_quic.server_names_len + dNSName_len + 1; char *newstr = (char*)ndpi_realloc(flow->protos.tls_quic_stun.tls_quic.server_names, flow->protos.tls_quic_stun.tls_quic.server_names_len+1, newstr_len+1); if(newstr) { flow->protos.tls_quic_stun.tls_quic.server_names = newstr; flow->protos.tls_quic_stun.tls_quic.server_names[flow->protos.tls_quic_stun.tls_quic.server_names_len] = ','; strncpy(&flow->protos.tls_quic_stun.tls_quic.server_names[flow->protos.tls_quic_stun.tls_quic.server_names_len+1], dNSName, dNSName_len+1); flow->protos.tls_quic_stun.tls_quic.server_names[newstr_len] = '\0'; flow->protos.tls_quic_stun.tls_quic.server_names_len = newstr_len; } } if(!flow->l4.tcp.tls.subprotocol_detected) if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, dNSName, len)) flow->l4.tcp.tls.subprotocol_detected = 1; i += len; } else { #if DEBUG_TLS printf("[TLS] Leftover %u bytes", packet->payload_packet_len - i); #endif break; } } else { break; } } /* while */ if(!matched_name) ndpi_set_risk(flow, NDPI_TLS_CERTIFICATE_MISMATCH); /* Certificate mismatch */ } } } } } if(rdn_len && (flow->protos.tls_quic_stun.tls_quic.subjectDN == NULL)) { flow->protos.tls_quic_stun.tls_quic.subjectDN = ndpi_strdup(rdnSeqBuf); if(flow->detected_protocol_stack[1] == NDPI_PROTOCOL_UNKNOWN) { /* No idea what is happening behind the scenes: let's check the certificate */ u_int32_t proto_id; int rc = ndpi_match_string_value(ndpi_struct->tls_cert_subject_automa.ac_automa, rdnSeqBuf, strlen(rdnSeqBuf),&proto_id); if(rc == 0) { /* Match found */ ndpi_protocol ret 
= { NDPI_PROTOCOL_TLS, proto_id, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; flow->detected_protocol_stack[0] = proto_id, flow->detected_protocol_stack[1] = NDPI_PROTOCOL_TLS; flow->category = ndpi_get_proto_category(ndpi_struct, ret); ndpi_check_subprotocol_risk(flow, proto_id); if(ndpi_struct->tls_cert_cache == NULL) ndpi_struct->tls_cert_cache = ndpi_lru_cache_init(1024); if(ndpi_struct->tls_cert_cache && flow->packet.iph) { u_int32_t key = flow->packet.iph->daddr + flow->packet.tcp->dest; ndpi_lru_add_to_cache(ndpi_struct->tls_cert_cache, key, proto_id); } } } } if(flow->protos.tls_quic_stun.tls_quic.subjectDN && flow->protos.tls_quic_stun.tls_quic.issuerDN && (!strcmp(flow->protos.tls_quic_stun.tls_quic.subjectDN, flow->protos.tls_quic_stun.tls_quic.issuerDN))) ndpi_set_risk(flow, NDPI_TLS_SELFSIGNED_CERTIFICATE); #if DEBUG_TLS printf("[TLS] %s() SubjectDN [%s]\n", __FUNCTION__, rdnSeqBuf); #endif } /* **************************************** */ /* See https://blog.catchpoint.com/2017/05/12/dissecting-tls-using-wireshark/ */ int processCertificate(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; int is_dtls = packet->udp ? 1 : 0; u_int32_t certificates_length, length = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; u_int32_t certificates_offset = 7 + (is_dtls ? 8 : 0); u_int8_t num_certificates_found = 0; SHA1_CTX srv_cert_fingerprint_ctx ; #ifdef DEBUG_TLS printf("[TLS] %s() [payload_packet_len=%u][direction: %u][%02X %02X %02X %02X %02X %02X...]\n", __FUNCTION__, packet->payload_packet_len, packet->packet_direction, packet->payload[0], packet->payload[1], packet->payload[2], packet->payload[3], packet->payload[4], packet->payload[5]); #endif if((packet->payload_packet_len != (length + 4 + (is_dtls ? 8 : 0))) || (packet->payload[1] != 0x0)) { ndpi_set_risk(flow, NDPI_MALFORMED_PACKET); return(-1); /* Invalid length */ } certificates_length = (packet->payload[certificates_offset - 3] << 16) + (packet->payload[certificates_offset - 2] << 8) + packet->payload[certificates_offset - 1]; if((packet->payload[certificates_offset - 3] != 0x0) || ((certificates_length+3) != length)) { ndpi_set_risk(flow, NDPI_MALFORMED_PACKET); return(-2); /* Invalid length */ } /* Now let's process each individual certificates */ while(certificates_offset < certificates_length) { u_int32_t certificate_len = (packet->payload[certificates_offset] << 16) + (packet->payload[certificates_offset+1] << 8) + packet->payload[certificates_offset+2]; /* Invalid lenght */ if((certificate_len == 0) || (packet->payload[certificates_offset] != 0x0) || ((certificates_offset+certificate_len) > (4+certificates_length+(is_dtls ? 
8 : 0)))) { #ifdef DEBUG_TLS printf("[TLS] Invalid length [certificate_len: %u][certificates_offset: %u][%u vs %u]\n", certificate_len, certificates_offset, (certificates_offset+certificate_len), certificates_length); #endif break; } certificates_offset += 3; #ifdef DEBUG_TLS printf("[TLS] Processing %u bytes certificate [%02X %02X %02X]\n", certificate_len, packet->payload[certificates_offset], packet->payload[certificates_offset+1], packet->payload[certificates_offset+2]); #endif if(num_certificates_found++ == 0) /* Dissect only the first certificate that is the one we care */ { /* For SHA-1 we take into account only the first certificate and not all of them */ SHA1Init(&srv_cert_fingerprint_ctx); #ifdef DEBUG_CERTIFICATE_HASH { int i; for(i=0;i<certificate_len;i++) printf("%02X ", packet->payload[certificates_offset+i]); printf("\n"); } #endif SHA1Update(&srv_cert_fingerprint_ctx, &packet->payload[certificates_offset], certificate_len); SHA1Final(flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint, &srv_cert_fingerprint_ctx); flow->l4.tcp.tls.fingerprint_set = 1; uint8_t * sha1 = flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint; const size_t sha1_siz = sizeof(flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint); char sha1_str[20 /* sha1_siz */ * 2 + 1]; static const char hexalnum[] = "0123456789ABCDEF"; for (size_t i = 0; i < sha1_siz; ++i) { u_int8_t lower = (sha1[i] & 0x0F); u_int8_t upper = (sha1[i] & 0xF0) >> 4; sha1_str[i*2] = hexalnum[upper]; sha1_str[i*2 + 1] = hexalnum[lower]; } sha1_str[sha1_siz * 2] = '\0'; #ifdef DEBUG_TLS printf("[TLS] SHA-1: %s\n", sha1_str); #endif if (ndpi_struct->malicious_sha1_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_sha1_automa.ac_automa, sha1_str); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_SHA1_CERTIFICATE); } processCertificateElements(ndpi_struct, flow, certificates_offset, certificate_len); } certificates_offset += certificate_len; } if((ndpi_struct->num_tls_blocks_to_follow != 0) && (flow->l4.tcp.tls.num_tls_blocks >= ndpi_struct->num_tls_blocks_to_follow)) { #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] Enough blocks dissected\n"); #endif flow->extra_packets_func = NULL; /* We're good now */ } return(1); } /* **************************************** */ static int processTLSBlock(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; int ret; switch(packet->payload[0] /* block type */) { case 0x01: /* Client Hello */ case 0x02: /* Server Hello */ processClientServerHello(ndpi_struct, flow, 0); flow->l4.tcp.tls.hello_processed = 1; ndpi_int_tls_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_TLS); #ifdef DEBUG_TLS printf("*** TLS [version: %02X][%s Hello]\n", flow->protos.tls_quic_stun.tls_quic.ssl_version, (packet->payload[0] == 0x01) ? 
"Client" : "Server"); #endif if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0304 /* TLS 1.3 */) && (packet->payload[0] == 0x02 /* Server Hello */)) { flow->l4.tcp.tls.certificate_processed = 1; /* No Certificate with TLS 1.3+ */ } checkTLSSubprotocol(ndpi_struct, flow); break; case 0x0b: /* Certificate */ /* Important: populate the tls union fields only after * ndpi_int_tls_add_connection has been called */ if(flow->l4.tcp.tls.hello_processed) { ret = processCertificate(ndpi_struct, flow); if (ret != 1) { #ifdef DEBUG_TLS printf("[TLS] Error processing certificate: %d\n", ret); #endif } flow->l4.tcp.tls.certificate_processed = 1; } break; default: return(-1); } return(0); } /* **************************************** */ static void ndpi_looks_like_tls(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { // ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, NDPI_PROTOCOL_UNKNOWN); if(flow->guessed_protocol_id == NDPI_PROTOCOL_UNKNOWN) flow->guessed_protocol_id = NDPI_PROTOCOL_TLS; } /* **************************************** */ static int ndpi_search_tls_tcp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int8_t something_went_wrong = 0; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] ndpi_search_tls_tcp() Processing new packet [payload_packet_len: %u]\n", packet->payload_packet_len); #endif if(packet->payload_packet_len == 0) return(1); /* Keep working */ ndpi_search_tls_tcp_memory(ndpi_struct, flow); while(!something_went_wrong) { u_int16_t len, p_len; const u_int8_t *p; u_int8_t content_type; if(flow->l4.tcp.tls.message.buffer_used < 5) return(1); /* Keep working */ len = (flow->l4.tcp.tls.message.buffer[3] << 8) + flow->l4.tcp.tls.message.buffer[4] + 5; if(len > flow->l4.tcp.tls.message.buffer_used) { #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Not enough TLS data [%u < %u][%02X %02X %02X %02X %02X]\n", len, flow->l4.tcp.tls.message.buffer_used, flow->l4.tcp.tls.message.buffer[0], flow->l4.tcp.tls.message.buffer[1], flow->l4.tcp.tls.message.buffer[2], flow->l4.tcp.tls.message.buffer[3], flow->l4.tcp.tls.message.buffer[4]); #endif break; } if(len == 0) { something_went_wrong = 1; break; } #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Processing %u bytes message\n", len); #endif content_type = flow->l4.tcp.tls.message.buffer[0]; /* Overwriting packet payload */ p = packet->payload; p_len = packet->payload_packet_len; /* Backup */ if(content_type == 0x14 /* Change Cipher Spec */) { if(ndpi_struct->skip_tls_blocks_until_change_cipher) { /* Ignore Application Data up until change cipher so in this case we reset the number of observed TLS blocks */ flow->l4.tcp.tls.num_tls_blocks = 0; } } if((len > 9) && (content_type != 0x17 /* Application Data */) && (!flow->l4.tcp.tls.certificate_processed)) { /* Split the element in blocks */ u_int16_t processed = 5; while((processed+4) <= len) { const u_int8_t *block = (const u_int8_t *)&flow->l4.tcp.tls.message.buffer[processed]; u_int32_t block_len = (block[1] << 16) + (block[2] << 8) + block[3]; if(/* (block_len == 0) || */ /* Note blocks can have zero lenght */ (block_len > len) || ((block[1] != 0x0))) { something_went_wrong = 1; break; } packet->payload = block; packet->payload_packet_len = ndpi_min(block_len+4, flow->l4.tcp.tls.message.buffer_used); if((processed+packet->payload_packet_len) > len) { something_went_wrong = 1; break; } processTLSBlock(ndpi_struct, flow); ndpi_looks_like_tls(ndpi_struct, flow); processed 
+= packet->payload_packet_len; } } else { /* Process element as a whole */ if(content_type == 0x17 /* Application Data */) { ndpi_looks_like_tls(ndpi_struct, flow); if(flow->l4.tcp.tls.certificate_processed) { if(flow->l4.tcp.tls.num_tls_blocks < ndpi_struct->num_tls_blocks_to_follow) flow->l4.tcp.tls.tls_application_blocks_len[flow->l4.tcp.tls.num_tls_blocks++] = (packet->packet_direction == 0) ? (len-5) : -(len-5); #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] [len: %u][num_tls_blocks: %u/%u]\n", len-5, flow->l4.tcp.tls.num_tls_blocks, ndpi_struct->num_tls_blocks_to_follow); #endif } } } packet->payload = p; packet->payload_packet_len = p_len; /* Restore */ flow->l4.tcp.tls.message.buffer_used -= len; if(flow->l4.tcp.tls.message.buffer_used > 0) memmove(flow->l4.tcp.tls.message.buffer, &flow->l4.tcp.tls.message.buffer[len], flow->l4.tcp.tls.message.buffer_used); else break; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Left memory buffer %u bytes\n", flow->l4.tcp.tls.message.buffer_used); #endif } if(something_went_wrong || ((ndpi_struct->num_tls_blocks_to_follow > 0) && (flow->l4.tcp.tls.num_tls_blocks == ndpi_struct->num_tls_blocks_to_follow)) ) { #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] No more blocks\n"); #endif flow->check_extra_packets = 0; flow->extra_packets_func = NULL; return(0); /* That's all */ } else return(1); } /* **************************************** */ static int ndpi_search_tls_udp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int32_t handshake_len; u_int16_t p_len, processed; const u_int8_t *p; u_int8_t no_dtls = 0, change_cipher_found = 0; #ifdef DEBUG_TLS printf("[TLS] %s()\n", __FUNCTION__); #endif /* Overwriting packet payload */ p = packet->payload, p_len = packet->payload_packet_len; /* Backup */ /* Split the element in blocks */ processed = 0; while(processed + 13 < p_len) { u_int32_t block_len; const u_int8_t *block = (const u_int8_t *)&p[processed]; if((block[0] != 0x16 && block[0] != 0x14) || /* Handshake, change-cipher-spec */ (block[1] != 0xfe) || /* We ignore old DTLS versions */ ((block[2] != 0xff) && (block[2] != 0xfd))) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid block 0x%x or old version 0x%x-0x%x-0x%x\n", block[0], block[1], block[2], block[3]); #endif no_dtls = 1; break; } block_len = ntohs(*((u_int16_t*)&block[11])); #ifdef DEBUG_TLS printf("[TLS] DTLS block len: %d\n", block_len); #endif if (block_len == 0 || (processed + block_len + 12 >= p_len)) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid block len %d (processed %d, p_len %d)\n", block_len, processed, p_len); #endif no_dtls = 1; break; } /* We process only handshake msgs */ if(block[0] == 0x16) { if (processed + block_len + 13 > p_len) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid len %d %d %d\n", processed, block_len, p_len); #endif no_dtls = 1; break; } /* TODO: handle (certificate) fragments */ handshake_len = (block[14] << 16) + (block[15] << 8) + block[16]; if((handshake_len + 12) != block_len) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid handshake_len %d, %d)\n", handshake_len, block_len); #endif no_dtls = 1; break; } packet->payload = &block[13]; packet->payload_packet_len = block_len; processTLSBlock(ndpi_struct, flow); } else { /* Change-cipher-spec: any subsequent block might be encrypted */ #ifdef DEBUG_TLS printf("[TLS] Change-cipher-spec\n"); #endif change_cipher_found = 1; processed += block_len + 13; break; } processed += block_len + 13; } if(processed != p_len) { #ifdef DEBUG_TLS 
printf("[TLS] DTLS invalid processed len %d/%d (%d)\n", processed, p_len, change_cipher_found); #endif if(!change_cipher_found) no_dtls = 1; } packet->payload = p; packet->payload_packet_len = p_len; /* Restore */ if(no_dtls || change_cipher_found) { NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return(0); /* That's all */ } else { return(1); /* Keep working */ } } /* **************************************** */ static void tlsInitExtraPacketProcessing(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { flow->check_extra_packets = 1; /* At most 12 packets should almost always be enough to find the server certificate if it's there */ flow->max_extra_packets_to_check = 12 + (ndpi_struct->num_tls_blocks_to_follow*4); flow->extra_packets_func = (flow->packet.udp != NULL) ? ndpi_search_tls_udp : ndpi_search_tls_tcp; } /* **************************************** */ static void tlsCheckUncommonALPN(struct ndpi_flow_struct *flow) { /* see: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml */ static char const * const common_alpns[] = { "http/0.9", "http/1.0", "http/1.1", "spdy/1", "spdy/2", "spdy/3", "spdy/3.1", "stun.turn", "stun.nat-discovery", "h2", "h2c", "h2-16", "h2-15", "h2-14", "webrtc", "c-webrtc", "ftp", "imap", "pop3", "managesieve", "coap", "xmpp-client", "xmpp-server", "acme-tls/1", "mqtt", "dot", "ntske/1", "sunrpc", "h3", "smb", "irc", /* QUIC ALPNs */ "h3-T051", "h3-T050", "h3-32", "h3-30", "h3-29", "h3-28", "h3-27", "h3-24", "h3-22", "hq-30", "hq-29", "hq-28", "hq-27", "h3-fb-05", "h1q-fb", "doq-i00" }; /* * If the ALPN list increases in size, iterating over all items for every incoming ALPN may * have a performance impact. A hash map could solve this issue. */ char * alpn_start = flow->protos.tls_quic_stun.tls_quic.alpn; char * comma_or_nul = alpn_start; do { comma_or_nul = strchr(comma_or_nul, ','); if (comma_or_nul == NULL) { comma_or_nul = alpn_start + strlen(alpn_start); } int alpn_found = 0; int alpn_len = comma_or_nul - alpn_start; char const * const alpn = alpn_start; for (size_t i = 0; i < sizeof(common_alpns)/sizeof(common_alpns[0]); ++i) { if (strlen(common_alpns[i]) == alpn_len && strncmp(alpn, common_alpns[i], alpn_len) == 0) { alpn_found = 1; break; } } if (alpn_found == 0) { #ifdef DEBUG_TLS printf("TLS uncommon ALPN found: %.*s\n", alpn_len, alpn); #endif ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } alpn_start = comma_or_nul + 1; } while (*(comma_or_nul++) != '\0'); } /* **************************************** */ static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol) { #if DEBUG_TLS printf("[TLS] %s()\n", __FUNCTION__); #endif if((flow->packet.udp != NULL) && (protocol == NDPI_PROTOCOL_TLS)) protocol = NDPI_PROTOCOL_DTLS; if((flow->detected_protocol_stack[0] == protocol) || (flow->detected_protocol_stack[1] == protocol)) { if(!flow->check_extra_packets) tlsInitExtraPacketProcessing(ndpi_struct, flow); return; } if(protocol != NDPI_PROTOCOL_TLS) ; else protocol = ndpi_tls_refine_master_protocol(ndpi_struct, flow, protocol); ndpi_set_detected_protocol(ndpi_struct, flow, protocol, protocol); tlsInitExtraPacketProcessing(ndpi_struct, flow); } /* **************************************** */ int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version) { struct ndpi_packet_struct *packet = &flow->packet; union ja3_info ja3; u_int8_t invalid_ja3 = 
0; u_int16_t tls_version, ja3_str_len; char ja3_str[JA3_STR_LEN]; ndpi_MD5_CTX ctx; u_char md5_hash[16]; int i; u_int16_t total_len; u_int8_t handshake_type; char buffer[64] = { '\0' }; int is_quic = (quic_version != 0); int is_dtls = packet->udp && (!is_quic); #ifdef DEBUG_TLS printf("TLS %s() called\n", __FUNCTION__); #endif memset(&ja3, 0, sizeof(ja3)); handshake_type = packet->payload[0]; total_len = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; if((total_len > packet->payload_packet_len) || (packet->payload[1] != 0x0)) return(0); /* Not found */ total_len = packet->payload_packet_len; /* At least "magic" 3 bytes, null for string end, otherwise no need to waste cpu cycles */ if(total_len > 4) { u_int16_t base_offset = (!is_dtls) ? 38 : 46; u_int16_t version_offset = (!is_dtls) ? 4 : 12; u_int16_t offset = (!is_dtls) ? 38 : 46, extension_len, j; u_int8_t session_id_len = 0; if((base_offset >= total_len) || (version_offset + 1) >= total_len) return 0; /* Not found */ session_id_len = packet->payload[base_offset]; #ifdef DEBUG_TLS printf("TLS [len: %u][handshake_type: %02X]\n", packet->payload_packet_len, handshake_type); #endif tls_version = ntohs(*((u_int16_t*)&packet->payload[version_offset])); if(handshake_type == 0x02 /* Server Hello */) { int i, rc; ja3.server.tls_handshake_version = tls_version; #ifdef DEBUG_TLS printf("TLS Server Hello [version: 0x%04X]\n", tls_version); #endif /* The server hello decides about the TLS version of this flow https://networkengineering.stackexchange.com/questions/55752/why-does-wireshark-show-version-tls-1-2-here-instead-of-tls-1-3 */ if(packet->udp) offset += session_id_len + 1; else { if(tls_version < 0x7F15 /* TLS 1.3 lacks of session id */) offset += session_id_len+1; } if((offset+3) > packet->payload_packet_len) return(0); /* Not found */ ja3.server.num_cipher = 1, ja3.server.cipher[0] = ntohs(*((u_int16_t*)&packet->payload[offset])); if((flow->protos.tls_quic_stun.tls_quic.server_unsafe_cipher = ndpi_is_safe_ssl_cipher(ja3.server.cipher[0])) == 1) ndpi_set_risk(flow, NDPI_TLS_WEAK_CIPHER); flow->protos.tls_quic_stun.tls_quic.server_cipher = ja3.server.cipher[0]; #ifdef DEBUG_TLS printf("TLS [server][session_id_len: %u][cipher: %04X]\n", session_id_len, ja3.server.cipher[0]); #endif offset += 2 + 1; if((offset + 1) < packet->payload_packet_len) /* +1 because we are goint to read 2 bytes */ extension_len = ntohs(*((u_int16_t*)&packet->payload[offset])); else extension_len = 0; #ifdef DEBUG_TLS printf("TLS [server][extension_len: %u]\n", extension_len); #endif offset += 2; for(i=0; i<extension_len; ) { u_int16_t extension_id, extension_len; if((offset+4) > packet->payload_packet_len) break; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset])); extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+2])); if(ja3.server.num_tls_extension < MAX_NUM_JA3) ja3.server.tls_extension[ja3.server.num_tls_extension++] = extension_id; #ifdef DEBUG_TLS printf("TLS [server][extension_id: %u/0x%04X][len: %u]\n", extension_id, extension_id, extension_len); #endif if(extension_id == 43 /* supported versions */) { if(extension_len >= 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[offset+4])); #ifdef DEBUG_TLS printf("TLS [server] [TLS version: 0x%04X]\n", tls_version); #endif flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.server.tls_supported_version = tls_version; } } else if(extension_id == 16 /* application_layer_protocol_negotiation (ALPN) */) { u_int16_t s_offset = offset+4; u_int16_t 
tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Server TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len) { #ifdef DEBUG_TLS printf("Server TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) { alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; } s_offset += alpn_len, alpn_str_len += alpn_len;; } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Server TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); if(flow->protos.tls_quic_stun.tls_quic.alpn != NULL) tlsCheckUncommonALPN(flow); snprintf(ja3.server.alpn, sizeof(ja3.server.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.server.alpn[i] != '\0'; i++) if(ja3.server.alpn[i] == ',') ja3.server.alpn[i] = '-'; } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+4 + 1; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.server.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.server.elliptic_curve_point_format[ja3.server.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid num elliptic %u\n", ja3.server.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } i += 4 + extension_len, offset += 4 + extension_len; } /* for */ ja3_str_len = snprintf(ja3_str, JA3_STR_LEN, "%u,", ja3.server.tls_handshake_version); for(i=0; (i<ja3.server.num_cipher) && (JA3_STR_LEN > ja3_str_len); i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.cipher[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(JA3_STR_LEN > ja3_str_len) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if(rc > 0 && ja3_str_len + rc < JA3_STR_LEN) ja3_str_len += rc; } /* ********** */ for(i=0; (i<ja3.server.num_tls_extension) && (JA3_STR_LEN > ja3_str_len); i++) { int rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.tls_extension[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(ndpi_struct->enable_ja3_plus) { for(i=0; (i<ja3.server.num_elliptic_curve_point_format) && (JA3_STR_LEN > ja3_str_len); i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.server.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if((ja3.server.alpn[0] != '\0') && (JA3_STR_LEN > ja3_str_len)) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",%s", ja3.server.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_TLS printf("[JA3+] Server: %s \n", ja3_str); #endif } else { #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", ja3_str); #endif } ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { int rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_server[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_server)-j, "%02x", md5_hash[i]); if(rc <= 0) break; else j += rc; } #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_server); #endif } else if(handshake_type == 0x01 /* Client Hello */) { u_int16_t cipher_len, cipher_offset; u_int8_t cookie_len = 0; flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.client.tls_handshake_version = tls_version; if(flow->protos.tls_quic_stun.tls_quic.ssl_version < 0x0302) /* TLSv1.1 */ ndpi_set_risk(flow, NDPI_TLS_OBSOLETE_VERSION); if((session_id_len+base_offset+3) > packet->payload_packet_len) return(0); /* Not found */ if(!is_dtls) { cipher_len = packet->payload[session_id_len+base_offset+2] + (packet->payload[session_id_len+base_offset+1] << 8); cipher_offset = base_offset + session_id_len + 3; } else { cookie_len = packet->payload[base_offset+session_id_len+1]; #ifdef DEBUG_TLS printf("[JA3] Client: DTLS cookie len %d\n", cookie_len); #endif if((session_id_len+base_offset+cookie_len+4) > packet->payload_packet_len) return(0); /* Not found */ cipher_len = ntohs(*((u_int16_t*)&packet->payload[base_offset+session_id_len+cookie_len+2])); cipher_offset = base_offset + session_id_len + cookie_len + 4; } #ifdef DEBUG_TLS printf("Client TLS [client cipher_len: %u][tls_version: 0x%04X]\n", cipher_len, tls_version); #endif if((cipher_offset+cipher_len) <= total_len) { u_int8_t safari_ciphers = 0, chrome_ciphers = 0; for(i=0; i<cipher_len;) { u_int16_t *id = (u_int16_t*)&packet->payload[cipher_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [cipher suite: %u/0x%04X] [%d/%u]\n", ntohs(*id), ntohs(*id), i, cipher_len); #endif if((*id == 0) || (packet->payload[cipher_offset+i] != packet->payload[cipher_offset+i+1])) { u_int16_t cipher_id = ntohs(*id); /* Skip GREASE [https://tools.ietf.org/id/draft-ietf-tls-grease-01.html] https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ if(ja3.client.num_cipher < MAX_NUM_JA3) ja3.client.cipher[ja3.client.num_cipher++] = cipher_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid cipher %u\n", ja3.client.num_cipher); #endif } switch(cipher_id) { case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: safari_ciphers++; break; case TLS_CIPHER_GREASE_RESERVED_0: case TLS_AES_128_GCM_SHA256: case TLS_AES_256_GCM_SHA384: case TLS_CHACHA20_POLY1305_SHA256: chrome_ciphers++; break; case TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_GCM_SHA256: case 
TLS_RSA_WITH_AES_256_GCM_SHA384: safari_ciphers++, chrome_ciphers++; break; } } i += 2; } /* for */ if(chrome_ciphers == 13) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 1; else if(safari_ciphers == 12) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 1; } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (cipher_offset+cipher_len), total_len); #endif } offset = base_offset + session_id_len + cookie_len + cipher_len + 2; offset += (!is_dtls) ? 1 : 2; if(offset < total_len) { u_int16_t compression_len; u_int16_t extensions_len; compression_len = packet->payload[offset]; offset++; #ifdef DEBUG_TLS printf("Client TLS [compression_len: %u]\n", compression_len); #endif // offset += compression_len + 3; offset += compression_len; if(offset+1 < total_len) { extensions_len = ntohs(*((u_int16_t*)&packet->payload[offset])); offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extensions_len: %u]\n", extensions_len); #endif if((extensions_len+offset) <= total_len) { /* Move to the first extension Type is u_int to avoid possible overflow on extension_len addition */ u_int extension_offset = 0; u_int32_t j; while(extension_offset < extensions_len && offset+extension_offset+4 <= total_len) { u_int16_t extension_id, extension_len, extn_off = offset+extension_offset; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extension_id: %u][extension_len: %u]\n", extension_id, extension_len); #endif if((extension_id == 0) || (packet->payload[extn_off] != packet->payload[extn_off+1])) { /* Skip GREASE */ if(ja3.client.num_tls_extension < MAX_NUM_JA3) ja3.client.tls_extension[ja3.client.num_tls_extension++] = extension_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid extensions %u\n", ja3.client.num_tls_extension); #endif } } if(extension_id == 0 /* server name */) { u_int16_t len; #ifdef DEBUG_TLS printf("[TLS] Extensions: found server name\n"); #endif if((offset+extension_offset+4) < packet->payload_packet_len) { len = (packet->payload[offset+extension_offset+3] << 8) + packet->payload[offset+extension_offset+4]; len = (u_int)ndpi_min(len, sizeof(buffer)-1); if((offset+extension_offset+5+len) <= packet->payload_packet_len) { strncpy(buffer, (char*)&packet->payload[offset+extension_offset+5], len); buffer[len] = '\0'; cleanupServerName(buffer, sizeof(buffer)); snprintf(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, sizeof(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name), "%s", buffer); #ifdef DEBUG_TLS printf("[TLS] SNI: [%s]\n", buffer); #endif if(!is_quic) { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } else { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_QUIC, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } if(ndpi_check_dga_name(ndpi_struct, flow, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, 1)) { char *sni = flow->protos.tls_quic_stun.tls_quic.client_requested_server_name; int len = strlen(sni); #ifdef DEBUG_TLS printf("[TLS] SNI: (DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif if((len >= 4) /* Check if it ends in .com or .net */ && ((strcmp(&sni[len-4], ".com") == 0) || 
(strcmp(&sni[len-4], ".net") == 0)) && (strncmp(sni, "www.", 4) == 0)) /* Not starting with www.... */ ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TOR, NDPI_PROTOCOL_TLS); } else { #ifdef DEBUG_TLS printf("[TLS] SNI: (NO DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif } } else { #ifdef DEBUG_TLS printf("[TLS] Extensions server len too short: %u vs %u\n", offset+extension_offset+5+len, packet->payload_packet_len); #endif } } } else if(extension_id == 10 /* supported groups */) { u_int16_t s_offset = offset+extension_offset + 2; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveGroups: len=%u]\n", extension_len); #endif if((s_offset+extension_len-2) <= total_len) { for(i=0; i<extension_len-2;) { u_int16_t s_group = ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); #ifdef DEBUG_TLS printf("Client TLS [EllipticCurve: %u/0x%04X]\n", s_group, s_group); #endif if((s_group == 0) || (packet->payload[s_offset+i] != packet->payload[s_offset+i+1])) { /* Skip GREASE */ if(ja3.client.num_elliptic_curve < MAX_NUM_JA3) ja3.client.elliptic_curve[ja3.client.num_elliptic_curve++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve); #endif } } i += 2; } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (s_offset+extension_len-1), total_len); #endif } } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+extension_offset + 1; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.client.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.client.elliptic_curve_point_format[ja3.client.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } else if(extension_id == 13 /* signature algorithms */) { u_int16_t s_offset = offset+extension_offset, safari_signature_algorithms = 0, chrome_signature_algorithms = 0; u_int16_t tot_signature_algorithms_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: block_len=%u/len=%u]\n", extension_len, tot_signature_algorithms_len); #endif s_offset += 2; tot_signature_algorithms_len = ndpi_min((sizeof(ja3.client.signature_algorithms) / 2) - 1, tot_signature_algorithms_len); #ifdef TLS_HANDLE_SIGNATURE_ALGORITMS flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms = ndpi_min(tot_signature_algorithms_len / 2, MAX_NUM_TLS_SIGNATURE_ALGORITHMS); memcpy(flow->protos.tls_quic_stun.tls_quic.client_signature_algorithms, &packet->payload[s_offset], 2 /* 16 bit */*flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms); #endif for(i=0; i<tot_signature_algorithms_len; i++) { int rc = snprintf(&ja3.client.signature_algorithms[i*2], sizeof(ja3.client.signature_algorithms)-i*2, "%02X", packet->payload[s_offset+i]); if(rc < 0) break; } for(i=0; i<tot_signature_algorithms_len; i+=2) { u_int16_t cipher_id = (u_int16_t)ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); // printf("=>> %04X\n", 
cipher_id); switch(cipher_id) { case ECDSA_SECP521R1_SHA512: flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls = 1; break; case ECDSA_SECP256R1_SHA256: case ECDSA_SECP384R1_SHA384: case RSA_PKCS1_SHA256: case RSA_PKCS1_SHA384: case RSA_PKCS1_SHA512: case RSA_PSS_RSAE_SHA256: case RSA_PSS_RSAE_SHA384: case RSA_PSS_RSAE_SHA512: chrome_signature_algorithms++, safari_signature_algorithms++; break; } } if(flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0, flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; if(safari_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0; if(chrome_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; ja3.client.signature_algorithms[i*2] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: %s]\n", ja3.client.signature_algorithms); #endif } else if(extension_id == 16 /* application_layer_protocol_negotiation */) { u_int16_t s_offset = offset+extension_offset; u_int16_t tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Client TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len && (s_offset + alpn_len) <= total_len) { #ifdef DEBUG_TLS printf("Client TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; s_offset += alpn_len, alpn_str_len += alpn_len;; } else break; } else break; } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); snprintf(ja3.client.alpn, sizeof(ja3.client.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.client.alpn[i] != '\0'; i++) if(ja3.client.alpn[i] == ',') ja3.client.alpn[i] = '-'; } else if(extension_id == 43 /* supported versions */) { u_int16_t s_offset = offset+extension_offset; u_int8_t version_len = packet->payload[s_offset]; char version_str[256]; u_int8_t version_str_len = 0; version_str[0] = 0; #ifdef DEBUG_TLS printf("Client TLS [TLS version len: %u]\n", version_len); #endif if(version_len == (extension_len-1)) { u_int8_t j; u_int16_t supported_versions_offset = 0; s_offset++; // careful not to overflow and loop forever with u_int8_t for(j=0; j+1<version_len; j += 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[s_offset+j])); u_int8_t unknown_tls_version; #ifdef DEBUG_TLS printf("Client TLS [TLS version: %s/0x%04X]\n", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version), tls_version); #endif if((version_str_len+8) < sizeof(version_str)) { int rc = snprintf(&version_str[version_str_len], sizeof(version_str) - version_str_len, "%s%s", (version_str_len > 0) ? 
"," : "", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version)); if(rc <= 0) break; else version_str_len += rc; rc = snprintf(&ja3.client.supported_versions[supported_versions_offset], sizeof(ja3.client.supported_versions)-supported_versions_offset, "%s%04X", (j > 0) ? "-" : "", tls_version); if(rc > 0) supported_versions_offset += rc; } } #ifdef DEBUG_TLS printf("Client TLS [SUPPORTED_VERSIONS: %s]\n", ja3.client.supported_versions); #endif if(flow->protos.tls_quic_stun.tls_quic.tls_supported_versions == NULL) flow->protos.tls_quic_stun.tls_quic.tls_supported_versions = ndpi_strdup(version_str); } } else if(extension_id == 65486 /* encrypted server name */) { /* - https://tools.ietf.org/html/draft-ietf-tls-esni-06 - https://blog.cloudflare.com/encrypted-sni/ */ u_int16_t e_offset = offset+extension_offset; u_int16_t initial_offset = e_offset; u_int16_t e_sni_len, cipher_suite = ntohs(*((u_int16_t*)&packet->payload[e_offset])); flow->protos.tls_quic_stun.tls_quic.encrypted_sni.cipher_suite = cipher_suite; e_offset += 2; /* Cipher suite len */ /* Key Share Entry */ e_offset += 2; /* Group */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { /* Record Digest */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { e_sni_len = ntohs(*((u_int16_t*)&packet->payload[e_offset])); e_offset += 2; if((e_offset+e_sni_len-extension_len-initial_offset) >= 0 && e_offset+e_sni_len < packet->payload_packet_len) { #ifdef DEBUG_ENCRYPTED_SNI printf("Client TLS [Encrypted Server Name len: %u]\n", e_sni_len); #endif if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni = (char*)ndpi_malloc(e_sni_len*2+1); if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni) { u_int16_t i, off; for(i=e_offset, off=0; i<(e_offset+e_sni_len); i++) { int rc = sprintf(&flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off], "%02X", packet->payload[i] & 0XFF); if(rc <= 0) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off] = '\0'; break; } else off += rc; } } } } } } } else if(extension_id == 65445 || /* QUIC transport parameters (drafts version) */ extension_id == 57) { /* QUIC transport parameters (final version) */ u_int16_t s_offset = offset+extension_offset; uint16_t final_offset; int using_var_int = is_version_with_var_int_transport_params(quic_version); if(!using_var_int) { if(s_offset+1 >= total_len) { final_offset = 0; /* Force skipping extension */ } else { u_int16_t seq_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); s_offset += 2; final_offset = MIN(total_len, s_offset + seq_len); } } else { final_offset = MIN(total_len, s_offset + extension_len); } while(s_offset < final_offset) { u_int64_t param_type, param_len; if(!using_var_int) { if(s_offset+3 >= final_offset) break; param_type = ntohs(*((u_int16_t*)&packet->payload[s_offset])); param_len = ntohs(*((u_int16_t*)&packet->payload[s_offset + 2])); s_offset += 4; } else { if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_type); if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_len); } #ifdef DEBUG_TLS printf("Client TLS [QUIC TP: Param 0x%x Len %d]\n", 
(int)param_type, (int)param_len); #endif if(s_offset+param_len > final_offset) break; if(param_type==0x3129) { #ifdef DEBUG_TLS printf("UA [%.*s]\n", (int)param_len, &packet->payload[s_offset]); #endif http_process_user_agent(ndpi_struct, flow, &packet->payload[s_offset], param_len); break; } s_offset += param_len; } } extension_offset += extension_len; /* Move to the next extension */ #ifdef DEBUG_TLS printf("Client TLS [extension_offset/len: %u/%u]\n", extension_offset, extension_len); #endif } /* while */ if(!invalid_ja3) { int rc; compute_ja3c: ja3_str_len = snprintf(ja3_str, JA3_STR_LEN, "%u,", ja3.client.tls_handshake_version); for(i=0; i<ja3.client.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.cipher[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_tls_extension; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.tls_extension[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_elliptic_curve; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.elliptic_curve[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; for(i=0; i<ja3.client.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.client.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ndpi_struct->enable_ja3_plus) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",%s,%s,%s", ja3.client.signature_algorithms, ja3.client.supported_versions, ja3.client.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_JA3C printf("[JA3+] Client: %s \n", ja3_str); #endif ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_client[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_client)-j, "%02x", md5_hash[i]); if(rc > 0) j += rc; else break; } #ifdef DEBUG_JA3C printf("[JA3] Client: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_client); #endif if(ndpi_struct->malicious_ja3_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_ja3_automa.ac_automa, flow->protos.tls_quic_stun.tls_quic.ja3_client); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_JA3); } } /* Before returning to the caller we need to make a final check */ if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0303) /* >= TLSv1.2 */ && (flow->protos.tls_quic_stun.tls_quic.alpn == NULL) /* No ALPN */) { ndpi_set_risk(flow, NDPI_TLS_NOT_CARRYING_HTTPS); } /* Suspicious Domain Fronting: https://github.com/SixGenInc/Noctilucent/blob/master/docs/ */ if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni && flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] != '\0') { ndpi_set_risk(flow, NDPI_TLS_SUSPICIOUS_ESNI_USAGE); } /* Add check for missing SNI */ if((flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == 0) && (flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0302) /* TLSv1.1 */ && (flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) /* No ESNI */ ) { /* This is a bit suspicious */ ndpi_set_risk(flow, NDPI_TLS_MISSING_SNI); } return(2 /* Client Certificate */); } else { #ifdef DEBUG_TLS printf("[TLS] Client: too short [%u vs %u]\n", (extensions_len+offset), total_len); #endif } } else if(offset == total_len) { /* TLS does not have extensions etc */ goto compute_ja3c; } } else { #ifdef DEBUG_TLS printf("[JA3] Client: invalid length detected\n"); #endif } } } return(0); /* Not found */ } /* **************************************** */ static void ndpi_search_tls_wrapper(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; #ifdef DEBUG_TLS printf("==>> %s() %u [len: %u][version: %u]\n", __FUNCTION__, flow->guessed_host_protocol_id, packet->payload_packet_len, flow->protos.tls_quic_stun.tls_quic.ssl_version); #endif if(packet->udp != NULL) ndpi_search_tls_udp(ndpi_struct, flow); else ndpi_search_tls_tcp(ndpi_struct, flow); } /* **************************************** */ void init_tls_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id, NDPI_PROTOCOL_BITMASK *detection_bitmask) { ndpi_set_bitmask_protocol_detection("TLS", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_TLS, ndpi_search_tls_wrapper, NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_TCP_WITH_PAYLOAD_WITHOUT_RETRANSMISSION, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; /* *************************************************** */ ndpi_set_bitmask_protocol_detection("DTLS", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_DTLS, 
ndpi_search_tls_wrapper, NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_UDP_WITH_PAYLOAD, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; }
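The compute_ja3c block above shows how nDPI turns the collected handshake fields into the JA3 string: the handshake version, then the cipher list, the extension-ID list, the supported-groups list and the EC point-format list, each list joined with '-' and the fields separated by ','; the string is then MD5-hashed and hex-encoded into ja3_client (or ja3_server for JA3S). The following is a stand-alone sketch of that composition, assuming made-up type and helper names (ja3_fields, ja3_append, JA3_BUF_LEN are not nDPI identifiers); only the field layout and the fit-before-advance check mirror the dissector.

#include <stdarg.h>
#include <stdio.h>

#define JA3_BUF_LEN 1024                     /* illustrative size, not nDPI's JA3_STR_LEN */

struct ja3_fields {                          /* illustrative container, not nDPI's ja3_info */
  unsigned version;
  unsigned ciphers[64], extensions[64], curves[64], formats[64];
  int n_ciphers, n_extensions, n_curves, n_formats;
};

/* Append one formatted chunk; advance the offset only if the whole chunk fits. */
static size_t ja3_append(char *buf, size_t len, size_t off, const char *fmt, ...) {
  va_list ap;
  int rc;

  if(off >= len) return off;
  va_start(ap, fmt);
  rc = vsnprintf(&buf[off], len - off, fmt, ap);
  va_end(ap);
  if(rc > 0 && off + (size_t)rc < len) return off + (size_t)rc;
  buf[off] = '\0';                           /* chunk did not fit: drop it, keep the offset */
  return off;
}

static void ja3_compose(const struct ja3_fields *f, char *out, size_t len) {
  size_t off = 0;
  int i;

  if(len == 0) return;
  off = ja3_append(out, len, off, "%u,", f->version);
  for(i = 0; i < f->n_ciphers; i++)
    off = ja3_append(out, len, off, "%s%u", (i > 0) ? "-" : "", f->ciphers[i]);
  off = ja3_append(out, len, off, ",");
  for(i = 0; i < f->n_extensions; i++)
    off = ja3_append(out, len, off, "%s%u", (i > 0) ? "-" : "", f->extensions[i]);
  off = ja3_append(out, len, off, ",");
  for(i = 0; i < f->n_curves; i++)
    off = ja3_append(out, len, off, "%s%u", (i > 0) ? "-" : "", f->curves[i]);
  off = ja3_append(out, len, off, ",");
  for(i = 0; i < f->n_formats; i++)
    off = ja3_append(out, len, off, "%s%u", (i > 0) ? "-" : "", f->formats[i]);
  /* The dissector then MD5-hashes 'out' (ndpi_MD5Init/Update/Final) and
     hex-encodes the 16-byte digest into ja3_client / ja3_server. */
}

int main(void) {
  struct ja3_fields f = { .version = 771,
                          .ciphers = { 4865, 4866 }, .n_ciphers = 2,
                          .extensions = { 0, 10, 11 }, .n_extensions = 3,
                          .curves = { 29, 23 }, .n_curves = 2,
                          .formats = { 0 }, .n_formats = 1 };
  char ja3[JA3_BUF_LEN];

  ja3_compose(&f, ja3, sizeof(ja3));
  printf("%s\n", ja3);   /* -> 771,4865-4866,0-10-11,29-23,0 */
  return 0;
}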
int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version) { struct ndpi_packet_struct *packet = &flow->packet; union ja3_info ja3; u_int8_t invalid_ja3 = 0; u_int16_t tls_version, ja3_str_len; char ja3_str[JA3_STR_LEN]; ndpi_MD5_CTX ctx; u_char md5_hash[16]; int i; u_int16_t total_len; u_int8_t handshake_type; char buffer[64] = { '\0' }; int is_quic = (quic_version != 0); int is_dtls = packet->udp && (!is_quic); #ifdef DEBUG_TLS printf("TLS %s() called\n", __FUNCTION__); #endif memset(&ja3, 0, sizeof(ja3)); handshake_type = packet->payload[0]; total_len = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; if((total_len > packet->payload_packet_len) || (packet->payload[1] != 0x0)) return(0); /* Not found */ total_len = packet->payload_packet_len; /* At least "magic" 3 bytes, null for string end, otherwise no need to waste cpu cycles */ if(total_len > 4) { u_int16_t base_offset = (!is_dtls) ? 38 : 46; u_int16_t version_offset = (!is_dtls) ? 4 : 12; u_int16_t offset = (!is_dtls) ? 38 : 46, extension_len, j; u_int8_t session_id_len = 0; if((base_offset >= total_len) || (version_offset + 1) >= total_len) return 0; /* Not found */ session_id_len = packet->payload[base_offset]; #ifdef DEBUG_TLS printf("TLS [len: %u][handshake_type: %02X]\n", packet->payload_packet_len, handshake_type); #endif tls_version = ntohs(*((u_int16_t*)&packet->payload[version_offset])); if(handshake_type == 0x02 /* Server Hello */) { int i, rc; ja3.server.tls_handshake_version = tls_version; #ifdef DEBUG_TLS printf("TLS Server Hello [version: 0x%04X]\n", tls_version); #endif /* The server hello decides about the TLS version of this flow https://networkengineering.stackexchange.com/questions/55752/why-does-wireshark-show-version-tls-1-2-here-instead-of-tls-1-3 */ if(packet->udp) offset += session_id_len + 1; else { if(tls_version < 0x7F15 /* TLS 1.3 lacks of session id */) offset += session_id_len+1; } if((offset+3) > packet->payload_packet_len) return(0); /* Not found */ ja3.server.num_cipher = 1, ja3.server.cipher[0] = ntohs(*((u_int16_t*)&packet->payload[offset])); if((flow->protos.tls_quic_stun.tls_quic.server_unsafe_cipher = ndpi_is_safe_ssl_cipher(ja3.server.cipher[0])) == 1) ndpi_set_risk(flow, NDPI_TLS_WEAK_CIPHER); flow->protos.tls_quic_stun.tls_quic.server_cipher = ja3.server.cipher[0]; #ifdef DEBUG_TLS printf("TLS [server][session_id_len: %u][cipher: %04X]\n", session_id_len, ja3.server.cipher[0]); #endif offset += 2 + 1; if((offset + 1) < packet->payload_packet_len) /* +1 because we are goint to read 2 bytes */ extension_len = ntohs(*((u_int16_t*)&packet->payload[offset])); else extension_len = 0; #ifdef DEBUG_TLS printf("TLS [server][extension_len: %u]\n", extension_len); #endif offset += 2; for(i=0; i<extension_len; ) { u_int16_t extension_id, extension_len; if((offset+4) > packet->payload_packet_len) break; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset])); extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+2])); if(ja3.server.num_tls_extension < MAX_NUM_JA3) ja3.server.tls_extension[ja3.server.num_tls_extension++] = extension_id; #ifdef DEBUG_TLS printf("TLS [server][extension_id: %u/0x%04X][len: %u]\n", extension_id, extension_id, extension_len); #endif if(extension_id == 43 /* supported versions */) { if(extension_len >= 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[offset+4])); #ifdef DEBUG_TLS printf("TLS [server] [TLS version: 0x%04X]\n", tls_version); #endif 
flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.server.tls_supported_version = tls_version; } } else if(extension_id == 16 /* application_layer_protocol_negotiation (ALPN) */) { u_int16_t s_offset = offset+4; u_int16_t tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Server TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len) { #ifdef DEBUG_TLS printf("Server TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) { alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; } s_offset += alpn_len, alpn_str_len += alpn_len;; } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Server TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); if(flow->protos.tls_quic_stun.tls_quic.alpn != NULL) tlsCheckUncommonALPN(flow); snprintf(ja3.server.alpn, sizeof(ja3.server.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.server.alpn[i] != '\0'; i++) if(ja3.server.alpn[i] == ',') ja3.server.alpn[i] = '-'; } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+4 + 1; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.server.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.server.elliptic_curve_point_format[ja3.server.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid num elliptic %u\n", ja3.server.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } i += 4 + extension_len, offset += 4 + extension_len; } /* for */ ja3_str_len = snprintf(ja3_str, sizeof(ja3_str), "%u,", ja3.server.tls_handshake_version); for(i=0; i<ja3.server.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.cipher[i]); if(rc <= 0) break; else ja3_str_len += rc; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if(rc > 0 && ja3_str_len + rc < JA3_STR_LEN) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.server.num_tls_extension; i++) { int rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.tls_extension[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(ndpi_struct->enable_ja3_plus) { for(i=0; i<ja3.server.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.server.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ja3.server.alpn[0] != '\0') { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",%s", ja3.server.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_TLS printf("[JA3+] Server: %s \n", ja3_str); #endif } else { #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", ja3_str); #endif } ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { int rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_server[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_server)-j, "%02x", md5_hash[i]); if(rc <= 0) break; else j += rc; } #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_server); #endif } else if(handshake_type == 0x01 /* Client Hello */) { u_int16_t cipher_len, cipher_offset; u_int8_t cookie_len = 0; flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.client.tls_handshake_version = tls_version; if(flow->protos.tls_quic_stun.tls_quic.ssl_version < 0x0302) /* TLSv1.1 */ ndpi_set_risk(flow, NDPI_TLS_OBSOLETE_VERSION); if((session_id_len+base_offset+3) > packet->payload_packet_len) return(0); /* Not found */ if(!is_dtls) { cipher_len = packet->payload[session_id_len+base_offset+2] + (packet->payload[session_id_len+base_offset+1] << 8); cipher_offset = base_offset + session_id_len + 3; } else { cookie_len = packet->payload[base_offset+session_id_len+1]; #ifdef DEBUG_TLS printf("[JA3] Client: DTLS cookie len %d\n", cookie_len); #endif if((session_id_len+base_offset+cookie_len+4) > packet->payload_packet_len) return(0); /* Not found */ cipher_len = ntohs(*((u_int16_t*)&packet->payload[base_offset+session_id_len+cookie_len+2])); cipher_offset = base_offset + session_id_len + cookie_len + 4; } #ifdef DEBUG_TLS printf("Client TLS [client cipher_len: %u][tls_version: 0x%04X]\n", cipher_len, tls_version); #endif if((cipher_offset+cipher_len) <= total_len) { u_int8_t safari_ciphers = 0, chrome_ciphers = 0; for(i=0; i<cipher_len;) { u_int16_t *id = (u_int16_t*)&packet->payload[cipher_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [cipher suite: %u/0x%04X] [%d/%u]\n", ntohs(*id), ntohs(*id), i, cipher_len); #endif if((*id == 0) || (packet->payload[cipher_offset+i] != packet->payload[cipher_offset+i+1])) { u_int16_t cipher_id = ntohs(*id); /* Skip GREASE [https://tools.ietf.org/id/draft-ietf-tls-grease-01.html] https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ if(ja3.client.num_cipher < MAX_NUM_JA3) ja3.client.cipher[ja3.client.num_cipher++] = cipher_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid cipher %u\n", ja3.client.num_cipher); #endif } switch(cipher_id) { case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: safari_ciphers++; break; case TLS_CIPHER_GREASE_RESERVED_0: case TLS_AES_128_GCM_SHA256: case TLS_AES_256_GCM_SHA384: case TLS_CHACHA20_POLY1305_SHA256: chrome_ciphers++; break; case TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_GCM_SHA256: case 
TLS_RSA_WITH_AES_256_GCM_SHA384: safari_ciphers++, chrome_ciphers++; break; } } i += 2; } /* for */ if(chrome_ciphers == 13) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 1; else if(safari_ciphers == 12) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 1; } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (cipher_offset+cipher_len), total_len); #endif } offset = base_offset + session_id_len + cookie_len + cipher_len + 2; offset += (!is_dtls) ? 1 : 2; if(offset < total_len) { u_int16_t compression_len; u_int16_t extensions_len; compression_len = packet->payload[offset]; offset++; #ifdef DEBUG_TLS printf("Client TLS [compression_len: %u]\n", compression_len); #endif // offset += compression_len + 3; offset += compression_len; if(offset+1 < total_len) { extensions_len = ntohs(*((u_int16_t*)&packet->payload[offset])); offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extensions_len: %u]\n", extensions_len); #endif if((extensions_len+offset) <= total_len) { /* Move to the first extension Type is u_int to avoid possible overflow on extension_len addition */ u_int extension_offset = 0; u_int32_t j; while(extension_offset < extensions_len && offset+extension_offset+4 <= total_len) { u_int16_t extension_id, extension_len, extn_off = offset+extension_offset; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extension_id: %u][extension_len: %u]\n", extension_id, extension_len); #endif if((extension_id == 0) || (packet->payload[extn_off] != packet->payload[extn_off+1])) { /* Skip GREASE */ if(ja3.client.num_tls_extension < MAX_NUM_JA3) ja3.client.tls_extension[ja3.client.num_tls_extension++] = extension_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid extensions %u\n", ja3.client.num_tls_extension); #endif } } if(extension_id == 0 /* server name */) { u_int16_t len; #ifdef DEBUG_TLS printf("[TLS] Extensions: found server name\n"); #endif if((offset+extension_offset+4) < packet->payload_packet_len) { len = (packet->payload[offset+extension_offset+3] << 8) + packet->payload[offset+extension_offset+4]; len = (u_int)ndpi_min(len, sizeof(buffer)-1); if((offset+extension_offset+5+len) <= packet->payload_packet_len) { strncpy(buffer, (char*)&packet->payload[offset+extension_offset+5], len); buffer[len] = '\0'; cleanupServerName(buffer, sizeof(buffer)); snprintf(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, sizeof(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name), "%s", buffer); #ifdef DEBUG_TLS printf("[TLS] SNI: [%s]\n", buffer); #endif if(!is_quic) { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } else { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_QUIC, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } if(ndpi_check_dga_name(ndpi_struct, flow, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, 1)) { char *sni = flow->protos.tls_quic_stun.tls_quic.client_requested_server_name; int len = strlen(sni); #ifdef DEBUG_TLS printf("[TLS] SNI: (DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif if((len >= 4) /* Check if it ends in .com or .net */ && ((strcmp(&sni[len-4], ".com") == 0) || 
(strcmp(&sni[len-4], ".net") == 0)) && (strncmp(sni, "www.", 4) == 0)) /* Not starting with www.... */ ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TOR, NDPI_PROTOCOL_TLS); } else { #ifdef DEBUG_TLS printf("[TLS] SNI: (NO DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif } } else { #ifdef DEBUG_TLS printf("[TLS] Extensions server len too short: %u vs %u\n", offset+extension_offset+5+len, packet->payload_packet_len); #endif } } } else if(extension_id == 10 /* supported groups */) { u_int16_t s_offset = offset+extension_offset + 2; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveGroups: len=%u]\n", extension_len); #endif if((s_offset+extension_len-2) <= total_len) { for(i=0; i<extension_len-2;) { u_int16_t s_group = ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); #ifdef DEBUG_TLS printf("Client TLS [EllipticCurve: %u/0x%04X]\n", s_group, s_group); #endif if((s_group == 0) || (packet->payload[s_offset+i] != packet->payload[s_offset+i+1])) { /* Skip GREASE */ if(ja3.client.num_elliptic_curve < MAX_NUM_JA3) ja3.client.elliptic_curve[ja3.client.num_elliptic_curve++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve); #endif } } i += 2; } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (s_offset+extension_len-1), total_len); #endif } } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+extension_offset + 1; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.client.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.client.elliptic_curve_point_format[ja3.client.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } else if(extension_id == 13 /* signature algorithms */) { u_int16_t s_offset = offset+extension_offset, safari_signature_algorithms = 0, chrome_signature_algorithms = 0; u_int16_t tot_signature_algorithms_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: block_len=%u/len=%u]\n", extension_len, tot_signature_algorithms_len); #endif s_offset += 2; tot_signature_algorithms_len = ndpi_min((sizeof(ja3.client.signature_algorithms) / 2) - 1, tot_signature_algorithms_len); #ifdef TLS_HANDLE_SIGNATURE_ALGORITMS flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms = ndpi_min(tot_signature_algorithms_len / 2, MAX_NUM_TLS_SIGNATURE_ALGORITHMS); memcpy(flow->protos.tls_quic_stun.tls_quic.client_signature_algorithms, &packet->payload[s_offset], 2 /* 16 bit */*flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms); #endif for(i=0; i<tot_signature_algorithms_len; i++) { int rc = snprintf(&ja3.client.signature_algorithms[i*2], sizeof(ja3.client.signature_algorithms)-i*2, "%02X", packet->payload[s_offset+i]); if(rc < 0) break; } for(i=0; i<tot_signature_algorithms_len; i+=2) { u_int16_t cipher_id = (u_int16_t)ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); // printf("=>> %04X\n", 
cipher_id); switch(cipher_id) { case ECDSA_SECP521R1_SHA512: flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls = 1; break; case ECDSA_SECP256R1_SHA256: case ECDSA_SECP384R1_SHA384: case RSA_PKCS1_SHA256: case RSA_PKCS1_SHA384: case RSA_PKCS1_SHA512: case RSA_PSS_RSAE_SHA256: case RSA_PSS_RSAE_SHA384: case RSA_PSS_RSAE_SHA512: chrome_signature_algorithms++, safari_signature_algorithms++; break; } } if(flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0, flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; if(safari_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0; if(chrome_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; ja3.client.signature_algorithms[i*2] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: %s]\n", ja3.client.signature_algorithms); #endif } else if(extension_id == 16 /* application_layer_protocol_negotiation */) { u_int16_t s_offset = offset+extension_offset; u_int16_t tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Client TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len && (s_offset + alpn_len) <= total_len) { #ifdef DEBUG_TLS printf("Client TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; s_offset += alpn_len, alpn_str_len += alpn_len;; } else break; } else break; } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); snprintf(ja3.client.alpn, sizeof(ja3.client.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.client.alpn[i] != '\0'; i++) if(ja3.client.alpn[i] == ',') ja3.client.alpn[i] = '-'; } else if(extension_id == 43 /* supported versions */) { u_int16_t s_offset = offset+extension_offset; u_int8_t version_len = packet->payload[s_offset]; char version_str[256]; u_int8_t version_str_len = 0; version_str[0] = 0; #ifdef DEBUG_TLS printf("Client TLS [TLS version len: %u]\n", version_len); #endif if(version_len == (extension_len-1)) { u_int8_t j; u_int16_t supported_versions_offset = 0; s_offset++; // careful not to overflow and loop forever with u_int8_t for(j=0; j+1<version_len; j += 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[s_offset+j])); u_int8_t unknown_tls_version; #ifdef DEBUG_TLS printf("Client TLS [TLS version: %s/0x%04X]\n", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version), tls_version); #endif if((version_str_len+8) < sizeof(version_str)) { int rc = snprintf(&version_str[version_str_len], sizeof(version_str) - version_str_len, "%s%s", (version_str_len > 0) ? 
"," : "", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version)); if(rc <= 0) break; else version_str_len += rc; rc = snprintf(&ja3.client.supported_versions[supported_versions_offset], sizeof(ja3.client.supported_versions)-supported_versions_offset, "%s%04X", (j > 0) ? "-" : "", tls_version); if(rc > 0) supported_versions_offset += rc; } } #ifdef DEBUG_TLS printf("Client TLS [SUPPORTED_VERSIONS: %s]\n", ja3.client.supported_versions); #endif if(flow->protos.tls_quic_stun.tls_quic.tls_supported_versions == NULL) flow->protos.tls_quic_stun.tls_quic.tls_supported_versions = ndpi_strdup(version_str); } } else if(extension_id == 65486 /* encrypted server name */) { /* - https://tools.ietf.org/html/draft-ietf-tls-esni-06 - https://blog.cloudflare.com/encrypted-sni/ */ u_int16_t e_offset = offset+extension_offset; u_int16_t initial_offset = e_offset; u_int16_t e_sni_len, cipher_suite = ntohs(*((u_int16_t*)&packet->payload[e_offset])); flow->protos.tls_quic_stun.tls_quic.encrypted_sni.cipher_suite = cipher_suite; e_offset += 2; /* Cipher suite len */ /* Key Share Entry */ e_offset += 2; /* Group */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { /* Record Digest */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { e_sni_len = ntohs(*((u_int16_t*)&packet->payload[e_offset])); e_offset += 2; if((e_offset+e_sni_len-extension_len-initial_offset) >= 0 && e_offset+e_sni_len < packet->payload_packet_len) { #ifdef DEBUG_ENCRYPTED_SNI printf("Client TLS [Encrypted Server Name len: %u]\n", e_sni_len); #endif if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni = (char*)ndpi_malloc(e_sni_len*2+1); if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni) { u_int16_t i, off; for(i=e_offset, off=0; i<(e_offset+e_sni_len); i++) { int rc = sprintf(&flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off], "%02X", packet->payload[i] & 0XFF); if(rc <= 0) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off] = '\0'; break; } else off += rc; } } } } } } } else if(extension_id == 65445 || /* QUIC transport parameters (drafts version) */ extension_id == 57) { /* QUIC transport parameters (final version) */ u_int16_t s_offset = offset+extension_offset; uint16_t final_offset; int using_var_int = is_version_with_var_int_transport_params(quic_version); if(!using_var_int) { if(s_offset+1 >= total_len) { final_offset = 0; /* Force skipping extension */ } else { u_int16_t seq_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); s_offset += 2; final_offset = MIN(total_len, s_offset + seq_len); } } else { final_offset = MIN(total_len, s_offset + extension_len); } while(s_offset < final_offset) { u_int64_t param_type, param_len; if(!using_var_int) { if(s_offset+3 >= final_offset) break; param_type = ntohs(*((u_int16_t*)&packet->payload[s_offset])); param_len = ntohs(*((u_int16_t*)&packet->payload[s_offset + 2])); s_offset += 4; } else { if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_type); if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_len); } #ifdef DEBUG_TLS printf("Client TLS [QUIC TP: Param 0x%x Len %d]\n", 
(int)param_type, (int)param_len); #endif if(s_offset+param_len > final_offset) break; if(param_type==0x3129) { #ifdef DEBUG_TLS printf("UA [%.*s]\n", (int)param_len, &packet->payload[s_offset]); #endif http_process_user_agent(ndpi_struct, flow, &packet->payload[s_offset], param_len); break; } s_offset += param_len; } } extension_offset += extension_len; /* Move to the next extension */ #ifdef DEBUG_TLS printf("Client TLS [extension_offset/len: %u/%u]\n", extension_offset, extension_len); #endif } /* while */ if(!invalid_ja3) { int rc; compute_ja3c: ja3_str_len = snprintf(ja3_str, sizeof(ja3_str), "%u,", ja3.client.tls_handshake_version); for(i=0; i<ja3.client.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.cipher[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_tls_extension; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.tls_extension[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_elliptic_curve; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.elliptic_curve[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; for(i=0; i<ja3.client.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.client.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ndpi_struct->enable_ja3_plus) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",%s,%s,%s", ja3.client.signature_algorithms, ja3.client.supported_versions, ja3.client.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_JA3C printf("[JA3+] Client: %s \n", ja3_str); #endif ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_client[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_client)-j, "%02x", md5_hash[i]); if(rc > 0) j += rc; else break; } #ifdef DEBUG_JA3C printf("[JA3] Client: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_client); #endif if(ndpi_struct->malicious_ja3_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_ja3_automa.ac_automa, flow->protos.tls_quic_stun.tls_quic.ja3_client); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_JA3); } } /* Before returning to the caller we need to make a final check */ if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0303) /* >= TLSv1.2 */ && (flow->protos.tls_quic_stun.tls_quic.alpn == NULL) /* No ALPN */) { ndpi_set_risk(flow, NDPI_TLS_NOT_CARRYING_HTTPS); } /* Suspicious Domain Fronting: https://github.com/SixGenInc/Noctilucent/blob/master/docs/ */ if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni && flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] != '\0') { ndpi_set_risk(flow, NDPI_TLS_SUSPICIOUS_ESNI_USAGE); } /* Add check for missing SNI */ if((flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == 0) && (flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0302) /* TLSv1.1 */ && (flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) /* No ESNI */ ) { /* This is a bit suspicious */ ndpi_set_risk(flow, NDPI_TLS_MISSING_SNI); } return(2 /* Client Certificate */); } else { #ifdef DEBUG_TLS printf("[TLS] Client: too short [%u vs %u]\n", (extensions_len+offset), total_len); #endif } } else if(offset == total_len) { /* TLS does not have extensions etc */ goto compute_ja3c; } } else { #ifdef DEBUG_TLS printf("[JA3] Client: invalid length detected\n"); #endif } } } return(0); /* Not found */ }
int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version) { struct ndpi_packet_struct *packet = &flow->packet; union ja3_info ja3; u_int8_t invalid_ja3 = 0; u_int16_t tls_version, ja3_str_len; char ja3_str[JA3_STR_LEN]; ndpi_MD5_CTX ctx; u_char md5_hash[16]; int i; u_int16_t total_len; u_int8_t handshake_type; char buffer[64] = { '\0' }; int is_quic = (quic_version != 0); int is_dtls = packet->udp && (!is_quic); #ifdef DEBUG_TLS printf("TLS %s() called\n", __FUNCTION__); #endif memset(&ja3, 0, sizeof(ja3)); handshake_type = packet->payload[0]; total_len = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; if((total_len > packet->payload_packet_len) || (packet->payload[1] != 0x0)) return(0); /* Not found */ total_len = packet->payload_packet_len; /* At least "magic" 3 bytes, null for string end, otherwise no need to waste cpu cycles */ if(total_len > 4) { u_int16_t base_offset = (!is_dtls) ? 38 : 46; u_int16_t version_offset = (!is_dtls) ? 4 : 12; u_int16_t offset = (!is_dtls) ? 38 : 46, extension_len, j; u_int8_t session_id_len = 0; if((base_offset >= total_len) || (version_offset + 1) >= total_len) return 0; /* Not found */ session_id_len = packet->payload[base_offset]; #ifdef DEBUG_TLS printf("TLS [len: %u][handshake_type: %02X]\n", packet->payload_packet_len, handshake_type); #endif tls_version = ntohs(*((u_int16_t*)&packet->payload[version_offset])); if(handshake_type == 0x02 /* Server Hello */) { int i, rc; ja3.server.tls_handshake_version = tls_version; #ifdef DEBUG_TLS printf("TLS Server Hello [version: 0x%04X]\n", tls_version); #endif /* The server hello decides about the TLS version of this flow https://networkengineering.stackexchange.com/questions/55752/why-does-wireshark-show-version-tls-1-2-here-instead-of-tls-1-3 */ if(packet->udp) offset += session_id_len + 1; else { if(tls_version < 0x7F15 /* TLS 1.3 lacks of session id */) offset += session_id_len+1; } if((offset+3) > packet->payload_packet_len) return(0); /* Not found */ ja3.server.num_cipher = 1, ja3.server.cipher[0] = ntohs(*((u_int16_t*)&packet->payload[offset])); if((flow->protos.tls_quic_stun.tls_quic.server_unsafe_cipher = ndpi_is_safe_ssl_cipher(ja3.server.cipher[0])) == 1) ndpi_set_risk(flow, NDPI_TLS_WEAK_CIPHER); flow->protos.tls_quic_stun.tls_quic.server_cipher = ja3.server.cipher[0]; #ifdef DEBUG_TLS printf("TLS [server][session_id_len: %u][cipher: %04X]\n", session_id_len, ja3.server.cipher[0]); #endif offset += 2 + 1; if((offset + 1) < packet->payload_packet_len) /* +1 because we are goint to read 2 bytes */ extension_len = ntohs(*((u_int16_t*)&packet->payload[offset])); else extension_len = 0; #ifdef DEBUG_TLS printf("TLS [server][extension_len: %u]\n", extension_len); #endif offset += 2; for(i=0; i<extension_len; ) { u_int16_t extension_id, extension_len; if((offset+4) > packet->payload_packet_len) break; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset])); extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+2])); if(ja3.server.num_tls_extension < MAX_NUM_JA3) ja3.server.tls_extension[ja3.server.num_tls_extension++] = extension_id; #ifdef DEBUG_TLS printf("TLS [server][extension_id: %u/0x%04X][len: %u]\n", extension_id, extension_id, extension_len); #endif if(extension_id == 43 /* supported versions */) { if(extension_len >= 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[offset+4])); #ifdef DEBUG_TLS printf("TLS [server] [TLS version: 0x%04X]\n", tls_version); #endif 
flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.server.tls_supported_version = tls_version; } } else if(extension_id == 16 /* application_layer_protocol_negotiation (ALPN) */) { u_int16_t s_offset = offset+4; u_int16_t tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Server TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len) { #ifdef DEBUG_TLS printf("Server TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) { alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; } s_offset += alpn_len, alpn_str_len += alpn_len;; } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Server TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); if(flow->protos.tls_quic_stun.tls_quic.alpn != NULL) tlsCheckUncommonALPN(flow); snprintf(ja3.server.alpn, sizeof(ja3.server.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.server.alpn[i] != '\0'; i++) if(ja3.server.alpn[i] == ',') ja3.server.alpn[i] = '-'; } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+4 + 1; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.server.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.server.elliptic_curve_point_format[ja3.server.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid num elliptic %u\n", ja3.server.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } i += 4 + extension_len, offset += 4 + extension_len; } /* for */ ja3_str_len = snprintf(ja3_str, JA3_STR_LEN, "%u,", ja3.server.tls_handshake_version); for(i=0; (i<ja3.server.num_cipher) && (JA3_STR_LEN > ja3_str_len); i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.cipher[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(JA3_STR_LEN > ja3_str_len) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if(rc > 0 && ja3_str_len + rc < JA3_STR_LEN) ja3_str_len += rc; } /* ********** */ for(i=0; (i<ja3.server.num_tls_extension) && (JA3_STR_LEN > ja3_str_len); i++) { int rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.tls_extension[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(ndpi_struct->enable_ja3_plus) { for(i=0; (i<ja3.server.num_elliptic_curve_point_format) && (JA3_STR_LEN > ja3_str_len); i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.server.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if((ja3.server.alpn[0] != '\0') && (JA3_STR_LEN > ja3_str_len)) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",%s", ja3.server.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_TLS printf("[JA3+] Server: %s \n", ja3_str); #endif } else { #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", ja3_str); #endif } ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { int rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_server[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_server)-j, "%02x", md5_hash[i]); if(rc <= 0) break; else j += rc; } #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_server); #endif } else if(handshake_type == 0x01 /* Client Hello */) { u_int16_t cipher_len, cipher_offset; u_int8_t cookie_len = 0; flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.client.tls_handshake_version = tls_version; if(flow->protos.tls_quic_stun.tls_quic.ssl_version < 0x0302) /* TLSv1.1 */ ndpi_set_risk(flow, NDPI_TLS_OBSOLETE_VERSION); if((session_id_len+base_offset+3) > packet->payload_packet_len) return(0); /* Not found */ if(!is_dtls) { cipher_len = packet->payload[session_id_len+base_offset+2] + (packet->payload[session_id_len+base_offset+1] << 8); cipher_offset = base_offset + session_id_len + 3; } else { cookie_len = packet->payload[base_offset+session_id_len+1]; #ifdef DEBUG_TLS printf("[JA3] Client: DTLS cookie len %d\n", cookie_len); #endif if((session_id_len+base_offset+cookie_len+4) > packet->payload_packet_len) return(0); /* Not found */ cipher_len = ntohs(*((u_int16_t*)&packet->payload[base_offset+session_id_len+cookie_len+2])); cipher_offset = base_offset + session_id_len + cookie_len + 4; } #ifdef DEBUG_TLS printf("Client TLS [client cipher_len: %u][tls_version: 0x%04X]\n", cipher_len, tls_version); #endif if((cipher_offset+cipher_len) <= total_len) { u_int8_t safari_ciphers = 0, chrome_ciphers = 0; for(i=0; i<cipher_len;) { u_int16_t *id = (u_int16_t*)&packet->payload[cipher_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [cipher suite: %u/0x%04X] [%d/%u]\n", ntohs(*id), ntohs(*id), i, cipher_len); #endif if((*id == 0) || (packet->payload[cipher_offset+i] != packet->payload[cipher_offset+i+1])) { u_int16_t cipher_id = ntohs(*id); /* Skip GREASE [https://tools.ietf.org/id/draft-ietf-tls-grease-01.html] https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ if(ja3.client.num_cipher < MAX_NUM_JA3) ja3.client.cipher[ja3.client.num_cipher++] = cipher_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid cipher %u\n", ja3.client.num_cipher); #endif } switch(cipher_id) { case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: safari_ciphers++; break; case TLS_CIPHER_GREASE_RESERVED_0: case TLS_AES_128_GCM_SHA256: case TLS_AES_256_GCM_SHA384: case TLS_CHACHA20_POLY1305_SHA256: chrome_ciphers++; break; case TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_GCM_SHA256: case 
TLS_RSA_WITH_AES_256_GCM_SHA384: safari_ciphers++, chrome_ciphers++; break; } } i += 2; } /* for */ if(chrome_ciphers == 13) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 1; else if(safari_ciphers == 12) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 1; } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (cipher_offset+cipher_len), total_len); #endif } offset = base_offset + session_id_len + cookie_len + cipher_len + 2; offset += (!is_dtls) ? 1 : 2; if(offset < total_len) { u_int16_t compression_len; u_int16_t extensions_len; compression_len = packet->payload[offset]; offset++; #ifdef DEBUG_TLS printf("Client TLS [compression_len: %u]\n", compression_len); #endif // offset += compression_len + 3; offset += compression_len; if(offset+1 < total_len) { extensions_len = ntohs(*((u_int16_t*)&packet->payload[offset])); offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extensions_len: %u]\n", extensions_len); #endif if((extensions_len+offset) <= total_len) { /* Move to the first extension Type is u_int to avoid possible overflow on extension_len addition */ u_int extension_offset = 0; u_int32_t j; while(extension_offset < extensions_len && offset+extension_offset+4 <= total_len) { u_int16_t extension_id, extension_len, extn_off = offset+extension_offset; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extension_id: %u][extension_len: %u]\n", extension_id, extension_len); #endif if((extension_id == 0) || (packet->payload[extn_off] != packet->payload[extn_off+1])) { /* Skip GREASE */ if(ja3.client.num_tls_extension < MAX_NUM_JA3) ja3.client.tls_extension[ja3.client.num_tls_extension++] = extension_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid extensions %u\n", ja3.client.num_tls_extension); #endif } } if(extension_id == 0 /* server name */) { u_int16_t len; #ifdef DEBUG_TLS printf("[TLS] Extensions: found server name\n"); #endif if((offset+extension_offset+4) < packet->payload_packet_len) { len = (packet->payload[offset+extension_offset+3] << 8) + packet->payload[offset+extension_offset+4]; len = (u_int)ndpi_min(len, sizeof(buffer)-1); if((offset+extension_offset+5+len) <= packet->payload_packet_len) { strncpy(buffer, (char*)&packet->payload[offset+extension_offset+5], len); buffer[len] = '\0'; cleanupServerName(buffer, sizeof(buffer)); snprintf(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, sizeof(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name), "%s", buffer); #ifdef DEBUG_TLS printf("[TLS] SNI: [%s]\n", buffer); #endif if(!is_quic) { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } else { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_QUIC, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } if(ndpi_check_dga_name(ndpi_struct, flow, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, 1)) { char *sni = flow->protos.tls_quic_stun.tls_quic.client_requested_server_name; int len = strlen(sni); #ifdef DEBUG_TLS printf("[TLS] SNI: (DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif if((len >= 4) /* Check if it ends in .com or .net */ && ((strcmp(&sni[len-4], ".com") == 0) || 
(strcmp(&sni[len-4], ".net") == 0)) && (strncmp(sni, "www.", 4) == 0)) /* Not starting with www.... */ ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TOR, NDPI_PROTOCOL_TLS); } else { #ifdef DEBUG_TLS printf("[TLS] SNI: (NO DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif } } else { #ifdef DEBUG_TLS printf("[TLS] Extensions server len too short: %u vs %u\n", offset+extension_offset+5+len, packet->payload_packet_len); #endif } } } else if(extension_id == 10 /* supported groups */) { u_int16_t s_offset = offset+extension_offset + 2; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveGroups: len=%u]\n", extension_len); #endif if((s_offset+extension_len-2) <= total_len) { for(i=0; i<extension_len-2;) { u_int16_t s_group = ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); #ifdef DEBUG_TLS printf("Client TLS [EllipticCurve: %u/0x%04X]\n", s_group, s_group); #endif if((s_group == 0) || (packet->payload[s_offset+i] != packet->payload[s_offset+i+1])) { /* Skip GREASE */ if(ja3.client.num_elliptic_curve < MAX_NUM_JA3) ja3.client.elliptic_curve[ja3.client.num_elliptic_curve++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve); #endif } } i += 2; } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (s_offset+extension_len-1), total_len); #endif } } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+extension_offset + 1; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.client.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.client.elliptic_curve_point_format[ja3.client.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } else if(extension_id == 13 /* signature algorithms */) { u_int16_t s_offset = offset+extension_offset, safari_signature_algorithms = 0, chrome_signature_algorithms = 0; u_int16_t tot_signature_algorithms_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: block_len=%u/len=%u]\n", extension_len, tot_signature_algorithms_len); #endif s_offset += 2; tot_signature_algorithms_len = ndpi_min((sizeof(ja3.client.signature_algorithms) / 2) - 1, tot_signature_algorithms_len); #ifdef TLS_HANDLE_SIGNATURE_ALGORITMS flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms = ndpi_min(tot_signature_algorithms_len / 2, MAX_NUM_TLS_SIGNATURE_ALGORITHMS); memcpy(flow->protos.tls_quic_stun.tls_quic.client_signature_algorithms, &packet->payload[s_offset], 2 /* 16 bit */*flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms); #endif for(i=0; i<tot_signature_algorithms_len; i++) { int rc = snprintf(&ja3.client.signature_algorithms[i*2], sizeof(ja3.client.signature_algorithms)-i*2, "%02X", packet->payload[s_offset+i]); if(rc < 0) break; } for(i=0; i<tot_signature_algorithms_len; i+=2) { u_int16_t cipher_id = (u_int16_t)ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); // printf("=>> %04X\n", 
cipher_id); switch(cipher_id) { case ECDSA_SECP521R1_SHA512: flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls = 1; break; case ECDSA_SECP256R1_SHA256: case ECDSA_SECP384R1_SHA384: case RSA_PKCS1_SHA256: case RSA_PKCS1_SHA384: case RSA_PKCS1_SHA512: case RSA_PSS_RSAE_SHA256: case RSA_PSS_RSAE_SHA384: case RSA_PSS_RSAE_SHA512: chrome_signature_algorithms++, safari_signature_algorithms++; break; } } if(flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0, flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; if(safari_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0; if(chrome_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; ja3.client.signature_algorithms[i*2] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: %s]\n", ja3.client.signature_algorithms); #endif } else if(extension_id == 16 /* application_layer_protocol_negotiation */) { u_int16_t s_offset = offset+extension_offset; u_int16_t tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Client TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len && (s_offset + alpn_len) <= total_len) { #ifdef DEBUG_TLS printf("Client TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; s_offset += alpn_len, alpn_str_len += alpn_len;; } else break; } else break; } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); snprintf(ja3.client.alpn, sizeof(ja3.client.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.client.alpn[i] != '\0'; i++) if(ja3.client.alpn[i] == ',') ja3.client.alpn[i] = '-'; } else if(extension_id == 43 /* supported versions */) { u_int16_t s_offset = offset+extension_offset; u_int8_t version_len = packet->payload[s_offset]; char version_str[256]; u_int8_t version_str_len = 0; version_str[0] = 0; #ifdef DEBUG_TLS printf("Client TLS [TLS version len: %u]\n", version_len); #endif if(version_len == (extension_len-1)) { u_int8_t j; u_int16_t supported_versions_offset = 0; s_offset++; // careful not to overflow and loop forever with u_int8_t for(j=0; j+1<version_len; j += 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[s_offset+j])); u_int8_t unknown_tls_version; #ifdef DEBUG_TLS printf("Client TLS [TLS version: %s/0x%04X]\n", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version), tls_version); #endif if((version_str_len+8) < sizeof(version_str)) { int rc = snprintf(&version_str[version_str_len], sizeof(version_str) - version_str_len, "%s%s", (version_str_len > 0) ? 
"," : "", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version)); if(rc <= 0) break; else version_str_len += rc; rc = snprintf(&ja3.client.supported_versions[supported_versions_offset], sizeof(ja3.client.supported_versions)-supported_versions_offset, "%s%04X", (j > 0) ? "-" : "", tls_version); if(rc > 0) supported_versions_offset += rc; } } #ifdef DEBUG_TLS printf("Client TLS [SUPPORTED_VERSIONS: %s]\n", ja3.client.supported_versions); #endif if(flow->protos.tls_quic_stun.tls_quic.tls_supported_versions == NULL) flow->protos.tls_quic_stun.tls_quic.tls_supported_versions = ndpi_strdup(version_str); } } else if(extension_id == 65486 /* encrypted server name */) { /* - https://tools.ietf.org/html/draft-ietf-tls-esni-06 - https://blog.cloudflare.com/encrypted-sni/ */ u_int16_t e_offset = offset+extension_offset; u_int16_t initial_offset = e_offset; u_int16_t e_sni_len, cipher_suite = ntohs(*((u_int16_t*)&packet->payload[e_offset])); flow->protos.tls_quic_stun.tls_quic.encrypted_sni.cipher_suite = cipher_suite; e_offset += 2; /* Cipher suite len */ /* Key Share Entry */ e_offset += 2; /* Group */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { /* Record Digest */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { e_sni_len = ntohs(*((u_int16_t*)&packet->payload[e_offset])); e_offset += 2; if((e_offset+e_sni_len-extension_len-initial_offset) >= 0 && e_offset+e_sni_len < packet->payload_packet_len) { #ifdef DEBUG_ENCRYPTED_SNI printf("Client TLS [Encrypted Server Name len: %u]\n", e_sni_len); #endif if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni = (char*)ndpi_malloc(e_sni_len*2+1); if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni) { u_int16_t i, off; for(i=e_offset, off=0; i<(e_offset+e_sni_len); i++) { int rc = sprintf(&flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off], "%02X", packet->payload[i] & 0XFF); if(rc <= 0) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off] = '\0'; break; } else off += rc; } } } } } } } else if(extension_id == 65445 || /* QUIC transport parameters (drafts version) */ extension_id == 57) { /* QUIC transport parameters (final version) */ u_int16_t s_offset = offset+extension_offset; uint16_t final_offset; int using_var_int = is_version_with_var_int_transport_params(quic_version); if(!using_var_int) { if(s_offset+1 >= total_len) { final_offset = 0; /* Force skipping extension */ } else { u_int16_t seq_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); s_offset += 2; final_offset = MIN(total_len, s_offset + seq_len); } } else { final_offset = MIN(total_len, s_offset + extension_len); } while(s_offset < final_offset) { u_int64_t param_type, param_len; if(!using_var_int) { if(s_offset+3 >= final_offset) break; param_type = ntohs(*((u_int16_t*)&packet->payload[s_offset])); param_len = ntohs(*((u_int16_t*)&packet->payload[s_offset + 2])); s_offset += 4; } else { if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_type); if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_len); } #ifdef DEBUG_TLS printf("Client TLS [QUIC TP: Param 0x%x Len %d]\n", 
(int)param_type, (int)param_len); #endif if(s_offset+param_len > final_offset) break; if(param_type==0x3129) { #ifdef DEBUG_TLS printf("UA [%.*s]\n", (int)param_len, &packet->payload[s_offset]); #endif http_process_user_agent(ndpi_struct, flow, &packet->payload[s_offset], param_len); break; } s_offset += param_len; } } extension_offset += extension_len; /* Move to the next extension */ #ifdef DEBUG_TLS printf("Client TLS [extension_offset/len: %u/%u]\n", extension_offset, extension_len); #endif } /* while */ if(!invalid_ja3) { int rc; compute_ja3c: ja3_str_len = snprintf(ja3_str, JA3_STR_LEN, "%u,", ja3.client.tls_handshake_version); for(i=0; i<ja3.client.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.cipher[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_tls_extension; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.tls_extension[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_elliptic_curve; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.elliptic_curve[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; for(i=0; i<ja3.client.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.client.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ndpi_struct->enable_ja3_plus) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",%s,%s,%s", ja3.client.signature_algorithms, ja3.client.supported_versions, ja3.client.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_JA3C printf("[JA3+] Client: %s \n", ja3_str); #endif ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_client[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_client)-j, "%02x", md5_hash[i]); if(rc > 0) j += rc; else break; } #ifdef DEBUG_JA3C printf("[JA3] Client: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_client); #endif if(ndpi_struct->malicious_ja3_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_ja3_automa.ac_automa, flow->protos.tls_quic_stun.tls_quic.ja3_client); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_JA3); } } /* Before returning to the caller we need to make a final check */ if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0303) /* >= TLSv1.2 */ && (flow->protos.tls_quic_stun.tls_quic.alpn == NULL) /* No ALPN */) { ndpi_set_risk(flow, NDPI_TLS_NOT_CARRYING_HTTPS); } /* Suspicious Domain Fronting: https://github.com/SixGenInc/Noctilucent/blob/master/docs/ */ if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni && flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] != '\0') { ndpi_set_risk(flow, NDPI_TLS_SUSPICIOUS_ESNI_USAGE); } /* Add check for missing SNI */ if((flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == 0) && (flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0302) /* TLSv1.1 */ && (flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) /* No ESNI */ ) { /* This is a bit suspicious */ ndpi_set_risk(flow, NDPI_TLS_MISSING_SNI); } return(2 /* Client Certificate */); } else { #ifdef DEBUG_TLS printf("[TLS] Client: too short [%u vs %u]\n", (extensions_len+offset), total_len); #endif } } else if(offset == total_len) { /* TLS does not have extensions etc */ goto compute_ja3c; } } else { #ifdef DEBUG_TLS printf("[JA3] Client: invalid length detected\n"); #endif } } } return(0); /* Not found */ }
{'added': [(1372, ' ja3_str_len = snprintf(ja3_str, JA3_STR_LEN, "%u,", ja3.server.tls_handshake_version);'), (1374, ' for(i=0; (i<ja3.server.num_cipher) && (JA3_STR_LEN > ja3_str_len); i++) {'), (1375, '\trc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.cipher[i]);'), (1380, ' if(JA3_STR_LEN > ja3_str_len) {'), (1381, '\trc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",");'), (1382, '\tif(rc > 0 && ja3_str_len + rc < JA3_STR_LEN) ja3_str_len += rc;'), (1383, ' }'), (1384, ''), (1387, ' for(i=0; (i<ja3.server.num_tls_extension) && (JA3_STR_LEN > ja3_str_len); i++) {'), (1388, '\tint rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.tls_extension[i]);'), (1394, '\tfor(i=0; (i<ja3.server.num_elliptic_curve_point_format) && (JA3_STR_LEN > ja3_str_len); i++) {'), (1395, '\t rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u",'), (1400, "\tif((ja3.server.alpn[0] != '\\0') && (JA3_STR_LEN > ja3_str_len)) {"), (1401, '\t rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",%s", ja3.server.alpn);'), (1987, '\t ja3_str_len = snprintf(ja3_str, JA3_STR_LEN, "%u,", ja3.client.tls_handshake_version);'), (1990, '\t\trc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u",'), (1995, '\t rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",");'), (2001, '\t\trc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u",'), (2006, '\t rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",");'), (2012, '\t\trc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u",'), (2017, '\t rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",");'), (2021, '\t\trc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u",'), (2027, '\t\trc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len,')], 'deleted': [(1372, ' ja3_str_len = snprintf(ja3_str, sizeof(ja3_str), "%u,", ja3.server.tls_handshake_version);'), (1374, ' for(i=0; i<ja3.server.num_cipher; i++) {'), (1375, '\trc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.cipher[i]);'), (1380, ' rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",");'), (1381, ' if(rc > 0 && ja3_str_len + rc < JA3_STR_LEN) ja3_str_len += rc;'), (1382, ''), (1385, ' for(i=0; i<ja3.server.num_tls_extension; i++) {'), (1386, '\tint rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.server.tls_extension[i]);'), (1392, '\tfor(i=0; i<ja3.server.num_elliptic_curve_point_format; i++) {'), (1393, '\t rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u",'), (1398, "\tif(ja3.server.alpn[0] != '\\0') {"), (1399, '\t rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",%s", ja3.server.alpn);'), (1985, '\t ja3_str_len = snprintf(ja3_str, sizeof(ja3_str), "%u,", ja3.client.tls_handshake_version);'), (1988, '\t\trc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u",'), (1993, '\t rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",");'), (1999, '\t\trc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u",'), (2004, '\t rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",");'), (2010, '\t\trc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u",'), (2015, '\t rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",");'), (2019, '\t\trc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u",'), (2025, '\t\trc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len,')]}
23
21
1,368
12,328
https://github.com/ntop/nDPI
CVE-2021-36082
['CWE-787']
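Note on the record above (ntop/nDPI, CVE-2021-36082, CWE-787 out-of-bounds write): the diff swaps sizeof(ja3_str) for the JA3_STR_LEN constant and adds explicit remaining-space guards around every snprintf that appends to the JA3 string. The following is a minimal, self-contained C sketch of that bounded-append pattern; the names (FP_STR_LEN, build_fingerprint) are hypothetical and are not part of nDPI — this only illustrates the technique, not the actual fix.

    /* Sketch of the bounded string-building pattern from the fix above
     * (hypothetical names): never call snprintf with an underflowed
     * "remaining" size, and only advance the offset when the token fit. */
    #include <stdio.h>

    #define FP_STR_LEN 1024   /* stand-in for JA3_STR_LEN */

    static void build_fingerprint(const unsigned int *values, int num_values,
                                  char *out /* at least FP_STR_LEN bytes */)
    {
        int i, rc, len = 0;

        out[0] = '\0';
        for (i = 0; (i < num_values) && (len < FP_STR_LEN); i++) {
            /* remaining space is FP_STR_LEN - len; the loop guard above
             * keeps it strictly positive, so it cannot underflow */
            rc = snprintf(&out[len], FP_STR_LEN - len, "%s%u",
                          (i > 0) ? "-" : "", values[i]);
            if ((rc > 0) && (len + rc < FP_STR_LEN))
                len += rc;      /* token fit: advance the write offset */
            else
                break;          /* truncated or error: stop appending */
        }
    }

The two key points mirrored from the diff are that the size argument passed to snprintf can never wrap around, and that the offset only advances after the return value confirms the formatted token actually fit in the buffer.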
print-pim.c
cisco_autorp_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. 
*/ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. 
* * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. * */ enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? 
"R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 
* | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); 
unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; }
cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; }
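The only substantive difference between the two cisco_autorp_print listings above is the bounds handling: the fixed version checks the remaining packet length before every read (if (len < 8) goto trunc; at entry, then if (len < 4), if (len < 1) and if (len < 6) inside the RP loop) and advances bp and len together instead of indexing ahead of the current position. The fragment below is a minimal standalone sketch of that pattern, assuming a made-up parse_rp_entry() and plain printf in place of tcpdump's ND_PRINT/ND_TCHECK machinery.

/*
 * Sketch of the "check remaining length, then advance pointer and length in
 * lockstep" pattern used by the fixed cisco_autorp_print() above.
 * Names and output format are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static int
parse_rp_entry(const uint8_t *bp, unsigned int len)
{
	if (len < 4)			/* 32-bit RP address */
		return -1;		/* truncated: caller would print "[|autorp]" */
	printf("RP %d.%d.%d.%d", bp[0], bp[1], bp[2], bp[3]);
	bp += 4; len -= 4;		/* pointer and remaining length move together */

	if (len < 1)			/* PIM version byte */
		return -1;
	printf(" PIMv%d", bp[0] & 0x3);
	bp += 1; len -= 1;

	if (len < 1)			/* entry count */
		return -1;
	printf(" entries=%d\n", bp[0]);
	return 0;
}

int
main(void)
{
	const uint8_t pkt[] = { 192, 0, 2, 1, 0x02, 3 };	/* sample record */

	return parse_rp_entry(pkt, sizeof(pkt)) == 0 ? 0 : 1;
}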
{'added': [(172, '\tif (len < sizeof(struct in_addr))'), (173, '\t\tgoto trunc;'), (178, '\tbp += 4;'), (179, '\tlen -= 4;'), (180, '\tif (len < 4)'), (181, '\t\tgoto trunc;'), (182, '\tND_TCHECK2(bp[2], 2);'), (186, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));'), (189, '\tbp += 4;'), (190, '\tlen -= 4;'), (192, '\tif (len < 4)'), (193, '\t\tgoto trunc;'), (203, '\t\tif (len < 4)'), (204, '\t\t\tgoto trunc;'), (207, '\t\tbp += 4;'), (208, '\t\tlen -= 4;'), (209, '\t\tif (len < 4)'), (210, '\t\t\tgoto trunc;'), (211, '\t\tND_TCHECK2(bp[0], sizeof(struct in_addr));'), (212, '\t\tif (EXTRACT_32BITS(&bp[0]) != 0xffffffff)'), (213, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));'), (214, '\t\tbp += 4;'), (215, '\t\tlen -= 4;'), (216, '\t\tif (len < 4)'), (217, '\t\t\tgoto trunc;'), (218, '\t\tND_TCHECK2(bp[0], 4);'), (219, '\t\tnjoin = EXTRACT_16BITS(&bp[0]);'), (220, '\t\tnprune = EXTRACT_16BITS(&bp[2]);'), (222, '\t\tbp += 4;'), (223, '\t\tlen -= 4;'), (231, '\t\t\tif (len < 6)'), (232, '\t\t\t\tgoto trunc;'), (238, '\t\t\t ipaddr_string(ndo, &bp[2]),'), (239, '\t\t\t bp[1] & 0x3f));'), (321, '\t\tif (ndo->ndo_vflag) {'), (322, '\t\t\tif (len < 8)'), (323, '\t\t\t\tgoto trunc;'), (325, '\t\t}'), (352, '\tif (len < 8)'), (353, '\t\tgoto trunc;'), (401, '\t\tif (len < 4)'), (402, '\t\t\tgoto trunc;'), (405, '\t\tbp += 4;'), (406, '\t\tlen -= 4;'), (407, '\t\tif (len < 1)'), (408, '\t\t\tgoto trunc;'), (409, '\t\tND_TCHECK(bp[0]);'), (410, '\t\tswitch (bp[0] & 0x3) {'), (420, '\t\tif (bp[0] & 0xfc)'), (421, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc));'), (422, '\t\tbp += 1;'), (423, '\t\tlen -= 1;'), (424, '\t\tif (len < 1)'), (425, '\t\t\tgoto trunc;'), (426, '\t\tND_TCHECK(bp[0]);'), (427, '\t\tnentries = bp[0];'), (428, '\t\tbp += 1;'), (429, '\t\tlen -= 1;'), (432, '\t\t\tif (len < 6)'), (433, '\t\t\t\tgoto trunc;'), (464, '\tND_TCHECK(pim->pim_typever);'), (488, ''), (489, 'trunc:'), (490, '\tND_PRINT((ndo, "[|pim]"));'), (491, '\treturn;'), (560, ' const u_char *bp, u_int len, enum pimv2_addrtype at,'), (561, ' u_int addr_len, int silent)'), (564, '\tint hdrlen;'), (566, '\tif (addr_len == 0) {'), (567, '\t\tif (len < 2)'), (568, '\t\t\tgoto trunc;'), (573, '\t\t\taddr_len = (u_int)sizeof(struct in_addr);'), (577, '\t\t\taddr_len = (u_int)sizeof(struct in6_addr);'), (586, '\t\tswitch (addr_len) {'), (601, '\tlen -= hdrlen;'), (604, '\t\tif (len < addr_len)'), (605, '\t\t\tgoto trunc;'), (606, '\t\tND_TCHECK2(bp[0], addr_len);'), (615, '\t\treturn hdrlen + addr_len;'), (618, '\t\tif (len < addr_len + 2)'), (619, '\t\t\tgoto trunc;'), (620, '\t\tND_TCHECK2(bp[0], addr_len + 2);'), (649, '\t\treturn hdrlen + 2 + addr_len;'), (701, '\tint pimv2_addr_len;'), (708, '\tif (len < 2)'), (709, '\t\tgoto trunc;'), (715, '\tif (len < 4)'), (716, '\t\tgoto trunc;'), (717, '\tND_TCHECK(pim->pim_cksum);'), (758, '\tbp += 4;'), (759, '\tlen -= 4;'), (765, '\t\twhile (len > 0) {'), (766, '\t\t\tif (len < 4)'), (767, '\t\t\t\tgoto trunc;'), (776, '\t\t\tlen -= 4;'), (778, '\t\t\tif (len < olen)'), (779, '\t\t\t\tgoto trunc;'), (780, '\t\t\tND_TCHECK2(bp[0], olen);'), (850, '\t\t\t\t\tu_int plen = len;'), (853, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0);'), (854, '\t\t\t\t\t\tif (advance < 0)'), (855, '\t\t\t\t\t\t\tgoto trunc;'), (857, '\t\t\t\t\t\tplen -= advance;'), (870, '\t\t\tlen -= olen;'), (879, '\t\tif (len < 4)'), (880, '\t\t\tgoto trunc;'), (881, '\t\tND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN);'), (886, '\t\t 
EXTRACT_32BITS(bp))));'), (888, '\t\tbp += 4; len -= 4;'), (890, '\t\tif (len == 0)'), (891, '\t\t\tgoto trunc;'), (893, '\t\tND_TCHECK(ip->ip_vhl);'), (896, '\t\t\tND_TCHECK(ip->ip_dst);'), (919, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (920, '\t\t\tgoto trunc;'), (923, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (924, '\t\t\tgoto trunc;'), (977, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (978, '\t\t\t\tgoto trunc;'), (981, '\t\tif (len < 4)'), (982, '\t\t\tgoto trunc;'), (983, '\t\tND_TCHECK2(*bp, 4);'), (997, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (998, '\t\t\t\tgoto trunc;'), (1000, '\t\t\tif (len < 4)'), (1001, '\t\t\t\tgoto trunc;'), (1002, '\t\t\tND_TCHECK2(*bp, 4);'), (1009, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1010, '\t\t\t\t\tgoto trunc;'), (1015, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1016, '\t\t\t\t\tgoto trunc;'), (1028, '\t\tif (len < 2)'), (1029, '\t\t\tgoto trunc;'), (1030, '\t\tND_TCHECK_16BITS(bp);'), (1032, '\t\tbp += 2;'), (1033, '\t\tlen -= 2;'), (1034, '\t\tif (len < 1)'), (1035, '\t\t\tgoto trunc;'), (1036, '\t\tND_TCHECK(bp[0]);'), (1038, '\t\tif (len < 2)'), (1039, '\t\t\tgoto trunc;'), (1040, '\t\tND_TCHECK(bp[2]);'), (1043, '\t\tlen -= 2;'), (1047, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1048, '\t\t\tgoto trunc;'), (1050, '\t\tlen -= advance;'), (1055, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1056, '\t\t\t\tgoto trunc;'), (1058, '\t\t\tlen -= advance;'), (1061, '\t\t\tif (len < 1)'), (1062, '\t\t\t\tgoto trunc;'), (1063, '\t\t\tND_TCHECK(bp[0]);'), (1065, '\t\t\tif (len < 2)'), (1066, '\t\t\t\tgoto trunc;'), (1067, '\t\t\tND_TCHECK(bp[1]);'), (1069, '\t\t\tif (len < 4)'), (1070, '\t\t\t\tgoto trunc;'), (1072, '\t\t\tlen -= 4;'), (1077, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len,'), (1079, '\t\t\t\t\t\t\t\tpimv2_addr_len,'), (1080, '\t\t\t\t\t\t\t\t0)) < 0)'), (1081, '\t\t\t\t\tgoto trunc;'), (1083, '\t\t\t\tlen -= advance;'), (1085, '\t\t\t\tif (len < 2)'), (1086, '\t\t\t\t\tgoto trunc;'), (1087, '\t\t\t\tND_TCHECK_16BITS(bp);'), (1090, '\t\t\t\tif (len < 3)'), (1091, '\t\t\t\t\tgoto trunc;'), (1092, '\t\t\t\tND_TCHECK(bp[2]);'), (1094, '\t\t\t\tif (len < 4)'), (1095, '\t\t\t\t\tgoto trunc;'), (1097, '\t\t\t\tlen -= 4;'), (1105, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1106, '\t\t\tgoto trunc;'), (1109, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1110, '\t\t\tgoto trunc;'), (1112, '\t\tif (len < 8)'), (1113, '\t\t\tgoto trunc;'), (1114, '\t\tND_TCHECK2(*bp, 8);'), (1126, '\t\tif (len < 1)'), (1127, '\t\t\tgoto trunc;'), (1128, '\t\tND_TCHECK(bp[0]);'), (1131, '\t\tif (len < 2)'), (1132, '\t\t\tgoto trunc;'), (1133, '\t\tND_TCHECK(bp[1]);'), (1135, '\t\tif (len < 4)'), (1136, '\t\t\tgoto trunc;'), (1137, '\t\tND_TCHECK_16BITS(&bp[2]);'), (1141, '\t\tlen -= 4;'), (1145, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1146, '\t\t\tgoto trunc;'), (1148, '\t\tlen -= advance;'), (1153, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), 
(1154, '\t\t\t\tgoto trunc;'), (1156, '\t\t\tlen -= advance;'), (1163, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1164, '\t\t\tgoto trunc;'), (1166, '\t\tlen -= advance;'), (1168, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1169, '\t\t\tgoto trunc;'), (1171, '\t\tlen -= advance;'), (1173, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1174, '\t\t\tgoto trunc;'), (1176, '\t\tlen -= advance;'), (1177, '\t\tif (len < 2)'), (1178, '\t\t\tgoto trunc;'), (1179, '\t\tND_TCHECK_16BITS(bp);')], 'deleted': [(176, '\tND_TCHECK2(bp[6], 2);'), (180, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));'), (183, '\tbp += 8;'), (184, '\tlen -= 8;'), (197, '\t\tND_TCHECK2(bp[4], sizeof(struct in_addr));'), (198, '\t\tif (EXTRACT_32BITS(&bp[4]) != 0xffffffff)'), (199, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));'), (200, '\t\tND_TCHECK2(bp[8], 4);'), (201, '\t\tnjoin = EXTRACT_16BITS(&bp[8]);'), (202, '\t\tnprune = EXTRACT_16BITS(&bp[10]);'), (204, '\t\tbp += 12;'), (205, '\t\tlen -= 12;'), (218, '\t\t\tipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));'), (233, '\tregister const u_char *ep;'), (236, '\tep = (const u_char *)ndo->ndo_snapend;'), (237, '\tif (bp >= ep)'), (238, '\t\treturn;'), (239, ''), (305, '\t\tif (ndo->ndo_vflag)'), (382, '\t\tND_TCHECK(bp[4]);'), (383, '\t\tswitch (bp[4] & 0x3) {'), (393, '\t\tif (bp[4] & 0xfc)'), (394, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc));'), (395, '\t\tND_TCHECK(bp[5]);'), (396, '\t\tnentries = bp[5];'), (397, '\t\tbp += 6; len -= 6;'), (424, '\tregister const u_char *ep;'), (427, '\tep = (const u_char *)ndo->ndo_snapend;'), (428, '\tif (bp >= ep)'), (429, '\t\treturn;'), (499, 'static int pimv2_addr_len;'), (500, ''), (527, ' const u_char *bp, enum pimv2_addrtype at, int silent)'), (530, '\tint len, hdrlen;'), (532, '\tND_TCHECK(bp[0]);'), (533, ''), (534, '\tif (pimv2_addr_len == 0) {'), (539, '\t\t\tlen = sizeof(struct in_addr);'), (543, '\t\t\tlen = sizeof(struct in6_addr);'), (552, '\t\tswitch (pimv2_addr_len) {'), (563, '\t\tlen = pimv2_addr_len;'), (570, '\t\tND_TCHECK2(bp[0], len);'), (579, '\t\treturn hdrlen + len;'), (582, '\t\tND_TCHECK2(bp[0], len + 2);'), (611, '\t\treturn hdrlen + 2 + len;'), (719, '\t\tbp += 4;'), (720, '\t\twhile (bp < ep) {'), (724, '\t\t\tND_TCHECK2(bp[0], 4 + olen);'), (802, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0);'), (803, '\t\t\t\t\t\tif (advance < 0) {'), (804, '\t\t\t\t\t\t\tND_PRINT((ndo, "..."));'), (805, '\t\t\t\t\t\t\tbreak;'), (806, '\t\t\t\t\t\t}'), (828, '\t\tND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN);'), (833, '\t\t EXTRACT_32BITS(bp+4))));'), (835, '\t\tbp += 8; len -= 8;'), (861, '\t\tbp += 4; len -= 4;'), (862, '\t\tif (bp >= ep)'), (863, '\t\t\tbreak;'), (865, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (866, '\t\t\tND_PRINT((ndo, "..."));'), (867, '\t\t\tbreak;'), (868, '\t\t}'), (870, '\t\tif (bp >= ep)'), (871, '\t\t\tbreak;'), (873, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (874, '\t\t\tND_PRINT((ndo, "..."));'), (875, '\t\t\tbreak;'), (876, '\t\t}'), (927, '\t\tbp += 4; len -= 4;'), (929, '\t\t\tif (bp >= ep)'), (930, '\t\t\t\tbreak;'), (932, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (933, '\t\t\t\tND_PRINT((ndo, "..."));'), (934, '\t\t\t\tbreak;'), (935, '\t\t\t}'), (938, '\t\tif (bp + 4 > ep)'), (939, 
'\t\t\tbreak;'), (952, '\t\t\tif (bp >= ep)'), (953, '\t\t\t\tgoto jp_done;'), (955, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (956, '\t\t\t\tND_PRINT((ndo, "...)"));'), (957, '\t\t\t\tgoto jp_done;'), (958, '\t\t\t}'), (960, '\t\t\tif (bp + 4 > ep) {'), (961, '\t\t\t\tND_PRINT((ndo, "...)"));'), (962, '\t\t\t\tgoto jp_done;'), (963, '\t\t\t}'), (970, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (971, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (972, '\t\t\t\t\tgoto jp_done;'), (973, '\t\t\t\t}'), (978, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (979, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (980, '\t\t\t\t\tgoto jp_done;'), (981, '\t\t\t\t}'), (985, '\tjp_done:'), (992, '\t\tbp += 4;'), (995, '\t\tif (bp + sizeof(uint16_t) >= ep) break;'), (997, '\t\tbp += sizeof(uint16_t);'), (998, '\t\tif (bp >= ep) break;'), (1000, '\t\tif (bp + 1 >= ep) break;'), (1005, '\t\tif (bp >= ep) break;'), (1007, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1008, '\t\t\tND_PRINT((ndo, "..."));'), (1009, '\t\t\tbreak;'), (1010, '\t\t}'), (1016, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1017, '\t\t\t < 0) {'), (1018, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1019, '\t\t\t\tgoto bs_done;'), (1020, '\t\t\t}'), (1024, '\t\t\tif (bp >= ep) {'), (1025, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1026, '\t\t\t\tgoto bs_done;'), (1027, '\t\t\t}'), (1029, '\t\t\tif (bp + 1 >= ep) {'), (1030, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1031, '\t\t\t\tgoto bs_done;'), (1032, '\t\t\t}'), (1039, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp,'), (1041, '\t\t\t\t\t\t\t\t0)) < 0) {'), (1042, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1043, '\t\t\t\t\tgoto bs_done;'), (1044, '\t\t\t\t}'), (1047, '\t\t\t\tif (bp + 1 >= ep) {'), (1048, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1049, '\t\t\t\t\tgoto bs_done;'), (1050, '\t\t\t\t}'), (1053, '\t\t\t\tif (bp + 2 >= ep) {'), (1054, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1055, '\t\t\t\t\tgoto bs_done;'), (1056, '\t\t\t\t}'), (1062, '\t bs_done:'), (1066, '\t\tbp += 4; len -= 4;'), (1067, '\t\tif (bp >= ep)'), (1068, '\t\t\tbreak;'), (1070, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1071, '\t\t\tND_PRINT((ndo, "..."));'), (1072, '\t\t\tbreak;'), (1073, '\t\t}'), (1075, '\t\tif (bp >= ep)'), (1076, '\t\t\tbreak;'), (1078, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1079, '\t\t\tND_PRINT((ndo, "..."));'), (1080, '\t\t\tbreak;'), (1081, '\t\t}'), (1083, '\t\tif (bp + 8 > ep)'), (1084, '\t\t\tbreak;'), (1094, '\t\tbp += 4;'), (1097, '\t\tif (bp >= ep) break;'), (1100, '\t\tif (bp + 1 >= ep) break;'), (1102, '\t\tif (bp + 3 >= ep) break;'), (1108, '\t\tif (bp >= ep) break;'), (1110, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1111, '\t\t\tND_PRINT((ndo, "..."));'), (1112, '\t\t\tbreak;'), (1113, '\t\t}'), (1119, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1120, '\t\t\t < 0) {'), (1121, '\t\t\t\tND_PRINT((ndo, "..."));'), (1122, '\t\t\t\tbreak;'), (1123, '\t\t\t}'), (1131, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1132, '\t\t\tND_PRINT((ndo, "..."));'), (1133, '\t\t\tbreak;'), (1134, '\t\t}'), (1137, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1138, '\t\t\tND_PRINT((ndo, "..."));'), (1139, '\t\t\tbreak;'), (1140, '\t\t}'), (1143, '\t\tif ((advance = pimv2_addr_print(ndo, bp, 
pimv2_unicast, 0)) < 0) {'), (1144, '\t\t\tND_PRINT((ndo, "..."));'), (1145, '\t\t\tbreak;'), (1146, '\t\t}'), (1148, '\t\tND_TCHECK2(bp[0], 2);')]}
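Beyond the per-field length guards, the 'added' entries in the diff above show an interface change: pimv2_addr_print() now receives the remaining length (plus an explicit addr_len), and every caller checks its return value, then advances bp and len by the bytes consumed or jumps to trunc. The sketch below illustrates that consume-and-advance calling convention with a hypothetical decode_encoded_addr() helper; it is not tcpdump's real API, only the shape the diff applies.

/*
 * Illustrative consume-and-advance decoder loop (hypothetical helper, not
 * tcpdump code): the decoder returns bytes consumed or -1 on truncation,
 * and the caller keeps bp and len consistent.
 */
#include <stdint.h>
#include <stdio.h>

/* decode a 2-byte family/encoding header plus a 4-byte IPv4 address */
static int
decode_encoded_addr(const uint8_t *bp, unsigned int len)
{
	if (len < 2 + 4)
		return -1;			/* not enough bytes left */
	printf("%d.%d.%d.%d", bp[2], bp[3], bp[4], bp[5]);
	return 2 + 4;				/* bytes consumed */
}

int
main(void)
{
	const uint8_t pkt[] = { 1, 0, 192, 0, 2, 1,		/* addr #1 */
				1, 0, 198, 51, 100, 7 };	/* addr #2 */
	const uint8_t *bp = pkt;
	unsigned int len = sizeof(pkt);
	int advance;

	while (len > 0) {
		if ((advance = decode_encoded_addr(bp, len)) < 0) {
			printf("[|truncated]\n");
			return 1;
		}
		printf("\n");
		bp += advance;		/* caller moves the pointer ... */
		len -= advance;		/* ... and the remaining length together */
	}
	return 0;
}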
207
176
890
5,856
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13030
['CWE-125']
print-pim.c
pim_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
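The code_before listing above bounds pimv2_print() with ad-hoc pointer comparisons against the snapshot end (ep = ndo->ndo_snapend) plus scattered ND_TCHECK macros, while the fixed version (the code_after listing that follows) tracks the remaining declared length len and rejects truncated fields with goto trunc before every read; the record's cwe_id of CWE-125 marks the original behaviour as an out-of-bounds read. The fragment below is a generic illustration of the two styles using hypothetical read_record_* helpers, not any actual tcpdump code path: a lone "pointer still before end?" test does not by itself cover every byte the body reads, whereas an explicit remaining-length check does.

/*
 * Generic contrast between end-pointer checking and remaining-length
 * checking (hypothetical helpers, not tcpdump code).
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* risky shape: only proves bp < end, then reads bp[0..3] anyway */
static unsigned int
read_record_risky(const uint8_t *bp, const uint8_t *end)
{
	if (bp >= end)
		return 0;
	return (unsigned int)bp[0] << 24 | (unsigned int)bp[1] << 16 |
	       (unsigned int)bp[2] << 8 | bp[3];
}

/* hardened shape: proves all four bytes are inside the buffer first */
static int
read_record_safe(const uint8_t *bp, size_t remaining, unsigned int *out)
{
	if (remaining < 4)
		return -1;
	*out = (unsigned int)bp[0] << 24 | (unsigned int)bp[1] << 16 |
	       (unsigned int)bp[2] << 8 | bp[3];
	return 4;
}

int
main(void)
{
	const uint8_t buf[6] = { 0, 1, 2, 3, 4, 5 };
	unsigned int v;

	v = read_record_risky(buf, buf + 6);	/* fine here: 4 bytes available */
	printf("risky read at offset 0: 0x%08x\n", v);

	/* read_record_risky(buf + 4, buf + 6) would pass its test yet touch
	 * buf[6] and buf[7]: the out-of-bounds read that CWE-125 describes */
	if (read_record_safe(buf + 4, 2, &v) < 0)
		printf("safe variant: truncated record detected\n");
	return 0;
}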
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. 
*/ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. 
* * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. * */ enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? 
"R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 
* | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); 
unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; }
pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; }
{'added': [(172, '\tif (len < sizeof(struct in_addr))'), (173, '\t\tgoto trunc;'), (178, '\tbp += 4;'), (179, '\tlen -= 4;'), (180, '\tif (len < 4)'), (181, '\t\tgoto trunc;'), (182, '\tND_TCHECK2(bp[2], 2);'), (186, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));'), (189, '\tbp += 4;'), (190, '\tlen -= 4;'), (192, '\tif (len < 4)'), (193, '\t\tgoto trunc;'), (203, '\t\tif (len < 4)'), (204, '\t\t\tgoto trunc;'), (207, '\t\tbp += 4;'), (208, '\t\tlen -= 4;'), (209, '\t\tif (len < 4)'), (210, '\t\t\tgoto trunc;'), (211, '\t\tND_TCHECK2(bp[0], sizeof(struct in_addr));'), (212, '\t\tif (EXTRACT_32BITS(&bp[0]) != 0xffffffff)'), (213, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));'), (214, '\t\tbp += 4;'), (215, '\t\tlen -= 4;'), (216, '\t\tif (len < 4)'), (217, '\t\t\tgoto trunc;'), (218, '\t\tND_TCHECK2(bp[0], 4);'), (219, '\t\tnjoin = EXTRACT_16BITS(&bp[0]);'), (220, '\t\tnprune = EXTRACT_16BITS(&bp[2]);'), (222, '\t\tbp += 4;'), (223, '\t\tlen -= 4;'), (231, '\t\t\tif (len < 6)'), (232, '\t\t\t\tgoto trunc;'), (238, '\t\t\t ipaddr_string(ndo, &bp[2]),'), (239, '\t\t\t bp[1] & 0x3f));'), (321, '\t\tif (ndo->ndo_vflag) {'), (322, '\t\t\tif (len < 8)'), (323, '\t\t\t\tgoto trunc;'), (325, '\t\t}'), (352, '\tif (len < 8)'), (353, '\t\tgoto trunc;'), (401, '\t\tif (len < 4)'), (402, '\t\t\tgoto trunc;'), (405, '\t\tbp += 4;'), (406, '\t\tlen -= 4;'), (407, '\t\tif (len < 1)'), (408, '\t\t\tgoto trunc;'), (409, '\t\tND_TCHECK(bp[0]);'), (410, '\t\tswitch (bp[0] & 0x3) {'), (420, '\t\tif (bp[0] & 0xfc)'), (421, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc));'), (422, '\t\tbp += 1;'), (423, '\t\tlen -= 1;'), (424, '\t\tif (len < 1)'), (425, '\t\t\tgoto trunc;'), (426, '\t\tND_TCHECK(bp[0]);'), (427, '\t\tnentries = bp[0];'), (428, '\t\tbp += 1;'), (429, '\t\tlen -= 1;'), (432, '\t\t\tif (len < 6)'), (433, '\t\t\t\tgoto trunc;'), (464, '\tND_TCHECK(pim->pim_typever);'), (488, ''), (489, 'trunc:'), (490, '\tND_PRINT((ndo, "[|pim]"));'), (491, '\treturn;'), (560, ' const u_char *bp, u_int len, enum pimv2_addrtype at,'), (561, ' u_int addr_len, int silent)'), (564, '\tint hdrlen;'), (566, '\tif (addr_len == 0) {'), (567, '\t\tif (len < 2)'), (568, '\t\t\tgoto trunc;'), (573, '\t\t\taddr_len = (u_int)sizeof(struct in_addr);'), (577, '\t\t\taddr_len = (u_int)sizeof(struct in6_addr);'), (586, '\t\tswitch (addr_len) {'), (601, '\tlen -= hdrlen;'), (604, '\t\tif (len < addr_len)'), (605, '\t\t\tgoto trunc;'), (606, '\t\tND_TCHECK2(bp[0], addr_len);'), (615, '\t\treturn hdrlen + addr_len;'), (618, '\t\tif (len < addr_len + 2)'), (619, '\t\t\tgoto trunc;'), (620, '\t\tND_TCHECK2(bp[0], addr_len + 2);'), (649, '\t\treturn hdrlen + 2 + addr_len;'), (701, '\tint pimv2_addr_len;'), (708, '\tif (len < 2)'), (709, '\t\tgoto trunc;'), (715, '\tif (len < 4)'), (716, '\t\tgoto trunc;'), (717, '\tND_TCHECK(pim->pim_cksum);'), (758, '\tbp += 4;'), (759, '\tlen -= 4;'), (765, '\t\twhile (len > 0) {'), (766, '\t\t\tif (len < 4)'), (767, '\t\t\t\tgoto trunc;'), (776, '\t\t\tlen -= 4;'), (778, '\t\t\tif (len < olen)'), (779, '\t\t\t\tgoto trunc;'), (780, '\t\t\tND_TCHECK2(bp[0], olen);'), (850, '\t\t\t\t\tu_int plen = len;'), (853, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0);'), (854, '\t\t\t\t\t\tif (advance < 0)'), (855, '\t\t\t\t\t\t\tgoto trunc;'), (857, '\t\t\t\t\t\tplen -= advance;'), (870, '\t\t\tlen -= olen;'), (879, '\t\tif (len < 4)'), (880, '\t\t\tgoto trunc;'), (881, '\t\tND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN);'), (886, '\t\t 
EXTRACT_32BITS(bp))));'), (888, '\t\tbp += 4; len -= 4;'), (890, '\t\tif (len == 0)'), (891, '\t\t\tgoto trunc;'), (893, '\t\tND_TCHECK(ip->ip_vhl);'), (896, '\t\t\tND_TCHECK(ip->ip_dst);'), (919, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (920, '\t\t\tgoto trunc;'), (923, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (924, '\t\t\tgoto trunc;'), (977, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (978, '\t\t\t\tgoto trunc;'), (981, '\t\tif (len < 4)'), (982, '\t\t\tgoto trunc;'), (983, '\t\tND_TCHECK2(*bp, 4);'), (997, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (998, '\t\t\t\tgoto trunc;'), (1000, '\t\t\tif (len < 4)'), (1001, '\t\t\t\tgoto trunc;'), (1002, '\t\t\tND_TCHECK2(*bp, 4);'), (1009, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1010, '\t\t\t\t\tgoto trunc;'), (1015, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1016, '\t\t\t\t\tgoto trunc;'), (1028, '\t\tif (len < 2)'), (1029, '\t\t\tgoto trunc;'), (1030, '\t\tND_TCHECK_16BITS(bp);'), (1032, '\t\tbp += 2;'), (1033, '\t\tlen -= 2;'), (1034, '\t\tif (len < 1)'), (1035, '\t\t\tgoto trunc;'), (1036, '\t\tND_TCHECK(bp[0]);'), (1038, '\t\tif (len < 2)'), (1039, '\t\t\tgoto trunc;'), (1040, '\t\tND_TCHECK(bp[2]);'), (1043, '\t\tlen -= 2;'), (1047, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1048, '\t\t\tgoto trunc;'), (1050, '\t\tlen -= advance;'), (1055, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1056, '\t\t\t\tgoto trunc;'), (1058, '\t\t\tlen -= advance;'), (1061, '\t\t\tif (len < 1)'), (1062, '\t\t\t\tgoto trunc;'), (1063, '\t\t\tND_TCHECK(bp[0]);'), (1065, '\t\t\tif (len < 2)'), (1066, '\t\t\t\tgoto trunc;'), (1067, '\t\t\tND_TCHECK(bp[1]);'), (1069, '\t\t\tif (len < 4)'), (1070, '\t\t\t\tgoto trunc;'), (1072, '\t\t\tlen -= 4;'), (1077, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len,'), (1079, '\t\t\t\t\t\t\t\tpimv2_addr_len,'), (1080, '\t\t\t\t\t\t\t\t0)) < 0)'), (1081, '\t\t\t\t\tgoto trunc;'), (1083, '\t\t\t\tlen -= advance;'), (1085, '\t\t\t\tif (len < 2)'), (1086, '\t\t\t\t\tgoto trunc;'), (1087, '\t\t\t\tND_TCHECK_16BITS(bp);'), (1090, '\t\t\t\tif (len < 3)'), (1091, '\t\t\t\t\tgoto trunc;'), (1092, '\t\t\t\tND_TCHECK(bp[2]);'), (1094, '\t\t\t\tif (len < 4)'), (1095, '\t\t\t\t\tgoto trunc;'), (1097, '\t\t\t\tlen -= 4;'), (1105, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1106, '\t\t\tgoto trunc;'), (1109, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1110, '\t\t\tgoto trunc;'), (1112, '\t\tif (len < 8)'), (1113, '\t\t\tgoto trunc;'), (1114, '\t\tND_TCHECK2(*bp, 8);'), (1126, '\t\tif (len < 1)'), (1127, '\t\t\tgoto trunc;'), (1128, '\t\tND_TCHECK(bp[0]);'), (1131, '\t\tif (len < 2)'), (1132, '\t\t\tgoto trunc;'), (1133, '\t\tND_TCHECK(bp[1]);'), (1135, '\t\tif (len < 4)'), (1136, '\t\t\tgoto trunc;'), (1137, '\t\tND_TCHECK_16BITS(&bp[2]);'), (1141, '\t\tlen -= 4;'), (1145, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1146, '\t\t\tgoto trunc;'), (1148, '\t\tlen -= advance;'), (1153, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), 
(1154, '\t\t\t\tgoto trunc;'), (1156, '\t\t\tlen -= advance;'), (1163, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1164, '\t\t\tgoto trunc;'), (1166, '\t\tlen -= advance;'), (1168, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1169, '\t\t\tgoto trunc;'), (1171, '\t\tlen -= advance;'), (1173, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1174, '\t\t\tgoto trunc;'), (1176, '\t\tlen -= advance;'), (1177, '\t\tif (len < 2)'), (1178, '\t\t\tgoto trunc;'), (1179, '\t\tND_TCHECK_16BITS(bp);')], 'deleted': [(176, '\tND_TCHECK2(bp[6], 2);'), (180, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));'), (183, '\tbp += 8;'), (184, '\tlen -= 8;'), (197, '\t\tND_TCHECK2(bp[4], sizeof(struct in_addr));'), (198, '\t\tif (EXTRACT_32BITS(&bp[4]) != 0xffffffff)'), (199, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));'), (200, '\t\tND_TCHECK2(bp[8], 4);'), (201, '\t\tnjoin = EXTRACT_16BITS(&bp[8]);'), (202, '\t\tnprune = EXTRACT_16BITS(&bp[10]);'), (204, '\t\tbp += 12;'), (205, '\t\tlen -= 12;'), (218, '\t\t\tipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));'), (233, '\tregister const u_char *ep;'), (236, '\tep = (const u_char *)ndo->ndo_snapend;'), (237, '\tif (bp >= ep)'), (238, '\t\treturn;'), (239, ''), (305, '\t\tif (ndo->ndo_vflag)'), (382, '\t\tND_TCHECK(bp[4]);'), (383, '\t\tswitch (bp[4] & 0x3) {'), (393, '\t\tif (bp[4] & 0xfc)'), (394, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc));'), (395, '\t\tND_TCHECK(bp[5]);'), (396, '\t\tnentries = bp[5];'), (397, '\t\tbp += 6; len -= 6;'), (424, '\tregister const u_char *ep;'), (427, '\tep = (const u_char *)ndo->ndo_snapend;'), (428, '\tif (bp >= ep)'), (429, '\t\treturn;'), (499, 'static int pimv2_addr_len;'), (500, ''), (527, ' const u_char *bp, enum pimv2_addrtype at, int silent)'), (530, '\tint len, hdrlen;'), (532, '\tND_TCHECK(bp[0]);'), (533, ''), (534, '\tif (pimv2_addr_len == 0) {'), (539, '\t\t\tlen = sizeof(struct in_addr);'), (543, '\t\t\tlen = sizeof(struct in6_addr);'), (552, '\t\tswitch (pimv2_addr_len) {'), (563, '\t\tlen = pimv2_addr_len;'), (570, '\t\tND_TCHECK2(bp[0], len);'), (579, '\t\treturn hdrlen + len;'), (582, '\t\tND_TCHECK2(bp[0], len + 2);'), (611, '\t\treturn hdrlen + 2 + len;'), (719, '\t\tbp += 4;'), (720, '\t\twhile (bp < ep) {'), (724, '\t\t\tND_TCHECK2(bp[0], 4 + olen);'), (802, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0);'), (803, '\t\t\t\t\t\tif (advance < 0) {'), (804, '\t\t\t\t\t\t\tND_PRINT((ndo, "..."));'), (805, '\t\t\t\t\t\t\tbreak;'), (806, '\t\t\t\t\t\t}'), (828, '\t\tND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN);'), (833, '\t\t EXTRACT_32BITS(bp+4))));'), (835, '\t\tbp += 8; len -= 8;'), (861, '\t\tbp += 4; len -= 4;'), (862, '\t\tif (bp >= ep)'), (863, '\t\t\tbreak;'), (865, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (866, '\t\t\tND_PRINT((ndo, "..."));'), (867, '\t\t\tbreak;'), (868, '\t\t}'), (870, '\t\tif (bp >= ep)'), (871, '\t\t\tbreak;'), (873, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (874, '\t\t\tND_PRINT((ndo, "..."));'), (875, '\t\t\tbreak;'), (876, '\t\t}'), (927, '\t\tbp += 4; len -= 4;'), (929, '\t\t\tif (bp >= ep)'), (930, '\t\t\t\tbreak;'), (932, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (933, '\t\t\t\tND_PRINT((ndo, "..."));'), (934, '\t\t\t\tbreak;'), (935, '\t\t\t}'), (938, '\t\tif (bp + 4 > ep)'), (939, 
'\t\t\tbreak;'), (952, '\t\t\tif (bp >= ep)'), (953, '\t\t\t\tgoto jp_done;'), (955, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (956, '\t\t\t\tND_PRINT((ndo, "...)"));'), (957, '\t\t\t\tgoto jp_done;'), (958, '\t\t\t}'), (960, '\t\t\tif (bp + 4 > ep) {'), (961, '\t\t\t\tND_PRINT((ndo, "...)"));'), (962, '\t\t\t\tgoto jp_done;'), (963, '\t\t\t}'), (970, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (971, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (972, '\t\t\t\t\tgoto jp_done;'), (973, '\t\t\t\t}'), (978, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (979, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (980, '\t\t\t\t\tgoto jp_done;'), (981, '\t\t\t\t}'), (985, '\tjp_done:'), (992, '\t\tbp += 4;'), (995, '\t\tif (bp + sizeof(uint16_t) >= ep) break;'), (997, '\t\tbp += sizeof(uint16_t);'), (998, '\t\tif (bp >= ep) break;'), (1000, '\t\tif (bp + 1 >= ep) break;'), (1005, '\t\tif (bp >= ep) break;'), (1007, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1008, '\t\t\tND_PRINT((ndo, "..."));'), (1009, '\t\t\tbreak;'), (1010, '\t\t}'), (1016, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1017, '\t\t\t < 0) {'), (1018, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1019, '\t\t\t\tgoto bs_done;'), (1020, '\t\t\t}'), (1024, '\t\t\tif (bp >= ep) {'), (1025, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1026, '\t\t\t\tgoto bs_done;'), (1027, '\t\t\t}'), (1029, '\t\t\tif (bp + 1 >= ep) {'), (1030, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1031, '\t\t\t\tgoto bs_done;'), (1032, '\t\t\t}'), (1039, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp,'), (1041, '\t\t\t\t\t\t\t\t0)) < 0) {'), (1042, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1043, '\t\t\t\t\tgoto bs_done;'), (1044, '\t\t\t\t}'), (1047, '\t\t\t\tif (bp + 1 >= ep) {'), (1048, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1049, '\t\t\t\t\tgoto bs_done;'), (1050, '\t\t\t\t}'), (1053, '\t\t\t\tif (bp + 2 >= ep) {'), (1054, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1055, '\t\t\t\t\tgoto bs_done;'), (1056, '\t\t\t\t}'), (1062, '\t bs_done:'), (1066, '\t\tbp += 4; len -= 4;'), (1067, '\t\tif (bp >= ep)'), (1068, '\t\t\tbreak;'), (1070, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1071, '\t\t\tND_PRINT((ndo, "..."));'), (1072, '\t\t\tbreak;'), (1073, '\t\t}'), (1075, '\t\tif (bp >= ep)'), (1076, '\t\t\tbreak;'), (1078, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1079, '\t\t\tND_PRINT((ndo, "..."));'), (1080, '\t\t\tbreak;'), (1081, '\t\t}'), (1083, '\t\tif (bp + 8 > ep)'), (1084, '\t\t\tbreak;'), (1094, '\t\tbp += 4;'), (1097, '\t\tif (bp >= ep) break;'), (1100, '\t\tif (bp + 1 >= ep) break;'), (1102, '\t\tif (bp + 3 >= ep) break;'), (1108, '\t\tif (bp >= ep) break;'), (1110, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1111, '\t\t\tND_PRINT((ndo, "..."));'), (1112, '\t\t\tbreak;'), (1113, '\t\t}'), (1119, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1120, '\t\t\t < 0) {'), (1121, '\t\t\t\tND_PRINT((ndo, "..."));'), (1122, '\t\t\t\tbreak;'), (1123, '\t\t\t}'), (1131, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1132, '\t\t\tND_PRINT((ndo, "..."));'), (1133, '\t\t\tbreak;'), (1134, '\t\t}'), (1137, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1138, '\t\t\tND_PRINT((ndo, "..."));'), (1139, '\t\t\tbreak;'), (1140, '\t\t}'), (1143, '\t\tif ((advance = pimv2_addr_print(ndo, bp, 
pimv2_unicast, 0)) < 0) {'), (1144, '\t\t\tND_PRINT((ndo, "..."));'), (1145, '\t\t\tbreak;'), (1146, '\t\t}'), (1148, '\t\tND_TCHECK2(bp[0], 2);')]}
207
176
890
5856
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13030
['CWE-125']
print-pim.c
pimv1_join_prune_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. 
*/ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. 
* * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. * */ enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? 
"R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 
* | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); 
unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; }
pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; }
{'added': [(172, '\tif (len < sizeof(struct in_addr))'), (173, '\t\tgoto trunc;'), (178, '\tbp += 4;'), (179, '\tlen -= 4;'), (180, '\tif (len < 4)'), (181, '\t\tgoto trunc;'), (182, '\tND_TCHECK2(bp[2], 2);'), (186, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));'), (189, '\tbp += 4;'), (190, '\tlen -= 4;'), (192, '\tif (len < 4)'), (193, '\t\tgoto trunc;'), (203, '\t\tif (len < 4)'), (204, '\t\t\tgoto trunc;'), (207, '\t\tbp += 4;'), (208, '\t\tlen -= 4;'), (209, '\t\tif (len < 4)'), (210, '\t\t\tgoto trunc;'), (211, '\t\tND_TCHECK2(bp[0], sizeof(struct in_addr));'), (212, '\t\tif (EXTRACT_32BITS(&bp[0]) != 0xffffffff)'), (213, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));'), (214, '\t\tbp += 4;'), (215, '\t\tlen -= 4;'), (216, '\t\tif (len < 4)'), (217, '\t\t\tgoto trunc;'), (218, '\t\tND_TCHECK2(bp[0], 4);'), (219, '\t\tnjoin = EXTRACT_16BITS(&bp[0]);'), (220, '\t\tnprune = EXTRACT_16BITS(&bp[2]);'), (222, '\t\tbp += 4;'), (223, '\t\tlen -= 4;'), (231, '\t\t\tif (len < 6)'), (232, '\t\t\t\tgoto trunc;'), (238, '\t\t\t ipaddr_string(ndo, &bp[2]),'), (239, '\t\t\t bp[1] & 0x3f));'), (321, '\t\tif (ndo->ndo_vflag) {'), (322, '\t\t\tif (len < 8)'), (323, '\t\t\t\tgoto trunc;'), (325, '\t\t}'), (352, '\tif (len < 8)'), (353, '\t\tgoto trunc;'), (401, '\t\tif (len < 4)'), (402, '\t\t\tgoto trunc;'), (405, '\t\tbp += 4;'), (406, '\t\tlen -= 4;'), (407, '\t\tif (len < 1)'), (408, '\t\t\tgoto trunc;'), (409, '\t\tND_TCHECK(bp[0]);'), (410, '\t\tswitch (bp[0] & 0x3) {'), (420, '\t\tif (bp[0] & 0xfc)'), (421, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc));'), (422, '\t\tbp += 1;'), (423, '\t\tlen -= 1;'), (424, '\t\tif (len < 1)'), (425, '\t\t\tgoto trunc;'), (426, '\t\tND_TCHECK(bp[0]);'), (427, '\t\tnentries = bp[0];'), (428, '\t\tbp += 1;'), (429, '\t\tlen -= 1;'), (432, '\t\t\tif (len < 6)'), (433, '\t\t\t\tgoto trunc;'), (464, '\tND_TCHECK(pim->pim_typever);'), (488, ''), (489, 'trunc:'), (490, '\tND_PRINT((ndo, "[|pim]"));'), (491, '\treturn;'), (560, ' const u_char *bp, u_int len, enum pimv2_addrtype at,'), (561, ' u_int addr_len, int silent)'), (564, '\tint hdrlen;'), (566, '\tif (addr_len == 0) {'), (567, '\t\tif (len < 2)'), (568, '\t\t\tgoto trunc;'), (573, '\t\t\taddr_len = (u_int)sizeof(struct in_addr);'), (577, '\t\t\taddr_len = (u_int)sizeof(struct in6_addr);'), (586, '\t\tswitch (addr_len) {'), (601, '\tlen -= hdrlen;'), (604, '\t\tif (len < addr_len)'), (605, '\t\t\tgoto trunc;'), (606, '\t\tND_TCHECK2(bp[0], addr_len);'), (615, '\t\treturn hdrlen + addr_len;'), (618, '\t\tif (len < addr_len + 2)'), (619, '\t\t\tgoto trunc;'), (620, '\t\tND_TCHECK2(bp[0], addr_len + 2);'), (649, '\t\treturn hdrlen + 2 + addr_len;'), (701, '\tint pimv2_addr_len;'), (708, '\tif (len < 2)'), (709, '\t\tgoto trunc;'), (715, '\tif (len < 4)'), (716, '\t\tgoto trunc;'), (717, '\tND_TCHECK(pim->pim_cksum);'), (758, '\tbp += 4;'), (759, '\tlen -= 4;'), (765, '\t\twhile (len > 0) {'), (766, '\t\t\tif (len < 4)'), (767, '\t\t\t\tgoto trunc;'), (776, '\t\t\tlen -= 4;'), (778, '\t\t\tif (len < olen)'), (779, '\t\t\t\tgoto trunc;'), (780, '\t\t\tND_TCHECK2(bp[0], olen);'), (850, '\t\t\t\t\tu_int plen = len;'), (853, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0);'), (854, '\t\t\t\t\t\tif (advance < 0)'), (855, '\t\t\t\t\t\t\tgoto trunc;'), (857, '\t\t\t\t\t\tplen -= advance;'), (870, '\t\t\tlen -= olen;'), (879, '\t\tif (len < 4)'), (880, '\t\t\tgoto trunc;'), (881, '\t\tND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN);'), (886, '\t\t 
EXTRACT_32BITS(bp))));'), (888, '\t\tbp += 4; len -= 4;'), (890, '\t\tif (len == 0)'), (891, '\t\t\tgoto trunc;'), (893, '\t\tND_TCHECK(ip->ip_vhl);'), (896, '\t\t\tND_TCHECK(ip->ip_dst);'), (919, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (920, '\t\t\tgoto trunc;'), (923, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (924, '\t\t\tgoto trunc;'), (977, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (978, '\t\t\t\tgoto trunc;'), (981, '\t\tif (len < 4)'), (982, '\t\t\tgoto trunc;'), (983, '\t\tND_TCHECK2(*bp, 4);'), (997, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (998, '\t\t\t\tgoto trunc;'), (1000, '\t\t\tif (len < 4)'), (1001, '\t\t\t\tgoto trunc;'), (1002, '\t\t\tND_TCHECK2(*bp, 4);'), (1009, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1010, '\t\t\t\t\tgoto trunc;'), (1015, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1016, '\t\t\t\t\tgoto trunc;'), (1028, '\t\tif (len < 2)'), (1029, '\t\t\tgoto trunc;'), (1030, '\t\tND_TCHECK_16BITS(bp);'), (1032, '\t\tbp += 2;'), (1033, '\t\tlen -= 2;'), (1034, '\t\tif (len < 1)'), (1035, '\t\t\tgoto trunc;'), (1036, '\t\tND_TCHECK(bp[0]);'), (1038, '\t\tif (len < 2)'), (1039, '\t\t\tgoto trunc;'), (1040, '\t\tND_TCHECK(bp[2]);'), (1043, '\t\tlen -= 2;'), (1047, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1048, '\t\t\tgoto trunc;'), (1050, '\t\tlen -= advance;'), (1055, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1056, '\t\t\t\tgoto trunc;'), (1058, '\t\t\tlen -= advance;'), (1061, '\t\t\tif (len < 1)'), (1062, '\t\t\t\tgoto trunc;'), (1063, '\t\t\tND_TCHECK(bp[0]);'), (1065, '\t\t\tif (len < 2)'), (1066, '\t\t\t\tgoto trunc;'), (1067, '\t\t\tND_TCHECK(bp[1]);'), (1069, '\t\t\tif (len < 4)'), (1070, '\t\t\t\tgoto trunc;'), (1072, '\t\t\tlen -= 4;'), (1077, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len,'), (1079, '\t\t\t\t\t\t\t\tpimv2_addr_len,'), (1080, '\t\t\t\t\t\t\t\t0)) < 0)'), (1081, '\t\t\t\t\tgoto trunc;'), (1083, '\t\t\t\tlen -= advance;'), (1085, '\t\t\t\tif (len < 2)'), (1086, '\t\t\t\t\tgoto trunc;'), (1087, '\t\t\t\tND_TCHECK_16BITS(bp);'), (1090, '\t\t\t\tif (len < 3)'), (1091, '\t\t\t\t\tgoto trunc;'), (1092, '\t\t\t\tND_TCHECK(bp[2]);'), (1094, '\t\t\t\tif (len < 4)'), (1095, '\t\t\t\t\tgoto trunc;'), (1097, '\t\t\t\tlen -= 4;'), (1105, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1106, '\t\t\tgoto trunc;'), (1109, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1110, '\t\t\tgoto trunc;'), (1112, '\t\tif (len < 8)'), (1113, '\t\t\tgoto trunc;'), (1114, '\t\tND_TCHECK2(*bp, 8);'), (1126, '\t\tif (len < 1)'), (1127, '\t\t\tgoto trunc;'), (1128, '\t\tND_TCHECK(bp[0]);'), (1131, '\t\tif (len < 2)'), (1132, '\t\t\tgoto trunc;'), (1133, '\t\tND_TCHECK(bp[1]);'), (1135, '\t\tif (len < 4)'), (1136, '\t\t\tgoto trunc;'), (1137, '\t\tND_TCHECK_16BITS(&bp[2]);'), (1141, '\t\tlen -= 4;'), (1145, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1146, '\t\t\tgoto trunc;'), (1148, '\t\tlen -= advance;'), (1153, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), 
(1154, '\t\t\t\tgoto trunc;'), (1156, '\t\t\tlen -= advance;'), (1163, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1164, '\t\t\tgoto trunc;'), (1166, '\t\tlen -= advance;'), (1168, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1169, '\t\t\tgoto trunc;'), (1171, '\t\tlen -= advance;'), (1173, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1174, '\t\t\tgoto trunc;'), (1176, '\t\tlen -= advance;'), (1177, '\t\tif (len < 2)'), (1178, '\t\t\tgoto trunc;'), (1179, '\t\tND_TCHECK_16BITS(bp);')], 'deleted': [(176, '\tND_TCHECK2(bp[6], 2);'), (180, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));'), (183, '\tbp += 8;'), (184, '\tlen -= 8;'), (197, '\t\tND_TCHECK2(bp[4], sizeof(struct in_addr));'), (198, '\t\tif (EXTRACT_32BITS(&bp[4]) != 0xffffffff)'), (199, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));'), (200, '\t\tND_TCHECK2(bp[8], 4);'), (201, '\t\tnjoin = EXTRACT_16BITS(&bp[8]);'), (202, '\t\tnprune = EXTRACT_16BITS(&bp[10]);'), (204, '\t\tbp += 12;'), (205, '\t\tlen -= 12;'), (218, '\t\t\tipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));'), (233, '\tregister const u_char *ep;'), (236, '\tep = (const u_char *)ndo->ndo_snapend;'), (237, '\tif (bp >= ep)'), (238, '\t\treturn;'), (239, ''), (305, '\t\tif (ndo->ndo_vflag)'), (382, '\t\tND_TCHECK(bp[4]);'), (383, '\t\tswitch (bp[4] & 0x3) {'), (393, '\t\tif (bp[4] & 0xfc)'), (394, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc));'), (395, '\t\tND_TCHECK(bp[5]);'), (396, '\t\tnentries = bp[5];'), (397, '\t\tbp += 6; len -= 6;'), (424, '\tregister const u_char *ep;'), (427, '\tep = (const u_char *)ndo->ndo_snapend;'), (428, '\tif (bp >= ep)'), (429, '\t\treturn;'), (499, 'static int pimv2_addr_len;'), (500, ''), (527, ' const u_char *bp, enum pimv2_addrtype at, int silent)'), (530, '\tint len, hdrlen;'), (532, '\tND_TCHECK(bp[0]);'), (533, ''), (534, '\tif (pimv2_addr_len == 0) {'), (539, '\t\t\tlen = sizeof(struct in_addr);'), (543, '\t\t\tlen = sizeof(struct in6_addr);'), (552, '\t\tswitch (pimv2_addr_len) {'), (563, '\t\tlen = pimv2_addr_len;'), (570, '\t\tND_TCHECK2(bp[0], len);'), (579, '\t\treturn hdrlen + len;'), (582, '\t\tND_TCHECK2(bp[0], len + 2);'), (611, '\t\treturn hdrlen + 2 + len;'), (719, '\t\tbp += 4;'), (720, '\t\twhile (bp < ep) {'), (724, '\t\t\tND_TCHECK2(bp[0], 4 + olen);'), (802, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0);'), (803, '\t\t\t\t\t\tif (advance < 0) {'), (804, '\t\t\t\t\t\t\tND_PRINT((ndo, "..."));'), (805, '\t\t\t\t\t\t\tbreak;'), (806, '\t\t\t\t\t\t}'), (828, '\t\tND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN);'), (833, '\t\t EXTRACT_32BITS(bp+4))));'), (835, '\t\tbp += 8; len -= 8;'), (861, '\t\tbp += 4; len -= 4;'), (862, '\t\tif (bp >= ep)'), (863, '\t\t\tbreak;'), (865, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (866, '\t\t\tND_PRINT((ndo, "..."));'), (867, '\t\t\tbreak;'), (868, '\t\t}'), (870, '\t\tif (bp >= ep)'), (871, '\t\t\tbreak;'), (873, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (874, '\t\t\tND_PRINT((ndo, "..."));'), (875, '\t\t\tbreak;'), (876, '\t\t}'), (927, '\t\tbp += 4; len -= 4;'), (929, '\t\t\tif (bp >= ep)'), (930, '\t\t\t\tbreak;'), (932, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (933, '\t\t\t\tND_PRINT((ndo, "..."));'), (934, '\t\t\t\tbreak;'), (935, '\t\t\t}'), (938, '\t\tif (bp + 4 > ep)'), (939, 
'\t\t\tbreak;'), (952, '\t\t\tif (bp >= ep)'), (953, '\t\t\t\tgoto jp_done;'), (955, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (956, '\t\t\t\tND_PRINT((ndo, "...)"));'), (957, '\t\t\t\tgoto jp_done;'), (958, '\t\t\t}'), (960, '\t\t\tif (bp + 4 > ep) {'), (961, '\t\t\t\tND_PRINT((ndo, "...)"));'), (962, '\t\t\t\tgoto jp_done;'), (963, '\t\t\t}'), (970, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (971, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (972, '\t\t\t\t\tgoto jp_done;'), (973, '\t\t\t\t}'), (978, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (979, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (980, '\t\t\t\t\tgoto jp_done;'), (981, '\t\t\t\t}'), (985, '\tjp_done:'), (992, '\t\tbp += 4;'), (995, '\t\tif (bp + sizeof(uint16_t) >= ep) break;'), (997, '\t\tbp += sizeof(uint16_t);'), (998, '\t\tif (bp >= ep) break;'), (1000, '\t\tif (bp + 1 >= ep) break;'), (1005, '\t\tif (bp >= ep) break;'), (1007, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1008, '\t\t\tND_PRINT((ndo, "..."));'), (1009, '\t\t\tbreak;'), (1010, '\t\t}'), (1016, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1017, '\t\t\t < 0) {'), (1018, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1019, '\t\t\t\tgoto bs_done;'), (1020, '\t\t\t}'), (1024, '\t\t\tif (bp >= ep) {'), (1025, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1026, '\t\t\t\tgoto bs_done;'), (1027, '\t\t\t}'), (1029, '\t\t\tif (bp + 1 >= ep) {'), (1030, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1031, '\t\t\t\tgoto bs_done;'), (1032, '\t\t\t}'), (1039, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp,'), (1041, '\t\t\t\t\t\t\t\t0)) < 0) {'), (1042, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1043, '\t\t\t\t\tgoto bs_done;'), (1044, '\t\t\t\t}'), (1047, '\t\t\t\tif (bp + 1 >= ep) {'), (1048, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1049, '\t\t\t\t\tgoto bs_done;'), (1050, '\t\t\t\t}'), (1053, '\t\t\t\tif (bp + 2 >= ep) {'), (1054, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1055, '\t\t\t\t\tgoto bs_done;'), (1056, '\t\t\t\t}'), (1062, '\t bs_done:'), (1066, '\t\tbp += 4; len -= 4;'), (1067, '\t\tif (bp >= ep)'), (1068, '\t\t\tbreak;'), (1070, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1071, '\t\t\tND_PRINT((ndo, "..."));'), (1072, '\t\t\tbreak;'), (1073, '\t\t}'), (1075, '\t\tif (bp >= ep)'), (1076, '\t\t\tbreak;'), (1078, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1079, '\t\t\tND_PRINT((ndo, "..."));'), (1080, '\t\t\tbreak;'), (1081, '\t\t}'), (1083, '\t\tif (bp + 8 > ep)'), (1084, '\t\t\tbreak;'), (1094, '\t\tbp += 4;'), (1097, '\t\tif (bp >= ep) break;'), (1100, '\t\tif (bp + 1 >= ep) break;'), (1102, '\t\tif (bp + 3 >= ep) break;'), (1108, '\t\tif (bp >= ep) break;'), (1110, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1111, '\t\t\tND_PRINT((ndo, "..."));'), (1112, '\t\t\tbreak;'), (1113, '\t\t}'), (1119, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1120, '\t\t\t < 0) {'), (1121, '\t\t\t\tND_PRINT((ndo, "..."));'), (1122, '\t\t\t\tbreak;'), (1123, '\t\t\t}'), (1131, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1132, '\t\t\tND_PRINT((ndo, "..."));'), (1133, '\t\t\tbreak;'), (1134, '\t\t}'), (1137, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1138, '\t\t\tND_PRINT((ndo, "..."));'), (1139, '\t\t\tbreak;'), (1140, '\t\t}'), (1143, '\t\tif ((advance = pimv2_addr_print(ndo, bp, 
pimv2_unicast, 0)) < 0) {'), (1144, '\t\t\tND_PRINT((ndo, "..."));'), (1145, '\t\t\tbreak;'), (1146, '\t\t}'), (1148, '\t\tND_TCHECK2(bp[0], 2);')]}
207
176
890
5856
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13030
['CWE-125']
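The diff above (the CVE-2017-13030 / CWE-125 fix for this record) swaps ad-hoc snapend-pointer comparisons for explicit remaining-length checks: every fixed-size read is preceded by an "if (len < N) goto trunc;" test, and len is decremented in step with bp; pimv2_addr_print likewise gains len and addr_len parameters. A minimal sketch of that guard pattern follows; it is an illustration only, with a made-up print_hold_time() helper, not code from the tcpdump sources.

/* Guard pattern sketch: check the remaining length before each read,
 * and keep the pointer and the length counter in step. */
#include <stdio.h>
#include <stdint.h>

static int print_hold_time(const uint8_t *bp, unsigned int len)
{
        unsigned int hold;

        if (len < 2)                    /* not enough captured data left */
                return -1;              /* caller reports truncation */
        hold = ((unsigned int)bp[0] << 8) | bp[1];  /* big-endian 16-bit field */
        printf("hold=%u\n", hold);
        return 2;                       /* bytes consumed */
}

int main(void)
{
        const uint8_t pkt[] = { 0x00, 0xb4 };       /* 180 seconds */
        const uint8_t *bp = pkt;
        unsigned int len = (unsigned int)sizeof(pkt);
        int advance;

        if ((advance = print_hold_time(bp, len)) < 0) {
                puts("[|pim]");         /* same truncation marker the printer uses */
                return 0;
        }
        bp += advance;
        len -= advance;                 /* remaining length stays accurate */
        return 0;
}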
print-pim.c
pimv1_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. 
*/ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. 
* * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. * */ enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? 
"R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 
* | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); 
unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; }
pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; }
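A note on the pimv1_print() change shown above: the patched version drops the snapend-based early return and instead refuses to dissect a Join/Prune/Graft body unless at least the 8-byte fixed header is present. Because len is a u_int, the old unconditional call pimv1_join_prune_print(ndo, &bp[8], len - 8) would, for a packet shorter than 8 bytes, pass a wrapped-around (huge) length to the callee, which is one way an out-of-bounds read (CWE-125) can arise. The snippet below is a minimal, self-contained illustration of that unsigned wrap; it is not tcpdump code, and the values in it are made up for the example.

#include <stdio.h>

int main(void)
{
	unsigned int len = 6;	/* shorter than the 8-byte PIMv1 fixed header */

	/* Unsigned subtraction wraps; with a 32-bit u_int this prints 4294967294. */
	printf("len - 8 = %u\n", len - 8);

	/* The guard added by the fix avoids handing that huge value onward. */
	if (len < 8)
		printf("guard taken: treat packet as truncated\n");

	return 0;
}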
{'added': [(172, '\tif (len < sizeof(struct in_addr))'), (173, '\t\tgoto trunc;'), (178, '\tbp += 4;'), (179, '\tlen -= 4;'), (180, '\tif (len < 4)'), (181, '\t\tgoto trunc;'), (182, '\tND_TCHECK2(bp[2], 2);'), (186, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));'), (189, '\tbp += 4;'), (190, '\tlen -= 4;'), (192, '\tif (len < 4)'), (193, '\t\tgoto trunc;'), (203, '\t\tif (len < 4)'), (204, '\t\t\tgoto trunc;'), (207, '\t\tbp += 4;'), (208, '\t\tlen -= 4;'), (209, '\t\tif (len < 4)'), (210, '\t\t\tgoto trunc;'), (211, '\t\tND_TCHECK2(bp[0], sizeof(struct in_addr));'), (212, '\t\tif (EXTRACT_32BITS(&bp[0]) != 0xffffffff)'), (213, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));'), (214, '\t\tbp += 4;'), (215, '\t\tlen -= 4;'), (216, '\t\tif (len < 4)'), (217, '\t\t\tgoto trunc;'), (218, '\t\tND_TCHECK2(bp[0], 4);'), (219, '\t\tnjoin = EXTRACT_16BITS(&bp[0]);'), (220, '\t\tnprune = EXTRACT_16BITS(&bp[2]);'), (222, '\t\tbp += 4;'), (223, '\t\tlen -= 4;'), (231, '\t\t\tif (len < 6)'), (232, '\t\t\t\tgoto trunc;'), (238, '\t\t\t ipaddr_string(ndo, &bp[2]),'), (239, '\t\t\t bp[1] & 0x3f));'), (321, '\t\tif (ndo->ndo_vflag) {'), (322, '\t\t\tif (len < 8)'), (323, '\t\t\t\tgoto trunc;'), (325, '\t\t}'), (352, '\tif (len < 8)'), (353, '\t\tgoto trunc;'), (401, '\t\tif (len < 4)'), (402, '\t\t\tgoto trunc;'), (405, '\t\tbp += 4;'), (406, '\t\tlen -= 4;'), (407, '\t\tif (len < 1)'), (408, '\t\t\tgoto trunc;'), (409, '\t\tND_TCHECK(bp[0]);'), (410, '\t\tswitch (bp[0] & 0x3) {'), (420, '\t\tif (bp[0] & 0xfc)'), (421, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc));'), (422, '\t\tbp += 1;'), (423, '\t\tlen -= 1;'), (424, '\t\tif (len < 1)'), (425, '\t\t\tgoto trunc;'), (426, '\t\tND_TCHECK(bp[0]);'), (427, '\t\tnentries = bp[0];'), (428, '\t\tbp += 1;'), (429, '\t\tlen -= 1;'), (432, '\t\t\tif (len < 6)'), (433, '\t\t\t\tgoto trunc;'), (464, '\tND_TCHECK(pim->pim_typever);'), (488, ''), (489, 'trunc:'), (490, '\tND_PRINT((ndo, "[|pim]"));'), (491, '\treturn;'), (560, ' const u_char *bp, u_int len, enum pimv2_addrtype at,'), (561, ' u_int addr_len, int silent)'), (564, '\tint hdrlen;'), (566, '\tif (addr_len == 0) {'), (567, '\t\tif (len < 2)'), (568, '\t\t\tgoto trunc;'), (573, '\t\t\taddr_len = (u_int)sizeof(struct in_addr);'), (577, '\t\t\taddr_len = (u_int)sizeof(struct in6_addr);'), (586, '\t\tswitch (addr_len) {'), (601, '\tlen -= hdrlen;'), (604, '\t\tif (len < addr_len)'), (605, '\t\t\tgoto trunc;'), (606, '\t\tND_TCHECK2(bp[0], addr_len);'), (615, '\t\treturn hdrlen + addr_len;'), (618, '\t\tif (len < addr_len + 2)'), (619, '\t\t\tgoto trunc;'), (620, '\t\tND_TCHECK2(bp[0], addr_len + 2);'), (649, '\t\treturn hdrlen + 2 + addr_len;'), (701, '\tint pimv2_addr_len;'), (708, '\tif (len < 2)'), (709, '\t\tgoto trunc;'), (715, '\tif (len < 4)'), (716, '\t\tgoto trunc;'), (717, '\tND_TCHECK(pim->pim_cksum);'), (758, '\tbp += 4;'), (759, '\tlen -= 4;'), (765, '\t\twhile (len > 0) {'), (766, '\t\t\tif (len < 4)'), (767, '\t\t\t\tgoto trunc;'), (776, '\t\t\tlen -= 4;'), (778, '\t\t\tif (len < olen)'), (779, '\t\t\t\tgoto trunc;'), (780, '\t\t\tND_TCHECK2(bp[0], olen);'), (850, '\t\t\t\t\tu_int plen = len;'), (853, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0);'), (854, '\t\t\t\t\t\tif (advance < 0)'), (855, '\t\t\t\t\t\t\tgoto trunc;'), (857, '\t\t\t\t\t\tplen -= advance;'), (870, '\t\t\tlen -= olen;'), (879, '\t\tif (len < 4)'), (880, '\t\t\tgoto trunc;'), (881, '\t\tND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN);'), (886, '\t\t 
EXTRACT_32BITS(bp))));'), (888, '\t\tbp += 4; len -= 4;'), (890, '\t\tif (len == 0)'), (891, '\t\t\tgoto trunc;'), (893, '\t\tND_TCHECK(ip->ip_vhl);'), (896, '\t\t\tND_TCHECK(ip->ip_dst);'), (919, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (920, '\t\t\tgoto trunc;'), (923, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (924, '\t\t\tgoto trunc;'), (977, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (978, '\t\t\t\tgoto trunc;'), (981, '\t\tif (len < 4)'), (982, '\t\t\tgoto trunc;'), (983, '\t\tND_TCHECK2(*bp, 4);'), (997, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (998, '\t\t\t\tgoto trunc;'), (1000, '\t\t\tif (len < 4)'), (1001, '\t\t\t\tgoto trunc;'), (1002, '\t\t\tND_TCHECK2(*bp, 4);'), (1009, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1010, '\t\t\t\t\tgoto trunc;'), (1015, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1016, '\t\t\t\t\tgoto trunc;'), (1028, '\t\tif (len < 2)'), (1029, '\t\t\tgoto trunc;'), (1030, '\t\tND_TCHECK_16BITS(bp);'), (1032, '\t\tbp += 2;'), (1033, '\t\tlen -= 2;'), (1034, '\t\tif (len < 1)'), (1035, '\t\t\tgoto trunc;'), (1036, '\t\tND_TCHECK(bp[0]);'), (1038, '\t\tif (len < 2)'), (1039, '\t\t\tgoto trunc;'), (1040, '\t\tND_TCHECK(bp[2]);'), (1043, '\t\tlen -= 2;'), (1047, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1048, '\t\t\tgoto trunc;'), (1050, '\t\tlen -= advance;'), (1055, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1056, '\t\t\t\tgoto trunc;'), (1058, '\t\t\tlen -= advance;'), (1061, '\t\t\tif (len < 1)'), (1062, '\t\t\t\tgoto trunc;'), (1063, '\t\t\tND_TCHECK(bp[0]);'), (1065, '\t\t\tif (len < 2)'), (1066, '\t\t\t\tgoto trunc;'), (1067, '\t\t\tND_TCHECK(bp[1]);'), (1069, '\t\t\tif (len < 4)'), (1070, '\t\t\t\tgoto trunc;'), (1072, '\t\t\tlen -= 4;'), (1077, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len,'), (1079, '\t\t\t\t\t\t\t\tpimv2_addr_len,'), (1080, '\t\t\t\t\t\t\t\t0)) < 0)'), (1081, '\t\t\t\t\tgoto trunc;'), (1083, '\t\t\t\tlen -= advance;'), (1085, '\t\t\t\tif (len < 2)'), (1086, '\t\t\t\t\tgoto trunc;'), (1087, '\t\t\t\tND_TCHECK_16BITS(bp);'), (1090, '\t\t\t\tif (len < 3)'), (1091, '\t\t\t\t\tgoto trunc;'), (1092, '\t\t\t\tND_TCHECK(bp[2]);'), (1094, '\t\t\t\tif (len < 4)'), (1095, '\t\t\t\t\tgoto trunc;'), (1097, '\t\t\t\tlen -= 4;'), (1105, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1106, '\t\t\tgoto trunc;'), (1109, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1110, '\t\t\tgoto trunc;'), (1112, '\t\tif (len < 8)'), (1113, '\t\t\tgoto trunc;'), (1114, '\t\tND_TCHECK2(*bp, 8);'), (1126, '\t\tif (len < 1)'), (1127, '\t\t\tgoto trunc;'), (1128, '\t\tND_TCHECK(bp[0]);'), (1131, '\t\tif (len < 2)'), (1132, '\t\t\tgoto trunc;'), (1133, '\t\tND_TCHECK(bp[1]);'), (1135, '\t\tif (len < 4)'), (1136, '\t\t\tgoto trunc;'), (1137, '\t\tND_TCHECK_16BITS(&bp[2]);'), (1141, '\t\tlen -= 4;'), (1145, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1146, '\t\t\tgoto trunc;'), (1148, '\t\tlen -= advance;'), (1153, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), 
(1154, '\t\t\t\tgoto trunc;'), (1156, '\t\t\tlen -= advance;'), (1163, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1164, '\t\t\tgoto trunc;'), (1166, '\t\tlen -= advance;'), (1168, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1169, '\t\t\tgoto trunc;'), (1171, '\t\tlen -= advance;'), (1173, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1174, '\t\t\tgoto trunc;'), (1176, '\t\tlen -= advance;'), (1177, '\t\tif (len < 2)'), (1178, '\t\t\tgoto trunc;'), (1179, '\t\tND_TCHECK_16BITS(bp);')], 'deleted': [(176, '\tND_TCHECK2(bp[6], 2);'), (180, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));'), (183, '\tbp += 8;'), (184, '\tlen -= 8;'), (197, '\t\tND_TCHECK2(bp[4], sizeof(struct in_addr));'), (198, '\t\tif (EXTRACT_32BITS(&bp[4]) != 0xffffffff)'), (199, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));'), (200, '\t\tND_TCHECK2(bp[8], 4);'), (201, '\t\tnjoin = EXTRACT_16BITS(&bp[8]);'), (202, '\t\tnprune = EXTRACT_16BITS(&bp[10]);'), (204, '\t\tbp += 12;'), (205, '\t\tlen -= 12;'), (218, '\t\t\tipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));'), (233, '\tregister const u_char *ep;'), (236, '\tep = (const u_char *)ndo->ndo_snapend;'), (237, '\tif (bp >= ep)'), (238, '\t\treturn;'), (239, ''), (305, '\t\tif (ndo->ndo_vflag)'), (382, '\t\tND_TCHECK(bp[4]);'), (383, '\t\tswitch (bp[4] & 0x3) {'), (393, '\t\tif (bp[4] & 0xfc)'), (394, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc));'), (395, '\t\tND_TCHECK(bp[5]);'), (396, '\t\tnentries = bp[5];'), (397, '\t\tbp += 6; len -= 6;'), (424, '\tregister const u_char *ep;'), (427, '\tep = (const u_char *)ndo->ndo_snapend;'), (428, '\tif (bp >= ep)'), (429, '\t\treturn;'), (499, 'static int pimv2_addr_len;'), (500, ''), (527, ' const u_char *bp, enum pimv2_addrtype at, int silent)'), (530, '\tint len, hdrlen;'), (532, '\tND_TCHECK(bp[0]);'), (533, ''), (534, '\tif (pimv2_addr_len == 0) {'), (539, '\t\t\tlen = sizeof(struct in_addr);'), (543, '\t\t\tlen = sizeof(struct in6_addr);'), (552, '\t\tswitch (pimv2_addr_len) {'), (563, '\t\tlen = pimv2_addr_len;'), (570, '\t\tND_TCHECK2(bp[0], len);'), (579, '\t\treturn hdrlen + len;'), (582, '\t\tND_TCHECK2(bp[0], len + 2);'), (611, '\t\treturn hdrlen + 2 + len;'), (719, '\t\tbp += 4;'), (720, '\t\twhile (bp < ep) {'), (724, '\t\t\tND_TCHECK2(bp[0], 4 + olen);'), (802, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0);'), (803, '\t\t\t\t\t\tif (advance < 0) {'), (804, '\t\t\t\t\t\t\tND_PRINT((ndo, "..."));'), (805, '\t\t\t\t\t\t\tbreak;'), (806, '\t\t\t\t\t\t}'), (828, '\t\tND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN);'), (833, '\t\t EXTRACT_32BITS(bp+4))));'), (835, '\t\tbp += 8; len -= 8;'), (861, '\t\tbp += 4; len -= 4;'), (862, '\t\tif (bp >= ep)'), (863, '\t\t\tbreak;'), (865, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (866, '\t\t\tND_PRINT((ndo, "..."));'), (867, '\t\t\tbreak;'), (868, '\t\t}'), (870, '\t\tif (bp >= ep)'), (871, '\t\t\tbreak;'), (873, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (874, '\t\t\tND_PRINT((ndo, "..."));'), (875, '\t\t\tbreak;'), (876, '\t\t}'), (927, '\t\tbp += 4; len -= 4;'), (929, '\t\t\tif (bp >= ep)'), (930, '\t\t\t\tbreak;'), (932, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (933, '\t\t\t\tND_PRINT((ndo, "..."));'), (934, '\t\t\t\tbreak;'), (935, '\t\t\t}'), (938, '\t\tif (bp + 4 > ep)'), (939, 
'\t\t\tbreak;'), (952, '\t\t\tif (bp >= ep)'), (953, '\t\t\t\tgoto jp_done;'), (955, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (956, '\t\t\t\tND_PRINT((ndo, "...)"));'), (957, '\t\t\t\tgoto jp_done;'), (958, '\t\t\t}'), (960, '\t\t\tif (bp + 4 > ep) {'), (961, '\t\t\t\tND_PRINT((ndo, "...)"));'), (962, '\t\t\t\tgoto jp_done;'), (963, '\t\t\t}'), (970, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (971, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (972, '\t\t\t\t\tgoto jp_done;'), (973, '\t\t\t\t}'), (978, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (979, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (980, '\t\t\t\t\tgoto jp_done;'), (981, '\t\t\t\t}'), (985, '\tjp_done:'), (992, '\t\tbp += 4;'), (995, '\t\tif (bp + sizeof(uint16_t) >= ep) break;'), (997, '\t\tbp += sizeof(uint16_t);'), (998, '\t\tif (bp >= ep) break;'), (1000, '\t\tif (bp + 1 >= ep) break;'), (1005, '\t\tif (bp >= ep) break;'), (1007, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1008, '\t\t\tND_PRINT((ndo, "..."));'), (1009, '\t\t\tbreak;'), (1010, '\t\t}'), (1016, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1017, '\t\t\t < 0) {'), (1018, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1019, '\t\t\t\tgoto bs_done;'), (1020, '\t\t\t}'), (1024, '\t\t\tif (bp >= ep) {'), (1025, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1026, '\t\t\t\tgoto bs_done;'), (1027, '\t\t\t}'), (1029, '\t\t\tif (bp + 1 >= ep) {'), (1030, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1031, '\t\t\t\tgoto bs_done;'), (1032, '\t\t\t}'), (1039, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp,'), (1041, '\t\t\t\t\t\t\t\t0)) < 0) {'), (1042, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1043, '\t\t\t\t\tgoto bs_done;'), (1044, '\t\t\t\t}'), (1047, '\t\t\t\tif (bp + 1 >= ep) {'), (1048, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1049, '\t\t\t\t\tgoto bs_done;'), (1050, '\t\t\t\t}'), (1053, '\t\t\t\tif (bp + 2 >= ep) {'), (1054, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1055, '\t\t\t\t\tgoto bs_done;'), (1056, '\t\t\t\t}'), (1062, '\t bs_done:'), (1066, '\t\tbp += 4; len -= 4;'), (1067, '\t\tif (bp >= ep)'), (1068, '\t\t\tbreak;'), (1070, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1071, '\t\t\tND_PRINT((ndo, "..."));'), (1072, '\t\t\tbreak;'), (1073, '\t\t}'), (1075, '\t\tif (bp >= ep)'), (1076, '\t\t\tbreak;'), (1078, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1079, '\t\t\tND_PRINT((ndo, "..."));'), (1080, '\t\t\tbreak;'), (1081, '\t\t}'), (1083, '\t\tif (bp + 8 > ep)'), (1084, '\t\t\tbreak;'), (1094, '\t\tbp += 4;'), (1097, '\t\tif (bp >= ep) break;'), (1100, '\t\tif (bp + 1 >= ep) break;'), (1102, '\t\tif (bp + 3 >= ep) break;'), (1108, '\t\tif (bp >= ep) break;'), (1110, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1111, '\t\t\tND_PRINT((ndo, "..."));'), (1112, '\t\t\tbreak;'), (1113, '\t\t}'), (1119, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1120, '\t\t\t < 0) {'), (1121, '\t\t\t\tND_PRINT((ndo, "..."));'), (1122, '\t\t\t\tbreak;'), (1123, '\t\t\t}'), (1131, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1132, '\t\t\tND_PRINT((ndo, "..."));'), (1133, '\t\t\tbreak;'), (1134, '\t\t}'), (1137, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1138, '\t\t\tND_PRINT((ndo, "..."));'), (1139, '\t\t\tbreak;'), (1140, '\t\t}'), (1143, '\t\tif ((advance = pimv2_addr_print(ndo, bp, 
pimv2_unicast, 0)) < 0) {'), (1144, '\t\t\tND_PRINT((ndo, "..."));'), (1145, '\t\t\tbreak;'), (1146, '\t\t}'), (1148, '\t\tND_TCHECK2(bp[0], 2);')]}
207
176
890
5856
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13030
['CWE-125']
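The diff recorded above has one recurring shape: before every fixed-size read from the packet buffer, the patched code checks the number of bytes still available (if (len < N) goto trunc;) and keeps len in step as bp advances, instead of comparing the cursor against the snapshot end pointer. The sketch below is a simplified, self-contained illustration of that bounds-check-before-read pattern; it is not the actual print-pim.c code, and the function name and field layout are assumptions made for the example.

/*
 * Minimal sketch of the CWE-125 fix pattern: validate the remaining
 * length before extracting multi-byte fields, and bail out to a
 * "truncated" label instead of reading past the end of the capture.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Big-endian 16-bit extraction, in the spirit of EXTRACT_16BITS(). */
static uint16_t extract_16bits(const unsigned char *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Print the group count and holdtime from a (simplified) PIMv2
 * Join/Prune body, refusing to read beyond the 'len' bytes we hold. */
static void print_join_prune_counts(const unsigned char *bp, size_t len)
{
	/* Reserved byte, group count and 16-bit holdtime need 4 bytes. */
	if (len < 4)
		goto trunc;
	printf("groups=%u holdtime=%u\n", bp[1], extract_16bits(&bp[2]));
	return;

trunc:
	printf("[|pim]\n");	/* same truncation marker tcpdump prints */
}

int main(void)
{
	const unsigned char ok[4]     = { 0x00, 0x02, 0x00, 0xb4 };
	const unsigned char shorter[2] = { 0x00, 0x02 };

	print_join_prune_counts(ok, sizeof(ok));		/* groups=2 holdtime=180 */
	print_join_prune_counts(shorter, sizeof(shorter));	/* [|pim] */
	return 0;
}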
print-pim.c
pimv2_addr_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. 
*/ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. 
* * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. * */ enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? 
"R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 
* | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); 
unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
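The fixed file above (the code_after side of this record) pairs every advance of bp with a check that enough on-wire bytes remain, bailing out through goto trunc before any multi-byte read. The standalone sketch below applies the same remaining-length discipline to a made-up record format; the function and field names are illustrative assumptions and none of it is tcpdump code.

#include <stdint.h>
#include <stdio.h>

/*
 * Minimal sketch, not tcpdump code: parse a series of
 * { 1-byte type, 1-byte value length, value bytes } records,
 * checking the remaining length before every read -- the same
 * "if (len < N) goto trunc;" discipline used in the fixed file.
 */
static int
parse_records(const uint8_t *bp, size_t len)
{
	while (len > 0) {
		uint8_t type, vlen;

		if (len < 2)
			goto trunc;		/* need type + length bytes */
		type = bp[0];
		vlen = bp[1];
		bp += 2;
		len -= 2;

		if (len < vlen)
			goto trunc;		/* value would run past the buffer */
		printf("type %u, %u value byte(s)\n", (unsigned)type, (unsigned)vlen);
		bp += vlen;
		len -= vlen;
	}
	return 0;

trunc:
	printf("[|truncated]\n");
	return -1;
}

int
main(void)
{
	const uint8_t ok[]  = { 1, 2, 0xaa, 0xbb, 3, 0 };
	const uint8_t bad[] = { 1, 200 };	/* claims 200 value bytes, has none */

	parse_records(ok, sizeof(ok));
	parse_records(bad, sizeof(bad));	/* takes the trunc path */
	return 0;
}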
pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; }
pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; }
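The func_before/func_after pair above captures the interface change to pimv2_addr_print: the new version receives the remaining packet length (len) and the encoded address length (addr_len) as parameters and refuses to decode when len is too small, instead of relying on a file-scope pimv2_addr_len and capture-bounds checks alone. A minimal helper in the same spirit follows; the names are mine, not tcpdump's, and it is a sketch of the idea rather than the actual decoder.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch only; names are illustrative, not tcpdump's.  Consume an
 * address of addr_len bytes from a buffer with len bytes remaining,
 * returning the number of bytes consumed or -1 when the buffer is too
 * short -- mirroring the "if (len < addr_len) goto trunc;" check added
 * to pimv2_addr_print().
 */
static int
read_addr(const uint8_t *bp, size_t len, size_t addr_len, uint8_t *out)
{
	if (len < addr_len)
		return -1;		/* truncated: refuse to read */
	memcpy(out, bp, addr_len);
	return (int)addr_len;
}

int
main(void)
{
	const uint8_t pkt[] = { 192, 0, 2, 1 };	/* only 4 bytes captured */
	uint8_t v4[4], v6[16];

	printf("IPv4: %d\n", read_addr(pkt, sizeof(pkt), sizeof(v4), v4));	/* 4 */
	printf("IPv6: %d\n", read_addr(pkt, sizeof(pkt), sizeof(v6), v6));	/* -1 */
	return 0;
}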
{'added': [(172, '\tif (len < sizeof(struct in_addr))'), (173, '\t\tgoto trunc;'), (178, '\tbp += 4;'), (179, '\tlen -= 4;'), (180, '\tif (len < 4)'), (181, '\t\tgoto trunc;'), (182, '\tND_TCHECK2(bp[2], 2);'), (186, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));'), (189, '\tbp += 4;'), (190, '\tlen -= 4;'), (192, '\tif (len < 4)'), (193, '\t\tgoto trunc;'), (203, '\t\tif (len < 4)'), (204, '\t\t\tgoto trunc;'), (207, '\t\tbp += 4;'), (208, '\t\tlen -= 4;'), (209, '\t\tif (len < 4)'), (210, '\t\t\tgoto trunc;'), (211, '\t\tND_TCHECK2(bp[0], sizeof(struct in_addr));'), (212, '\t\tif (EXTRACT_32BITS(&bp[0]) != 0xffffffff)'), (213, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));'), (214, '\t\tbp += 4;'), (215, '\t\tlen -= 4;'), (216, '\t\tif (len < 4)'), (217, '\t\t\tgoto trunc;'), (218, '\t\tND_TCHECK2(bp[0], 4);'), (219, '\t\tnjoin = EXTRACT_16BITS(&bp[0]);'), (220, '\t\tnprune = EXTRACT_16BITS(&bp[2]);'), (222, '\t\tbp += 4;'), (223, '\t\tlen -= 4;'), (231, '\t\t\tif (len < 6)'), (232, '\t\t\t\tgoto trunc;'), (238, '\t\t\t ipaddr_string(ndo, &bp[2]),'), (239, '\t\t\t bp[1] & 0x3f));'), (321, '\t\tif (ndo->ndo_vflag) {'), (322, '\t\t\tif (len < 8)'), (323, '\t\t\t\tgoto trunc;'), (325, '\t\t}'), (352, '\tif (len < 8)'), (353, '\t\tgoto trunc;'), (401, '\t\tif (len < 4)'), (402, '\t\t\tgoto trunc;'), (405, '\t\tbp += 4;'), (406, '\t\tlen -= 4;'), (407, '\t\tif (len < 1)'), (408, '\t\t\tgoto trunc;'), (409, '\t\tND_TCHECK(bp[0]);'), (410, '\t\tswitch (bp[0] & 0x3) {'), (420, '\t\tif (bp[0] & 0xfc)'), (421, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc));'), (422, '\t\tbp += 1;'), (423, '\t\tlen -= 1;'), (424, '\t\tif (len < 1)'), (425, '\t\t\tgoto trunc;'), (426, '\t\tND_TCHECK(bp[0]);'), (427, '\t\tnentries = bp[0];'), (428, '\t\tbp += 1;'), (429, '\t\tlen -= 1;'), (432, '\t\t\tif (len < 6)'), (433, '\t\t\t\tgoto trunc;'), (464, '\tND_TCHECK(pim->pim_typever);'), (488, ''), (489, 'trunc:'), (490, '\tND_PRINT((ndo, "[|pim]"));'), (491, '\treturn;'), (560, ' const u_char *bp, u_int len, enum pimv2_addrtype at,'), (561, ' u_int addr_len, int silent)'), (564, '\tint hdrlen;'), (566, '\tif (addr_len == 0) {'), (567, '\t\tif (len < 2)'), (568, '\t\t\tgoto trunc;'), (573, '\t\t\taddr_len = (u_int)sizeof(struct in_addr);'), (577, '\t\t\taddr_len = (u_int)sizeof(struct in6_addr);'), (586, '\t\tswitch (addr_len) {'), (601, '\tlen -= hdrlen;'), (604, '\t\tif (len < addr_len)'), (605, '\t\t\tgoto trunc;'), (606, '\t\tND_TCHECK2(bp[0], addr_len);'), (615, '\t\treturn hdrlen + addr_len;'), (618, '\t\tif (len < addr_len + 2)'), (619, '\t\t\tgoto trunc;'), (620, '\t\tND_TCHECK2(bp[0], addr_len + 2);'), (649, '\t\treturn hdrlen + 2 + addr_len;'), (701, '\tint pimv2_addr_len;'), (708, '\tif (len < 2)'), (709, '\t\tgoto trunc;'), (715, '\tif (len < 4)'), (716, '\t\tgoto trunc;'), (717, '\tND_TCHECK(pim->pim_cksum);'), (758, '\tbp += 4;'), (759, '\tlen -= 4;'), (765, '\t\twhile (len > 0) {'), (766, '\t\t\tif (len < 4)'), (767, '\t\t\t\tgoto trunc;'), (776, '\t\t\tlen -= 4;'), (778, '\t\t\tif (len < olen)'), (779, '\t\t\t\tgoto trunc;'), (780, '\t\t\tND_TCHECK2(bp[0], olen);'), (850, '\t\t\t\t\tu_int plen = len;'), (853, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0);'), (854, '\t\t\t\t\t\tif (advance < 0)'), (855, '\t\t\t\t\t\t\tgoto trunc;'), (857, '\t\t\t\t\t\tplen -= advance;'), (870, '\t\t\tlen -= olen;'), (879, '\t\tif (len < 4)'), (880, '\t\t\tgoto trunc;'), (881, '\t\tND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN);'), (886, '\t\t 
EXTRACT_32BITS(bp))));'), (888, '\t\tbp += 4; len -= 4;'), (890, '\t\tif (len == 0)'), (891, '\t\t\tgoto trunc;'), (893, '\t\tND_TCHECK(ip->ip_vhl);'), (896, '\t\t\tND_TCHECK(ip->ip_dst);'), (919, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (920, '\t\t\tgoto trunc;'), (923, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (924, '\t\t\tgoto trunc;'), (977, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (978, '\t\t\t\tgoto trunc;'), (981, '\t\tif (len < 4)'), (982, '\t\t\tgoto trunc;'), (983, '\t\tND_TCHECK2(*bp, 4);'), (997, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (998, '\t\t\t\tgoto trunc;'), (1000, '\t\t\tif (len < 4)'), (1001, '\t\t\t\tgoto trunc;'), (1002, '\t\t\tND_TCHECK2(*bp, 4);'), (1009, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1010, '\t\t\t\t\tgoto trunc;'), (1015, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1016, '\t\t\t\t\tgoto trunc;'), (1028, '\t\tif (len < 2)'), (1029, '\t\t\tgoto trunc;'), (1030, '\t\tND_TCHECK_16BITS(bp);'), (1032, '\t\tbp += 2;'), (1033, '\t\tlen -= 2;'), (1034, '\t\tif (len < 1)'), (1035, '\t\t\tgoto trunc;'), (1036, '\t\tND_TCHECK(bp[0]);'), (1038, '\t\tif (len < 2)'), (1039, '\t\t\tgoto trunc;'), (1040, '\t\tND_TCHECK(bp[2]);'), (1043, '\t\tlen -= 2;'), (1047, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1048, '\t\t\tgoto trunc;'), (1050, '\t\tlen -= advance;'), (1055, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1056, '\t\t\t\tgoto trunc;'), (1058, '\t\t\tlen -= advance;'), (1061, '\t\t\tif (len < 1)'), (1062, '\t\t\t\tgoto trunc;'), (1063, '\t\t\tND_TCHECK(bp[0]);'), (1065, '\t\t\tif (len < 2)'), (1066, '\t\t\t\tgoto trunc;'), (1067, '\t\t\tND_TCHECK(bp[1]);'), (1069, '\t\t\tif (len < 4)'), (1070, '\t\t\t\tgoto trunc;'), (1072, '\t\t\tlen -= 4;'), (1077, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len,'), (1079, '\t\t\t\t\t\t\t\tpimv2_addr_len,'), (1080, '\t\t\t\t\t\t\t\t0)) < 0)'), (1081, '\t\t\t\t\tgoto trunc;'), (1083, '\t\t\t\tlen -= advance;'), (1085, '\t\t\t\tif (len < 2)'), (1086, '\t\t\t\t\tgoto trunc;'), (1087, '\t\t\t\tND_TCHECK_16BITS(bp);'), (1090, '\t\t\t\tif (len < 3)'), (1091, '\t\t\t\t\tgoto trunc;'), (1092, '\t\t\t\tND_TCHECK(bp[2]);'), (1094, '\t\t\t\tif (len < 4)'), (1095, '\t\t\t\t\tgoto trunc;'), (1097, '\t\t\t\tlen -= 4;'), (1105, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1106, '\t\t\tgoto trunc;'), (1109, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1110, '\t\t\tgoto trunc;'), (1112, '\t\tif (len < 8)'), (1113, '\t\t\tgoto trunc;'), (1114, '\t\tND_TCHECK2(*bp, 8);'), (1126, '\t\tif (len < 1)'), (1127, '\t\t\tgoto trunc;'), (1128, '\t\tND_TCHECK(bp[0]);'), (1131, '\t\tif (len < 2)'), (1132, '\t\t\tgoto trunc;'), (1133, '\t\tND_TCHECK(bp[1]);'), (1135, '\t\tif (len < 4)'), (1136, '\t\t\tgoto trunc;'), (1137, '\t\tND_TCHECK_16BITS(&bp[2]);'), (1141, '\t\tlen -= 4;'), (1145, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1146, '\t\t\tgoto trunc;'), (1148, '\t\tlen -= advance;'), (1153, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), 
(1154, '\t\t\t\tgoto trunc;'), (1156, '\t\t\tlen -= advance;'), (1163, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1164, '\t\t\tgoto trunc;'), (1166, '\t\tlen -= advance;'), (1168, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1169, '\t\t\tgoto trunc;'), (1171, '\t\tlen -= advance;'), (1173, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1174, '\t\t\tgoto trunc;'), (1176, '\t\tlen -= advance;'), (1177, '\t\tif (len < 2)'), (1178, '\t\t\tgoto trunc;'), (1179, '\t\tND_TCHECK_16BITS(bp);')], 'deleted': [(176, '\tND_TCHECK2(bp[6], 2);'), (180, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));'), (183, '\tbp += 8;'), (184, '\tlen -= 8;'), (197, '\t\tND_TCHECK2(bp[4], sizeof(struct in_addr));'), (198, '\t\tif (EXTRACT_32BITS(&bp[4]) != 0xffffffff)'), (199, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));'), (200, '\t\tND_TCHECK2(bp[8], 4);'), (201, '\t\tnjoin = EXTRACT_16BITS(&bp[8]);'), (202, '\t\tnprune = EXTRACT_16BITS(&bp[10]);'), (204, '\t\tbp += 12;'), (205, '\t\tlen -= 12;'), (218, '\t\t\tipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));'), (233, '\tregister const u_char *ep;'), (236, '\tep = (const u_char *)ndo->ndo_snapend;'), (237, '\tif (bp >= ep)'), (238, '\t\treturn;'), (239, ''), (305, '\t\tif (ndo->ndo_vflag)'), (382, '\t\tND_TCHECK(bp[4]);'), (383, '\t\tswitch (bp[4] & 0x3) {'), (393, '\t\tif (bp[4] & 0xfc)'), (394, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc));'), (395, '\t\tND_TCHECK(bp[5]);'), (396, '\t\tnentries = bp[5];'), (397, '\t\tbp += 6; len -= 6;'), (424, '\tregister const u_char *ep;'), (427, '\tep = (const u_char *)ndo->ndo_snapend;'), (428, '\tif (bp >= ep)'), (429, '\t\treturn;'), (499, 'static int pimv2_addr_len;'), (500, ''), (527, ' const u_char *bp, enum pimv2_addrtype at, int silent)'), (530, '\tint len, hdrlen;'), (532, '\tND_TCHECK(bp[0]);'), (533, ''), (534, '\tif (pimv2_addr_len == 0) {'), (539, '\t\t\tlen = sizeof(struct in_addr);'), (543, '\t\t\tlen = sizeof(struct in6_addr);'), (552, '\t\tswitch (pimv2_addr_len) {'), (563, '\t\tlen = pimv2_addr_len;'), (570, '\t\tND_TCHECK2(bp[0], len);'), (579, '\t\treturn hdrlen + len;'), (582, '\t\tND_TCHECK2(bp[0], len + 2);'), (611, '\t\treturn hdrlen + 2 + len;'), (719, '\t\tbp += 4;'), (720, '\t\twhile (bp < ep) {'), (724, '\t\t\tND_TCHECK2(bp[0], 4 + olen);'), (802, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0);'), (803, '\t\t\t\t\t\tif (advance < 0) {'), (804, '\t\t\t\t\t\t\tND_PRINT((ndo, "..."));'), (805, '\t\t\t\t\t\t\tbreak;'), (806, '\t\t\t\t\t\t}'), (828, '\t\tND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN);'), (833, '\t\t EXTRACT_32BITS(bp+4))));'), (835, '\t\tbp += 8; len -= 8;'), (861, '\t\tbp += 4; len -= 4;'), (862, '\t\tif (bp >= ep)'), (863, '\t\t\tbreak;'), (865, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (866, '\t\t\tND_PRINT((ndo, "..."));'), (867, '\t\t\tbreak;'), (868, '\t\t}'), (870, '\t\tif (bp >= ep)'), (871, '\t\t\tbreak;'), (873, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (874, '\t\t\tND_PRINT((ndo, "..."));'), (875, '\t\t\tbreak;'), (876, '\t\t}'), (927, '\t\tbp += 4; len -= 4;'), (929, '\t\t\tif (bp >= ep)'), (930, '\t\t\t\tbreak;'), (932, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (933, '\t\t\t\tND_PRINT((ndo, "..."));'), (934, '\t\t\t\tbreak;'), (935, '\t\t\t}'), (938, '\t\tif (bp + 4 > ep)'), (939, 
'\t\t\tbreak;'), (952, '\t\t\tif (bp >= ep)'), (953, '\t\t\t\tgoto jp_done;'), (955, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (956, '\t\t\t\tND_PRINT((ndo, "...)"));'), (957, '\t\t\t\tgoto jp_done;'), (958, '\t\t\t}'), (960, '\t\t\tif (bp + 4 > ep) {'), (961, '\t\t\t\tND_PRINT((ndo, "...)"));'), (962, '\t\t\t\tgoto jp_done;'), (963, '\t\t\t}'), (970, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (971, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (972, '\t\t\t\t\tgoto jp_done;'), (973, '\t\t\t\t}'), (978, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (979, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (980, '\t\t\t\t\tgoto jp_done;'), (981, '\t\t\t\t}'), (985, '\tjp_done:'), (992, '\t\tbp += 4;'), (995, '\t\tif (bp + sizeof(uint16_t) >= ep) break;'), (997, '\t\tbp += sizeof(uint16_t);'), (998, '\t\tif (bp >= ep) break;'), (1000, '\t\tif (bp + 1 >= ep) break;'), (1005, '\t\tif (bp >= ep) break;'), (1007, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1008, '\t\t\tND_PRINT((ndo, "..."));'), (1009, '\t\t\tbreak;'), (1010, '\t\t}'), (1016, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1017, '\t\t\t < 0) {'), (1018, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1019, '\t\t\t\tgoto bs_done;'), (1020, '\t\t\t}'), (1024, '\t\t\tif (bp >= ep) {'), (1025, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1026, '\t\t\t\tgoto bs_done;'), (1027, '\t\t\t}'), (1029, '\t\t\tif (bp + 1 >= ep) {'), (1030, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1031, '\t\t\t\tgoto bs_done;'), (1032, '\t\t\t}'), (1039, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp,'), (1041, '\t\t\t\t\t\t\t\t0)) < 0) {'), (1042, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1043, '\t\t\t\t\tgoto bs_done;'), (1044, '\t\t\t\t}'), (1047, '\t\t\t\tif (bp + 1 >= ep) {'), (1048, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1049, '\t\t\t\t\tgoto bs_done;'), (1050, '\t\t\t\t}'), (1053, '\t\t\t\tif (bp + 2 >= ep) {'), (1054, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1055, '\t\t\t\t\tgoto bs_done;'), (1056, '\t\t\t\t}'), (1062, '\t bs_done:'), (1066, '\t\tbp += 4; len -= 4;'), (1067, '\t\tif (bp >= ep)'), (1068, '\t\t\tbreak;'), (1070, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1071, '\t\t\tND_PRINT((ndo, "..."));'), (1072, '\t\t\tbreak;'), (1073, '\t\t}'), (1075, '\t\tif (bp >= ep)'), (1076, '\t\t\tbreak;'), (1078, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1079, '\t\t\tND_PRINT((ndo, "..."));'), (1080, '\t\t\tbreak;'), (1081, '\t\t}'), (1083, '\t\tif (bp + 8 > ep)'), (1084, '\t\t\tbreak;'), (1094, '\t\tbp += 4;'), (1097, '\t\tif (bp >= ep) break;'), (1100, '\t\tif (bp + 1 >= ep) break;'), (1102, '\t\tif (bp + 3 >= ep) break;'), (1108, '\t\tif (bp >= ep) break;'), (1110, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1111, '\t\t\tND_PRINT((ndo, "..."));'), (1112, '\t\t\tbreak;'), (1113, '\t\t}'), (1119, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1120, '\t\t\t < 0) {'), (1121, '\t\t\t\tND_PRINT((ndo, "..."));'), (1122, '\t\t\t\tbreak;'), (1123, '\t\t\t}'), (1131, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1132, '\t\t\tND_PRINT((ndo, "..."));'), (1133, '\t\t\tbreak;'), (1134, '\t\t}'), (1137, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1138, '\t\t\tND_PRINT((ndo, "..."));'), (1139, '\t\t\tbreak;'), (1140, '\t\t}'), (1143, '\t\tif ((advance = pimv2_addr_print(ndo, bp, 
pimv2_unicast, 0)) < 0) {'), (1144, '\t\t\tND_PRINT((ndo, "..."));'), (1145, '\t\t\tbreak;'), (1146, '\t\t}'), (1148, '\t\tND_TCHECK2(bp[0], 2);')]}
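Read as a whole, the added/deleted lists above amount to one mechanical transformation: each bp += N gains a matching len -= N and is preceded by an "if (len < N) goto trunc;" guard, while bare pointer comparisons against the snapshot end are retired or backed up by length checks. Keeping the cursor and the remaining count from drifting apart is the error-prone part; the tiny helper below (purely illustrative, not from the patch) makes that pairing explicit.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative helper, not part of the patch: keep the data pointer and
 * the remaining byte count in one place so every advance is checked and
 * the two cannot drift apart.
 */
struct cursor {
	const uint8_t *bp;	/* next unread byte */
	size_t len;		/* bytes remaining */
};

static int
cursor_skip(struct cursor *c, size_t n)
{
	if (c->len < n)
		return -1;	/* would move past the end of the data */
	c->bp += n;
	c->len -= n;
	return 0;
}

int
main(void)
{
	uint8_t buf[16] = { 0 };
	struct cursor c = { buf, sizeof(buf) };

	printf("%d\n", cursor_skip(&c, 4));	/* 0: ok */
	printf("%d\n", cursor_skip(&c, 20));	/* -1: refused */
	printf("remaining: %zu\n", c.len);	/* 12 */
	return 0;
}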
Lines added: 207
Lines deleted: 176
Lines in file: 890
Tokens in file: 5,856
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13030
['CWE-125']
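CWE-125 denotes an out-of-bounds read: code reads past the end (or before the start) of the buffer it was handed, which in this record's context comes from trusting counts and lengths carried in the PIM packet without checking how many captured bytes actually remain. The sketch below is hypothetical and deliberately simplified, not code from this repository: sum_entries_unsafe shows how a trusted on-wire count drives a read past the buffer, and sum_entries_safe shows the validation that prevents it.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical illustration of CWE-125 (out-of-bounds read), not code
 * from this repository.  The count comes from the packet and is trusted,
 * so the loop can read past the end of buf.
 */
static unsigned
sum_entries_unsafe(const uint8_t *buf, size_t buflen)
{
	uint8_t count = buf[0];			/* attacker-controlled */
	unsigned sum = 0;
	size_t i;

	(void)buflen;				/* never consulted -- the bug */
	for (i = 0; i < count; i++)
		sum += buf[1 + i];		/* may read past buf + buflen */
	return sum;
}

/* Fixed variant: validate the claimed count against the bytes present
 * before reading any of them. */
static int
sum_entries_safe(const uint8_t *buf, size_t buflen, unsigned *sum)
{
	uint8_t count;
	size_t i;

	if (buflen < 1)
		return -1;
	count = buf[0];
	if ((size_t)count > buflen - 1)
		return -1;			/* truncated: refuse to read */
	*sum = 0;
	for (i = 0; i < count; i++)
		*sum += buf[1 + i];
	return 0;
}

int
main(void)
{
	const uint8_t pkt[] = { 200, 1, 2, 3 };	/* claims 200 entries, carries 3 */
	unsigned sum;

	if (sum_entries_safe(pkt, sizeof(pkt), &sum) < 0)
		puts("rejected truncated packet");
	(void)sum_entries_unsafe;		/* shown only for contrast, never called */
	return 0;
}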
print-pim.c
pimv2_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
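The pre-fix print-pim.c above bounds most reads only against the end-of-capture pointer ep and, in several branches, stops tracking how much of the claimed PIM message length remains. The standalone sketch below (not tcpdump code; the buffer contents, lengths and helper names are illustrative) contrasts that snapend-only check with one that also consults the remaining length, which is the style the fixed file adopts.

/*
 * Minimal standalone sketch: contrasts bounding a read only by the
 * end-of-capture pointer with a check that also tracks the remaining
 * claimed message length.  Everything here is illustrative.
 */
#include <stdio.h>
#include <stdint.h>

/* Read a 16-bit big-endian value, the way EXTRACT_16BITS() does. */
static uint16_t get_be16(const unsigned char *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

/* Pre-fix style: only the snapshot end pointer bounds the read. */
static int read_u16_snapend_only(const unsigned char *bp,
                                 const unsigned char *ep, uint16_t *out)
{
        if (bp + 2 > ep)        /* may still run past the message length */
                return -1;
        *out = get_be16(bp);
        return 0;
}

/* Post-fix style: the remaining on-wire length is checked first. */
static int read_u16_len_checked(const unsigned char *bp, unsigned int len,
                                const unsigned char *ep, uint16_t *out)
{
        if (len < 2 || bp + 2 > ep)     /* both limits must allow the read */
                return -1;
        *out = get_be16(bp);
        return 0;
}

int main(void)
{
        unsigned char capture[8] = { 0x12, 0x34, 0x56, 0x78, 0, 0, 0, 0 };
        const unsigned char *bp = capture;
        const unsigned char *ep = capture + sizeof(capture);
        unsigned int claimed_len = 1;   /* message claims only 1 byte remains */
        uint16_t v;

        printf("snapend-only check: %s\n",
               read_u16_snapend_only(bp, ep, &v) == 0 ? "read allowed" : "refused");
        printf("length-tracking check: %s\n",
               read_u16_len_checked(bp, claimed_len, ep, &v) == 0 ? "read allowed" : "refused");
        return 0;
}

In the fixed file both limits are consulted, which is why most branches now test the remaining length (goto trunc on shortfall) immediately before the corresponding ND_TCHECK.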
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. 
*/ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. 
* * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. * */ enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? 
"R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 
* | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); 
unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
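The fixed print-pim.c above repeatedly applies one idiom: verify the remaining length before every read, then shrink it as bytes are consumed. The sketch below replays that idiom for a TLV walk shaped like the Hello-option loop; it is a standalone illustration with made-up option data, not the tcpdump code.

/*
 * Minimal sketch of the length-tracking TLV walk used by the fixed
 * Hello-option loop: verify the remaining length before every read,
 * and shrink it as bytes are consumed.  Standalone illustration.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t get_be16(const unsigned char *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

static void walk_options(const unsigned char *bp, unsigned int len)
{
        while (len > 0) {
                unsigned int otype, olen;

                if (len < 4)                    /* option header must fit */
                        goto trunc;
                otype = get_be16(bp);
                olen  = get_be16(bp + 2);
                bp += 4;
                len -= 4;

                if (len < olen)                 /* option value must fit too */
                        goto trunc;
                printf("option %u, length %u\n", otype, olen);

                bp += olen;                     /* consume the value */
                len -= olen;
        }
        return;
trunc:
        printf("[|truncated]\n");
}

int main(void)
{
        /* One well-formed option followed by a header that claims more
         * bytes than remain, so the walk reports truncation. */
        unsigned char pkt[] = { 0x00, 0x01, 0x00, 0x02,  0xab, 0xcd,
                                0x00, 0x14, 0x00, 0x08 };
        walk_options(pkt, sizeof(pkt));
        return 0;
}

The pre-fix loop advanced bp by olen without decrementing len and without checking olen against the remaining message length, so a bogus option length could walk the cursor past the message; the fixed loop refuses it and reports truncation instead.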
pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); }
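Both the pre- and post-fix pimv2_print() verify the PIM checksum through pimv2_check_checksum(), and for Register messages first try a checksum over just the 8-byte header before falling back to the whole message, per the RFC 4601 note quoted in the code. The sketch below shows the ones'-complement Internet checksum that computation relies on, applied to a toy Register-like buffer; inet_cksum() here is a simplified stand-in, not the libnetdissect in_cksum() implementation, and the buffer contents are made up.

/*
 * One's-complement Internet checksum over len bytes (RFC 1071 style).
 * A simplified stand-in for what in_cksum() computes over a cksum_vec.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t inet_cksum(const unsigned char *buf, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
        if (len & 1)                            /* pad an odd trailing byte */
                sum += (uint32_t)buf[len - 1] << 8;
        while (sum >> 16)                       /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        /* Toy Register-like message: 8-byte PIM header (version 2, type 1)
         * followed by 4 payload bytes.  A verification result of 0 means
         * "correct", matching the CORRECT/INCORRECT logic in the printer. */
        unsigned char msg[12] = { 0x21, 0x00, 0x00, 0x00,
                                  0x40, 0x00, 0x00, 0x00,
                                  0xde, 0xad, 0xbe, 0xef };
        uint16_t c8 = inet_cksum(msg, 8);       /* checksum the header only */

        msg[2] = (unsigned char)(c8 >> 8);      /* store it in the cksum field */
        msg[3] = (unsigned char)(c8 & 0xff);
        printf("header-only verification:   0x%04x\n", (unsigned)inet_cksum(msg, 8));
        printf("whole-message verification: 0x%04x\n", (unsigned)inet_cksum(msg, sizeof(msg)));
        return 0;
}

With the checksum computed over the header only, the header-only verification yields 0 while the whole-message verification does not, which is exactly why the printer tries the 8-byte checksum first for Register messages and then falls back to the full length.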
pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 
* | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); 
unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); }
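The fixed pimv2_addr_print() shown above decides the address size from the family byte of an encoded address when the header's address-length field is zero: family 1 gives a 4-byte IPv4 address, family 2 a 16-byte IPv6 address, and only encoding 0 is accepted. The following standalone sketch walks that two-byte header the same way; parse_encoded_unicast() and the buffer contents are illustrative, not part of tcpdump.

/*
 * Sketch of how an encoded unicast address is interpreted when the
 * PIM header's address-length field is zero: one family byte, one
 * encoding byte, then the native address.  Standalone illustration.
 */
#include <stdio.h>

#define MOCK_INADDR_LEN   4     /* sizeof(struct in_addr)  */
#define MOCK_IN6ADDR_LEN 16     /* sizeof(struct in6_addr) */

/* Returns total bytes consumed (2-byte header + address), or -1. */
static int parse_encoded_unicast(const unsigned char *bp, unsigned int len)
{
        unsigned int addr_len;

        if (len < 2)
                return -1;
        switch (bp[0]) {                /* address family */
        case 1: addr_len = MOCK_INADDR_LEN;  break;     /* IPv4 */
        case 2: addr_len = MOCK_IN6ADDR_LEN; break;     /* IPv6 */
        default: return -1;
        }
        if (bp[1] != 0)                 /* only encoding 0 is defined */
                return -1;
        if (len - 2 < addr_len)         /* address body must fit */
                return -1;
        printf("family %u, %u-byte address\n", (unsigned)bp[0], addr_len);
        return 2 + (int)addr_len;
}

int main(void)
{
        unsigned char enc[] = { 1, 0, 192, 0, 2, 1 };   /* IPv4, 192.0.2.1 */
        printf("consumed %d bytes\n", parse_encoded_unicast(enc, sizeof(enc)));
        return 0;
}

As in the fixed function, the caller is expected to treat a negative return as truncation (goto trunc) rather than printing "..." and continuing, and to subtract the returned byte count from its remaining length before parsing the next field.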
{'added': [(172, '\tif (len < sizeof(struct in_addr))'), (173, '\t\tgoto trunc;'), (178, '\tbp += 4;'), (179, '\tlen -= 4;'), (180, '\tif (len < 4)'), (181, '\t\tgoto trunc;'), (182, '\tND_TCHECK2(bp[2], 2);'), (186, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));'), (189, '\tbp += 4;'), (190, '\tlen -= 4;'), (192, '\tif (len < 4)'), (193, '\t\tgoto trunc;'), (203, '\t\tif (len < 4)'), (204, '\t\t\tgoto trunc;'), (207, '\t\tbp += 4;'), (208, '\t\tlen -= 4;'), (209, '\t\tif (len < 4)'), (210, '\t\t\tgoto trunc;'), (211, '\t\tND_TCHECK2(bp[0], sizeof(struct in_addr));'), (212, '\t\tif (EXTRACT_32BITS(&bp[0]) != 0xffffffff)'), (213, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));'), (214, '\t\tbp += 4;'), (215, '\t\tlen -= 4;'), (216, '\t\tif (len < 4)'), (217, '\t\t\tgoto trunc;'), (218, '\t\tND_TCHECK2(bp[0], 4);'), (219, '\t\tnjoin = EXTRACT_16BITS(&bp[0]);'), (220, '\t\tnprune = EXTRACT_16BITS(&bp[2]);'), (222, '\t\tbp += 4;'), (223, '\t\tlen -= 4;'), (231, '\t\t\tif (len < 6)'), (232, '\t\t\t\tgoto trunc;'), (238, '\t\t\t ipaddr_string(ndo, &bp[2]),'), (239, '\t\t\t bp[1] & 0x3f));'), (321, '\t\tif (ndo->ndo_vflag) {'), (322, '\t\t\tif (len < 8)'), (323, '\t\t\t\tgoto trunc;'), (325, '\t\t}'), (352, '\tif (len < 8)'), (353, '\t\tgoto trunc;'), (401, '\t\tif (len < 4)'), (402, '\t\t\tgoto trunc;'), (405, '\t\tbp += 4;'), (406, '\t\tlen -= 4;'), (407, '\t\tif (len < 1)'), (408, '\t\t\tgoto trunc;'), (409, '\t\tND_TCHECK(bp[0]);'), (410, '\t\tswitch (bp[0] & 0x3) {'), (420, '\t\tif (bp[0] & 0xfc)'), (421, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc));'), (422, '\t\tbp += 1;'), (423, '\t\tlen -= 1;'), (424, '\t\tif (len < 1)'), (425, '\t\t\tgoto trunc;'), (426, '\t\tND_TCHECK(bp[0]);'), (427, '\t\tnentries = bp[0];'), (428, '\t\tbp += 1;'), (429, '\t\tlen -= 1;'), (432, '\t\t\tif (len < 6)'), (433, '\t\t\t\tgoto trunc;'), (464, '\tND_TCHECK(pim->pim_typever);'), (488, ''), (489, 'trunc:'), (490, '\tND_PRINT((ndo, "[|pim]"));'), (491, '\treturn;'), (560, ' const u_char *bp, u_int len, enum pimv2_addrtype at,'), (561, ' u_int addr_len, int silent)'), (564, '\tint hdrlen;'), (566, '\tif (addr_len == 0) {'), (567, '\t\tif (len < 2)'), (568, '\t\t\tgoto trunc;'), (573, '\t\t\taddr_len = (u_int)sizeof(struct in_addr);'), (577, '\t\t\taddr_len = (u_int)sizeof(struct in6_addr);'), (586, '\t\tswitch (addr_len) {'), (601, '\tlen -= hdrlen;'), (604, '\t\tif (len < addr_len)'), (605, '\t\t\tgoto trunc;'), (606, '\t\tND_TCHECK2(bp[0], addr_len);'), (615, '\t\treturn hdrlen + addr_len;'), (618, '\t\tif (len < addr_len + 2)'), (619, '\t\t\tgoto trunc;'), (620, '\t\tND_TCHECK2(bp[0], addr_len + 2);'), (649, '\t\treturn hdrlen + 2 + addr_len;'), (701, '\tint pimv2_addr_len;'), (708, '\tif (len < 2)'), (709, '\t\tgoto trunc;'), (715, '\tif (len < 4)'), (716, '\t\tgoto trunc;'), (717, '\tND_TCHECK(pim->pim_cksum);'), (758, '\tbp += 4;'), (759, '\tlen -= 4;'), (765, '\t\twhile (len > 0) {'), (766, '\t\t\tif (len < 4)'), (767, '\t\t\t\tgoto trunc;'), (776, '\t\t\tlen -= 4;'), (778, '\t\t\tif (len < olen)'), (779, '\t\t\t\tgoto trunc;'), (780, '\t\t\tND_TCHECK2(bp[0], olen);'), (850, '\t\t\t\t\tu_int plen = len;'), (853, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0);'), (854, '\t\t\t\t\t\tif (advance < 0)'), (855, '\t\t\t\t\t\t\tgoto trunc;'), (857, '\t\t\t\t\t\tplen -= advance;'), (870, '\t\t\tlen -= olen;'), (879, '\t\tif (len < 4)'), (880, '\t\t\tgoto trunc;'), (881, '\t\tND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN);'), (886, '\t\t 
EXTRACT_32BITS(bp))));'), (888, '\t\tbp += 4; len -= 4;'), (890, '\t\tif (len == 0)'), (891, '\t\t\tgoto trunc;'), (893, '\t\tND_TCHECK(ip->ip_vhl);'), (896, '\t\t\tND_TCHECK(ip->ip_dst);'), (919, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (920, '\t\t\tgoto trunc;'), (923, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (924, '\t\t\tgoto trunc;'), (977, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (978, '\t\t\t\tgoto trunc;'), (981, '\t\tif (len < 4)'), (982, '\t\t\tgoto trunc;'), (983, '\t\tND_TCHECK2(*bp, 4);'), (997, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (998, '\t\t\t\tgoto trunc;'), (1000, '\t\t\tif (len < 4)'), (1001, '\t\t\t\tgoto trunc;'), (1002, '\t\t\tND_TCHECK2(*bp, 4);'), (1009, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1010, '\t\t\t\t\tgoto trunc;'), (1015, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1016, '\t\t\t\t\tgoto trunc;'), (1028, '\t\tif (len < 2)'), (1029, '\t\t\tgoto trunc;'), (1030, '\t\tND_TCHECK_16BITS(bp);'), (1032, '\t\tbp += 2;'), (1033, '\t\tlen -= 2;'), (1034, '\t\tif (len < 1)'), (1035, '\t\t\tgoto trunc;'), (1036, '\t\tND_TCHECK(bp[0]);'), (1038, '\t\tif (len < 2)'), (1039, '\t\t\tgoto trunc;'), (1040, '\t\tND_TCHECK(bp[2]);'), (1043, '\t\tlen -= 2;'), (1047, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1048, '\t\t\tgoto trunc;'), (1050, '\t\tlen -= advance;'), (1055, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1056, '\t\t\t\tgoto trunc;'), (1058, '\t\t\tlen -= advance;'), (1061, '\t\t\tif (len < 1)'), (1062, '\t\t\t\tgoto trunc;'), (1063, '\t\t\tND_TCHECK(bp[0]);'), (1065, '\t\t\tif (len < 2)'), (1066, '\t\t\t\tgoto trunc;'), (1067, '\t\t\tND_TCHECK(bp[1]);'), (1069, '\t\t\tif (len < 4)'), (1070, '\t\t\t\tgoto trunc;'), (1072, '\t\t\tlen -= 4;'), (1077, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len,'), (1079, '\t\t\t\t\t\t\t\tpimv2_addr_len,'), (1080, '\t\t\t\t\t\t\t\t0)) < 0)'), (1081, '\t\t\t\t\tgoto trunc;'), (1083, '\t\t\t\tlen -= advance;'), (1085, '\t\t\t\tif (len < 2)'), (1086, '\t\t\t\t\tgoto trunc;'), (1087, '\t\t\t\tND_TCHECK_16BITS(bp);'), (1090, '\t\t\t\tif (len < 3)'), (1091, '\t\t\t\t\tgoto trunc;'), (1092, '\t\t\t\tND_TCHECK(bp[2]);'), (1094, '\t\t\t\tif (len < 4)'), (1095, '\t\t\t\t\tgoto trunc;'), (1097, '\t\t\t\tlen -= 4;'), (1105, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1106, '\t\t\tgoto trunc;'), (1109, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1110, '\t\t\tgoto trunc;'), (1112, '\t\tif (len < 8)'), (1113, '\t\t\tgoto trunc;'), (1114, '\t\tND_TCHECK2(*bp, 8);'), (1126, '\t\tif (len < 1)'), (1127, '\t\t\tgoto trunc;'), (1128, '\t\tND_TCHECK(bp[0]);'), (1131, '\t\tif (len < 2)'), (1132, '\t\t\tgoto trunc;'), (1133, '\t\tND_TCHECK(bp[1]);'), (1135, '\t\tif (len < 4)'), (1136, '\t\t\tgoto trunc;'), (1137, '\t\tND_TCHECK_16BITS(&bp[2]);'), (1141, '\t\tlen -= 4;'), (1145, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1146, '\t\t\tgoto trunc;'), (1148, '\t\tlen -= advance;'), (1153, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), 
(1154, '\t\t\t\tgoto trunc;'), (1156, '\t\t\tlen -= advance;'), (1163, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1164, '\t\t\tgoto trunc;'), (1166, '\t\tlen -= advance;'), (1168, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1169, '\t\t\tgoto trunc;'), (1171, '\t\tlen -= advance;'), (1173, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1174, '\t\t\tgoto trunc;'), (1176, '\t\tlen -= advance;'), (1177, '\t\tif (len < 2)'), (1178, '\t\t\tgoto trunc;'), (1179, '\t\tND_TCHECK_16BITS(bp);')], 'deleted': [(176, '\tND_TCHECK2(bp[6], 2);'), (180, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));'), (183, '\tbp += 8;'), (184, '\tlen -= 8;'), (197, '\t\tND_TCHECK2(bp[4], sizeof(struct in_addr));'), (198, '\t\tif (EXTRACT_32BITS(&bp[4]) != 0xffffffff)'), (199, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));'), (200, '\t\tND_TCHECK2(bp[8], 4);'), (201, '\t\tnjoin = EXTRACT_16BITS(&bp[8]);'), (202, '\t\tnprune = EXTRACT_16BITS(&bp[10]);'), (204, '\t\tbp += 12;'), (205, '\t\tlen -= 12;'), (218, '\t\t\tipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));'), (233, '\tregister const u_char *ep;'), (236, '\tep = (const u_char *)ndo->ndo_snapend;'), (237, '\tif (bp >= ep)'), (238, '\t\treturn;'), (239, ''), (305, '\t\tif (ndo->ndo_vflag)'), (382, '\t\tND_TCHECK(bp[4]);'), (383, '\t\tswitch (bp[4] & 0x3) {'), (393, '\t\tif (bp[4] & 0xfc)'), (394, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc));'), (395, '\t\tND_TCHECK(bp[5]);'), (396, '\t\tnentries = bp[5];'), (397, '\t\tbp += 6; len -= 6;'), (424, '\tregister const u_char *ep;'), (427, '\tep = (const u_char *)ndo->ndo_snapend;'), (428, '\tif (bp >= ep)'), (429, '\t\treturn;'), (499, 'static int pimv2_addr_len;'), (500, ''), (527, ' const u_char *bp, enum pimv2_addrtype at, int silent)'), (530, '\tint len, hdrlen;'), (532, '\tND_TCHECK(bp[0]);'), (533, ''), (534, '\tif (pimv2_addr_len == 0) {'), (539, '\t\t\tlen = sizeof(struct in_addr);'), (543, '\t\t\tlen = sizeof(struct in6_addr);'), (552, '\t\tswitch (pimv2_addr_len) {'), (563, '\t\tlen = pimv2_addr_len;'), (570, '\t\tND_TCHECK2(bp[0], len);'), (579, '\t\treturn hdrlen + len;'), (582, '\t\tND_TCHECK2(bp[0], len + 2);'), (611, '\t\treturn hdrlen + 2 + len;'), (719, '\t\tbp += 4;'), (720, '\t\twhile (bp < ep) {'), (724, '\t\t\tND_TCHECK2(bp[0], 4 + olen);'), (802, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0);'), (803, '\t\t\t\t\t\tif (advance < 0) {'), (804, '\t\t\t\t\t\t\tND_PRINT((ndo, "..."));'), (805, '\t\t\t\t\t\t\tbreak;'), (806, '\t\t\t\t\t\t}'), (828, '\t\tND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN);'), (833, '\t\t EXTRACT_32BITS(bp+4))));'), (835, '\t\tbp += 8; len -= 8;'), (861, '\t\tbp += 4; len -= 4;'), (862, '\t\tif (bp >= ep)'), (863, '\t\t\tbreak;'), (865, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (866, '\t\t\tND_PRINT((ndo, "..."));'), (867, '\t\t\tbreak;'), (868, '\t\t}'), (870, '\t\tif (bp >= ep)'), (871, '\t\t\tbreak;'), (873, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (874, '\t\t\tND_PRINT((ndo, "..."));'), (875, '\t\t\tbreak;'), (876, '\t\t}'), (927, '\t\tbp += 4; len -= 4;'), (929, '\t\t\tif (bp >= ep)'), (930, '\t\t\t\tbreak;'), (932, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (933, '\t\t\t\tND_PRINT((ndo, "..."));'), (934, '\t\t\t\tbreak;'), (935, '\t\t\t}'), (938, '\t\tif (bp + 4 > ep)'), (939, 
'\t\t\tbreak;'), (952, '\t\t\tif (bp >= ep)'), (953, '\t\t\t\tgoto jp_done;'), (955, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (956, '\t\t\t\tND_PRINT((ndo, "...)"));'), (957, '\t\t\t\tgoto jp_done;'), (958, '\t\t\t}'), (960, '\t\t\tif (bp + 4 > ep) {'), (961, '\t\t\t\tND_PRINT((ndo, "...)"));'), (962, '\t\t\t\tgoto jp_done;'), (963, '\t\t\t}'), (970, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (971, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (972, '\t\t\t\t\tgoto jp_done;'), (973, '\t\t\t\t}'), (978, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (979, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (980, '\t\t\t\t\tgoto jp_done;'), (981, '\t\t\t\t}'), (985, '\tjp_done:'), (992, '\t\tbp += 4;'), (995, '\t\tif (bp + sizeof(uint16_t) >= ep) break;'), (997, '\t\tbp += sizeof(uint16_t);'), (998, '\t\tif (bp >= ep) break;'), (1000, '\t\tif (bp + 1 >= ep) break;'), (1005, '\t\tif (bp >= ep) break;'), (1007, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1008, '\t\t\tND_PRINT((ndo, "..."));'), (1009, '\t\t\tbreak;'), (1010, '\t\t}'), (1016, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1017, '\t\t\t < 0) {'), (1018, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1019, '\t\t\t\tgoto bs_done;'), (1020, '\t\t\t}'), (1024, '\t\t\tif (bp >= ep) {'), (1025, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1026, '\t\t\t\tgoto bs_done;'), (1027, '\t\t\t}'), (1029, '\t\t\tif (bp + 1 >= ep) {'), (1030, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1031, '\t\t\t\tgoto bs_done;'), (1032, '\t\t\t}'), (1039, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp,'), (1041, '\t\t\t\t\t\t\t\t0)) < 0) {'), (1042, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1043, '\t\t\t\t\tgoto bs_done;'), (1044, '\t\t\t\t}'), (1047, '\t\t\t\tif (bp + 1 >= ep) {'), (1048, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1049, '\t\t\t\t\tgoto bs_done;'), (1050, '\t\t\t\t}'), (1053, '\t\t\t\tif (bp + 2 >= ep) {'), (1054, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1055, '\t\t\t\t\tgoto bs_done;'), (1056, '\t\t\t\t}'), (1062, '\t bs_done:'), (1066, '\t\tbp += 4; len -= 4;'), (1067, '\t\tif (bp >= ep)'), (1068, '\t\t\tbreak;'), (1070, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1071, '\t\t\tND_PRINT((ndo, "..."));'), (1072, '\t\t\tbreak;'), (1073, '\t\t}'), (1075, '\t\tif (bp >= ep)'), (1076, '\t\t\tbreak;'), (1078, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1079, '\t\t\tND_PRINT((ndo, "..."));'), (1080, '\t\t\tbreak;'), (1081, '\t\t}'), (1083, '\t\tif (bp + 8 > ep)'), (1084, '\t\t\tbreak;'), (1094, '\t\tbp += 4;'), (1097, '\t\tif (bp >= ep) break;'), (1100, '\t\tif (bp + 1 >= ep) break;'), (1102, '\t\tif (bp + 3 >= ep) break;'), (1108, '\t\tif (bp >= ep) break;'), (1110, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1111, '\t\t\tND_PRINT((ndo, "..."));'), (1112, '\t\t\tbreak;'), (1113, '\t\t}'), (1119, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1120, '\t\t\t < 0) {'), (1121, '\t\t\t\tND_PRINT((ndo, "..."));'), (1122, '\t\t\t\tbreak;'), (1123, '\t\t\t}'), (1131, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1132, '\t\t\tND_PRINT((ndo, "..."));'), (1133, '\t\t\tbreak;'), (1134, '\t\t}'), (1137, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1138, '\t\t\tND_PRINT((ndo, "..."));'), (1139, '\t\t\tbreak;'), (1140, '\t\t}'), (1143, '\t\tif ((advance = pimv2_addr_print(ndo, bp, 
pimv2_unicast, 0)) < 0) {'), (1144, '\t\t\tND_PRINT((ndo, "..."));'), (1145, '\t\t\tbreak;'), (1146, '\t\t}'), (1148, '\t\tND_TCHECK2(bp[0], 2);')]}
207
176
890
5,856
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13030
['CWE-125']
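The CVE-2017-13030 diff recorded above applies one pattern throughout print-pim.c: before every fixed-size read the remaining payload length is checked and the ND_TCHECK capture-bounds check is performed, and only then are the cursor and the remaining length advanced together (bp += N; len -= N). What follows is a minimal standalone sketch of that pattern, not tcpdump code; the function name, the field layout and the return convention are illustrative assumptions.

/*
 * Sketch of the length-tracking pattern from the diff above (hypothetical
 * field layout): refuse to read past the remaining payload, and report how
 * many bytes were consumed so the caller can advance bp and len in step.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static int print_holdtime(const uint8_t *bp, size_t len)
{
    if (len < 4)                 /* need prio (1), pad (1), holdtime (2) */
        return -1;               /* truncated: caller prints "[|pim]"    */
    printf("prio=%u ", (unsigned)bp[0]);
    printf("holdtime=%u\n", (unsigned)((bp[2] << 8) | bp[3]));
    return 4;                    /* caller then does bp += 4; len -= 4;  */
}

int main(void)
{
    const uint8_t pkt[] = { 0x01, 0x00, 0x00, 0x69 };
    if (print_holdtime(pkt, sizeof(pkt)) < 0)
        printf("[|pim]\n");
    return 0;
}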
print-atm.c
atm_if_print
/* * Copyright (c) 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Asynchronous Transfer Mode (ATM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #include "addrtoname.h" #include "atm.h" #include "llc.h" /* start of the original atmuni31.h */ /* * Copyright (c) 1997 Yen Yen Lim and North Dakota State University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Yen Yen Lim and North Dakota State University * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* Based on UNI3.1 standard by ATM Forum */ /* ATM traffic types based on VPI=0 and (the following VCI */ #define VCI_PPC 0x05 /* Point-to-point signal msg */ #define VCI_BCC 0x02 /* Broadcast signal msg */ #define VCI_OAMF4SC 0x03 /* Segment OAM F4 flow cell */ #define VCI_OAMF4EC 0x04 /* End-to-end OAM F4 flow cell */ #define VCI_METAC 0x01 /* Meta signal msg */ #define VCI_ILMIC 0x10 /* ILMI msg */ /* Q.2931 signalling messages */ #define CALL_PROCEED 0x02 /* call proceeding */ #define CONNECT 0x07 /* connect */ #define CONNECT_ACK 0x0f /* connect_ack */ #define SETUP 0x05 /* setup */ #define RELEASE 0x4d /* release */ #define RELEASE_DONE 0x5a /* release_done */ #define RESTART 0x46 /* restart */ #define RESTART_ACK 0x4e /* restart ack */ #define STATUS 0x7d /* status */ #define STATUS_ENQ 0x75 /* status ack */ #define ADD_PARTY 0x80 /* add party */ #define ADD_PARTY_ACK 0x81 /* add party ack */ #define ADD_PARTY_REJ 0x82 /* add party rej */ #define DROP_PARTY 0x83 /* drop party */ #define DROP_PARTY_ACK 0x84 /* drop party ack */ /* Information Element Parameters in the signalling messages */ #define CAUSE 0x08 /* cause */ #define ENDPT_REF 0x54 /* endpoint reference */ #define AAL_PARA 0x58 /* ATM adaptation layer parameters */ #define TRAFF_DESCRIP 0x59 /* atm traffic descriptors */ #define CONNECT_ID 0x5a /* connection identifier */ #define QOS_PARA 0x5c /* quality of service parameters */ #define B_HIGHER 0x5d /* broadband higher layer information */ #define B_BEARER 0x5e /* broadband bearer capability */ #define B_LOWER 0x5f /* broadband lower information */ #define CALLING_PARTY 0x6c /* calling party number */ #define CALLED_PARTY 0x70 /* called party nmber */ #define Q2931 0x09 /* Q.2931 signalling general messages format */ #define PROTO_POS 0 /* offset of protocol discriminator */ #define CALL_REF_POS 2 /* offset of call reference value */ #define MSG_TYPE_POS 5 /* offset of message type */ #define MSG_LEN_POS 7 /* offset of mesage length */ #define IE_BEGIN_POS 9 /* offset of first information element */ /* format of signalling messages */ #define TYPE_POS 0 #define LEN_POS 2 #define FIELD_BEGIN_POS 4 /* end of the original atmuni31.h */ static const char tstr[] = "[|atm]"; #define OAM_CRC10_MASK 0x3ff #define OAM_PAYLOAD_LEN 48 #define OAM_FUNCTION_SPECIFIC_LEN 45 /* this excludes crc10 and cell-type/function-type */ #define OAM_CELLTYPE_FUNCTYPE_LEN 1 static const struct tok oam_f_values[] = { { VCI_OAMF4SC, "OAM F4 (segment)" }, { VCI_OAMF4EC, "OAM F4 (end)" }, { 0, NULL } }; static const struct tok atm_pty_values[] = { { 0x0, "user data, uncongested, SDU 0" }, { 0x1, "user data, uncongested, SDU 1" }, { 0x2, "user data, congested, SDU 0" }, { 0x3, "user data, congested, SDU 1" }, { 0x4, "VCC OAM F5 flow segment" }, { 0x5, "VCC OAM F5 flow end-to-end" }, { 0x6, "Traffic Control and resource Mgmt" }, { 0, NULL } }; #define OAM_CELLTYPE_FM 0x1 #define OAM_CELLTYPE_PM 0x2 #define OAM_CELLTYPE_AD 0x8 #define OAM_CELLTYPE_SM 0xf static const struct tok oam_celltype_values[] = { { OAM_CELLTYPE_FM, "Fault Management" }, { OAM_CELLTYPE_PM, "Performance Management" }, { OAM_CELLTYPE_AD, "activate/deactivate" }, { OAM_CELLTYPE_SM, "System Management" }, { 0, NULL } }; #define OAM_FM_FUNCTYPE_AIS 0x0 #define OAM_FM_FUNCTYPE_RDI 0x1 #define OAM_FM_FUNCTYPE_CONTCHECK 0x4 #define OAM_FM_FUNCTYPE_LOOPBACK 0x8 static const struct tok oam_fm_functype_values[] = { { OAM_FM_FUNCTYPE_AIS, "AIS" }, { OAM_FM_FUNCTYPE_RDI, "RDI" }, { OAM_FM_FUNCTYPE_CONTCHECK, "Continuity Check" }, { 
OAM_FM_FUNCTYPE_LOOPBACK, "Loopback" }, { 0, NULL } }; static const struct tok oam_pm_functype_values[] = { { 0x0, "Forward Monitoring" }, { 0x1, "Backward Reporting" }, { 0x2, "Monitoring and Reporting" }, { 0, NULL } }; static const struct tok oam_ad_functype_values[] = { { 0x0, "Performance Monitoring" }, { 0x1, "Continuity Check" }, { 0, NULL } }; #define OAM_FM_LOOPBACK_INDICATOR_MASK 0x1 static const struct tok oam_fm_loopback_indicator_values[] = { { 0x0, "Reply" }, { 0x1, "Request" }, { 0, NULL } }; static const struct tok *oam_functype_values[16] = { NULL, oam_fm_functype_values, /* 1 */ oam_pm_functype_values, /* 2 */ NULL, NULL, NULL, NULL, NULL, oam_ad_functype_values, /* 8 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL }; /* * Print an RFC 1483 LLC-encapsulated ATM frame. */ static u_int atm_llc_print(netdissect_options *ndo, const u_char *p, int length, int caplen) { int llc_hdrlen; llc_hdrlen = llc_print(ndo, p, length, caplen, NULL, NULL); if (llc_hdrlen < 0) { /* packet not known, print raw packet */ if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); llc_hdrlen = -llc_hdrlen; } return (llc_hdrlen); } /* * Given a SAP value, generate the LLC header value for a UI packet * with that SAP as the source and destination SAP. */ #define LLC_UI_HDR(sap) ((sap)<<16 | (sap<<8) | 0x03) /* * This is the top level routine of the printer. 'p' points * to the LLC/SNAP header of the packet, 'h->ts' is the timestamp, * 'h->len' is the length of the packet off the wire, and 'h->caplen' * is the number of bytes actually captured. */ u_int atm_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int length = h->len; uint32_t llchdr; u_int hdrlen = 0; if (caplen < 1 || length < 1) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* Cisco Style NLPID ? */ if (*p == LLC_UI) { if (ndo->ndo_eflag) ND_PRINT((ndo, "CNLPID ")); isoclns_print(ndo, p + 1, length - 1, caplen - 1); return hdrlen; } /* * Must have at least a DSAP, an SSAP, and the first byte of the * control field. */ if (caplen < 3 || length < 3) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* * Extract the presumed LLC header into a variable, for quick * testing. * Then check for a header that's neither a header for a SNAP * packet nor an RFC 2684 routed NLPID-formatted PDU nor * an 802.2-but-no-SNAP IP packet. */ llchdr = EXTRACT_24BITS(p); if (llchdr != LLC_UI_HDR(LLCSAP_SNAP) && llchdr != LLC_UI_HDR(LLCSAP_ISONS) && llchdr != LLC_UI_HDR(LLCSAP_IP)) { /* * XXX - assume 802.6 MAC header from Fore driver. * * Unfortunately, the above list doesn't check for * all known SAPs, doesn't check for headers where * the source and destination SAP aren't the same, * and doesn't check for non-UI frames. It also * runs the risk of an 802.6 MAC header that happens * to begin with one of those values being * incorrectly treated as an 802.2 header. * * So is that Fore driver still around? And, if so, * is it still putting 802.6 MAC headers on ATM * packets? If so, could it be changed to use a * new DLT_IEEE802_6 value if we added it? */ if (caplen < 20 || length < 20) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } if (ndo->ndo_eflag) ND_PRINT((ndo, "%08x%08x %08x%08x ", EXTRACT_32BITS(p), EXTRACT_32BITS(p+4), EXTRACT_32BITS(p+8), EXTRACT_32BITS(p+12))); p += 20; length -= 20; caplen -= 20; hdrlen += 20; } hdrlen += atm_llc_print(ndo, p, length, caplen); return (hdrlen); } /* * ATM signalling. 
*/ static const struct tok msgtype2str[] = { { CALL_PROCEED, "Call_proceeding" }, { CONNECT, "Connect" }, { CONNECT_ACK, "Connect_ack" }, { SETUP, "Setup" }, { RELEASE, "Release" }, { RELEASE_DONE, "Release_complete" }, { RESTART, "Restart" }, { RESTART_ACK, "Restart_ack" }, { STATUS, "Status" }, { STATUS_ENQ, "Status_enquiry" }, { ADD_PARTY, "Add_party" }, { ADD_PARTY_ACK, "Add_party_ack" }, { ADD_PARTY_REJ, "Add_party_reject" }, { DROP_PARTY, "Drop_party" }, { DROP_PARTY_ACK, "Drop_party_ack" }, { 0, NULL } }; static void sig_print(netdissect_options *ndo, const u_char *p) { uint32_t call_ref; ND_TCHECK(p[PROTO_POS]); if (p[PROTO_POS] == Q2931) { /* * protocol:Q.2931 for User to Network Interface * (UNI 3.1) signalling */ ND_PRINT((ndo, "Q.2931")); ND_TCHECK(p[MSG_TYPE_POS]); ND_PRINT((ndo, ":%s ", tok2str(msgtype2str, "msgtype#%d", p[MSG_TYPE_POS]))); /* * The call reference comes before the message type, * so if we know we have the message type, which we * do from the caplen test above, we also know we have * the call reference. */ call_ref = EXTRACT_24BITS(&p[CALL_REF_POS]); ND_PRINT((ndo, "CALL_REF:0x%06x", call_ref)); } else { /* SSCOP with some unknown protocol atop it */ ND_PRINT((ndo, "SSCOP, proto %d ", p[PROTO_POS])); } return; trunc: ND_PRINT((ndo, " %s", tstr)); } /* * Print an ATM PDU (such as an AAL5 PDU). */ void atm_print(netdissect_options *ndo, u_int vpi, u_int vci, u_int traftype, const u_char *p, u_int length, u_int caplen) { if (ndo->ndo_eflag) ND_PRINT((ndo, "VPI:%u VCI:%u ", vpi, vci)); if (vpi == 0) { switch (vci) { case VCI_PPC: sig_print(ndo, p); return; case VCI_BCC: ND_PRINT((ndo, "broadcast sig: ")); return; case VCI_OAMF4SC: /* fall through */ case VCI_OAMF4EC: oam_print(ndo, p, length, ATM_OAM_HEC); return; case VCI_METAC: ND_PRINT((ndo, "meta: ")); return; case VCI_ILMIC: ND_PRINT((ndo, "ilmi: ")); snmp_print(ndo, p, length); return; } } switch (traftype) { case ATM_LLC: default: /* * Assumes traffic is LLC if unknown. 
*/ atm_llc_print(ndo, p, length, caplen); break; case ATM_LANE: lane_print(ndo, p, length, caplen); break; } } struct oam_fm_loopback_t { uint8_t loopback_indicator; uint8_t correlation_tag[4]; uint8_t loopback_id[12]; uint8_t source_id[12]; uint8_t unused[16]; }; struct oam_fm_ais_rdi_t { uint8_t failure_type; uint8_t failure_location[16]; uint8_t unused[28]; }; void oam_print (netdissect_options *ndo, const u_char *p, u_int length, u_int hec) { uint32_t cell_header; uint16_t vpi, vci, cksum, cksum_shouldbe, idx; uint8_t cell_type, func_type, payload, clp; union { const struct oam_fm_loopback_t *oam_fm_loopback; const struct oam_fm_ais_rdi_t *oam_fm_ais_rdi; } oam_ptr; ND_TCHECK(*(p+ATM_HDR_LEN_NOHEC+hec)); cell_header = EXTRACT_32BITS(p+hec); cell_type = ((*(p+ATM_HDR_LEN_NOHEC+hec))>>4) & 0x0f; func_type = (*(p+ATM_HDR_LEN_NOHEC+hec)) & 0x0f; vpi = (cell_header>>20)&0xff; vci = (cell_header>>4)&0xffff; payload = (cell_header>>1)&0x7; clp = cell_header&0x1; ND_PRINT((ndo, "%s, vpi %u, vci %u, payload [ %s ], clp %u, length %u", tok2str(oam_f_values, "OAM F5", vci), vpi, vci, tok2str(atm_pty_values, "Unknown", payload), clp, length)); if (!ndo->ndo_vflag) { return; } ND_PRINT((ndo, "\n\tcell-type %s (%u)", tok2str(oam_celltype_values, "unknown", cell_type), cell_type)); if (oam_functype_values[cell_type] == NULL) ND_PRINT((ndo, ", func-type unknown (%u)", func_type)); else ND_PRINT((ndo, ", func-type %s (%u)", tok2str(oam_functype_values[cell_type],"none",func_type), func_type)); p += ATM_HDR_LEN_NOHEC + hec; switch (cell_type << 4 | func_type) { case (OAM_CELLTYPE_FM << 4 | OAM_FM_FUNCTYPE_LOOPBACK): oam_ptr.oam_fm_loopback = (const struct oam_fm_loopback_t *)(p + OAM_CELLTYPE_FUNCTYPE_LEN); ND_TCHECK(*oam_ptr.oam_fm_loopback); ND_PRINT((ndo, "\n\tLoopback-Indicator %s, Correlation-Tag 0x%08x", tok2str(oam_fm_loopback_indicator_values, "Unknown", oam_ptr.oam_fm_loopback->loopback_indicator & OAM_FM_LOOPBACK_INDICATOR_MASK), EXTRACT_32BITS(&oam_ptr.oam_fm_loopback->correlation_tag))); ND_PRINT((ndo, "\n\tLocation-ID ")); for (idx = 0; idx < sizeof(oam_ptr.oam_fm_loopback->loopback_id); idx++) { if (idx % 2) { ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(&oam_ptr.oam_fm_loopback->loopback_id[idx]))); } } ND_PRINT((ndo, "\n\tSource-ID ")); for (idx = 0; idx < sizeof(oam_ptr.oam_fm_loopback->source_id); idx++) { if (idx % 2) { ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(&oam_ptr.oam_fm_loopback->source_id[idx]))); } } break; case (OAM_CELLTYPE_FM << 4 | OAM_FM_FUNCTYPE_AIS): case (OAM_CELLTYPE_FM << 4 | OAM_FM_FUNCTYPE_RDI): oam_ptr.oam_fm_ais_rdi = (const struct oam_fm_ais_rdi_t *)(p + OAM_CELLTYPE_FUNCTYPE_LEN); ND_TCHECK(*oam_ptr.oam_fm_ais_rdi); ND_PRINT((ndo, "\n\tFailure-type 0x%02x", oam_ptr.oam_fm_ais_rdi->failure_type)); ND_PRINT((ndo, "\n\tLocation-ID ")); for (idx = 0; idx < sizeof(oam_ptr.oam_fm_ais_rdi->failure_location); idx++) { if (idx % 2) { ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(&oam_ptr.oam_fm_ais_rdi->failure_location[idx]))); } } break; case (OAM_CELLTYPE_FM << 4 | OAM_FM_FUNCTYPE_CONTCHECK): /* FIXME */ break; default: break; } /* crc10 checksum verification */ ND_TCHECK2(*(p + OAM_CELLTYPE_FUNCTYPE_LEN + OAM_FUNCTION_SPECIFIC_LEN), 2); cksum = EXTRACT_16BITS(p + OAM_CELLTYPE_FUNCTYPE_LEN + OAM_FUNCTION_SPECIFIC_LEN) & OAM_CRC10_MASK; cksum_shouldbe = verify_crc10_cksum(0, p, OAM_PAYLOAD_LEN); ND_PRINT((ndo, "\n\tcksum 0x%03x (%scorrect)", cksum, cksum_shouldbe == 0 ? "" : "in")); return; trunc: ND_PRINT((ndo, "[|oam]")); return; }
/* * Copyright (c) 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Asynchronous Transfer Mode (ATM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #include "addrtoname.h" #include "atm.h" #include "llc.h" /* start of the original atmuni31.h */ /* * Copyright (c) 1997 Yen Yen Lim and North Dakota State University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Yen Yen Lim and North Dakota State University * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* Based on UNI3.1 standard by ATM Forum */ /* ATM traffic types based on VPI=0 and (the following VCI */ #define VCI_PPC 0x05 /* Point-to-point signal msg */ #define VCI_BCC 0x02 /* Broadcast signal msg */ #define VCI_OAMF4SC 0x03 /* Segment OAM F4 flow cell */ #define VCI_OAMF4EC 0x04 /* End-to-end OAM F4 flow cell */ #define VCI_METAC 0x01 /* Meta signal msg */ #define VCI_ILMIC 0x10 /* ILMI msg */ /* Q.2931 signalling messages */ #define CALL_PROCEED 0x02 /* call proceeding */ #define CONNECT 0x07 /* connect */ #define CONNECT_ACK 0x0f /* connect_ack */ #define SETUP 0x05 /* setup */ #define RELEASE 0x4d /* release */ #define RELEASE_DONE 0x5a /* release_done */ #define RESTART 0x46 /* restart */ #define RESTART_ACK 0x4e /* restart ack */ #define STATUS 0x7d /* status */ #define STATUS_ENQ 0x75 /* status ack */ #define ADD_PARTY 0x80 /* add party */ #define ADD_PARTY_ACK 0x81 /* add party ack */ #define ADD_PARTY_REJ 0x82 /* add party rej */ #define DROP_PARTY 0x83 /* drop party */ #define DROP_PARTY_ACK 0x84 /* drop party ack */ /* Information Element Parameters in the signalling messages */ #define CAUSE 0x08 /* cause */ #define ENDPT_REF 0x54 /* endpoint reference */ #define AAL_PARA 0x58 /* ATM adaptation layer parameters */ #define TRAFF_DESCRIP 0x59 /* atm traffic descriptors */ #define CONNECT_ID 0x5a /* connection identifier */ #define QOS_PARA 0x5c /* quality of service parameters */ #define B_HIGHER 0x5d /* broadband higher layer information */ #define B_BEARER 0x5e /* broadband bearer capability */ #define B_LOWER 0x5f /* broadband lower information */ #define CALLING_PARTY 0x6c /* calling party number */ #define CALLED_PARTY 0x70 /* called party nmber */ #define Q2931 0x09 /* Q.2931 signalling general messages format */ #define PROTO_POS 0 /* offset of protocol discriminator */ #define CALL_REF_POS 2 /* offset of call reference value */ #define MSG_TYPE_POS 5 /* offset of message type */ #define MSG_LEN_POS 7 /* offset of mesage length */ #define IE_BEGIN_POS 9 /* offset of first information element */ /* format of signalling messages */ #define TYPE_POS 0 #define LEN_POS 2 #define FIELD_BEGIN_POS 4 /* end of the original atmuni31.h */ static const char tstr[] = "[|atm]"; #define OAM_CRC10_MASK 0x3ff #define OAM_PAYLOAD_LEN 48 #define OAM_FUNCTION_SPECIFIC_LEN 45 /* this excludes crc10 and cell-type/function-type */ #define OAM_CELLTYPE_FUNCTYPE_LEN 1 static const struct tok oam_f_values[] = { { VCI_OAMF4SC, "OAM F4 (segment)" }, { VCI_OAMF4EC, "OAM F4 (end)" }, { 0, NULL } }; static const struct tok atm_pty_values[] = { { 0x0, "user data, uncongested, SDU 0" }, { 0x1, "user data, uncongested, SDU 1" }, { 0x2, "user data, congested, SDU 0" }, { 0x3, "user data, congested, SDU 1" }, { 0x4, "VCC OAM F5 flow segment" }, { 0x5, "VCC OAM F5 flow end-to-end" }, { 0x6, "Traffic Control and resource Mgmt" }, { 0, NULL } }; #define OAM_CELLTYPE_FM 0x1 #define OAM_CELLTYPE_PM 0x2 #define OAM_CELLTYPE_AD 0x8 #define OAM_CELLTYPE_SM 0xf static const struct tok oam_celltype_values[] = { { OAM_CELLTYPE_FM, "Fault Management" }, { OAM_CELLTYPE_PM, "Performance Management" }, { OAM_CELLTYPE_AD, "activate/deactivate" }, { OAM_CELLTYPE_SM, "System Management" }, { 0, NULL } }; #define OAM_FM_FUNCTYPE_AIS 0x0 #define OAM_FM_FUNCTYPE_RDI 0x1 #define OAM_FM_FUNCTYPE_CONTCHECK 0x4 #define OAM_FM_FUNCTYPE_LOOPBACK 0x8 static const struct tok oam_fm_functype_values[] = { { OAM_FM_FUNCTYPE_AIS, "AIS" }, { OAM_FM_FUNCTYPE_RDI, "RDI" }, { OAM_FM_FUNCTYPE_CONTCHECK, "Continuity Check" }, { 
OAM_FM_FUNCTYPE_LOOPBACK, "Loopback" }, { 0, NULL } }; static const struct tok oam_pm_functype_values[] = { { 0x0, "Forward Monitoring" }, { 0x1, "Backward Reporting" }, { 0x2, "Monitoring and Reporting" }, { 0, NULL } }; static const struct tok oam_ad_functype_values[] = { { 0x0, "Performance Monitoring" }, { 0x1, "Continuity Check" }, { 0, NULL } }; #define OAM_FM_LOOPBACK_INDICATOR_MASK 0x1 static const struct tok oam_fm_loopback_indicator_values[] = { { 0x0, "Reply" }, { 0x1, "Request" }, { 0, NULL } }; static const struct tok *oam_functype_values[16] = { NULL, oam_fm_functype_values, /* 1 */ oam_pm_functype_values, /* 2 */ NULL, NULL, NULL, NULL, NULL, oam_ad_functype_values, /* 8 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL }; /* * Print an RFC 1483 LLC-encapsulated ATM frame. */ static u_int atm_llc_print(netdissect_options *ndo, const u_char *p, int length, int caplen) { int llc_hdrlen; llc_hdrlen = llc_print(ndo, p, length, caplen, NULL, NULL); if (llc_hdrlen < 0) { /* packet not known, print raw packet */ if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); llc_hdrlen = -llc_hdrlen; } return (llc_hdrlen); } /* * Given a SAP value, generate the LLC header value for a UI packet * with that SAP as the source and destination SAP. */ #define LLC_UI_HDR(sap) ((sap)<<16 | (sap<<8) | 0x03) /* * This is the top level routine of the printer. 'p' points * to the LLC/SNAP header of the packet, 'h->ts' is the timestamp, * 'h->len' is the length of the packet off the wire, and 'h->caplen' * is the number of bytes actually captured. */ u_int atm_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int length = h->len; uint32_t llchdr; u_int hdrlen = 0; if (caplen < 1 || length < 1) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* Cisco Style NLPID ? */ if (*p == LLC_UI) { if (ndo->ndo_eflag) ND_PRINT((ndo, "CNLPID ")); isoclns_print(ndo, p + 1, length - 1); return hdrlen; } /* * Must have at least a DSAP, an SSAP, and the first byte of the * control field. */ if (caplen < 3 || length < 3) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* * Extract the presumed LLC header into a variable, for quick * testing. * Then check for a header that's neither a header for a SNAP * packet nor an RFC 2684 routed NLPID-formatted PDU nor * an 802.2-but-no-SNAP IP packet. */ llchdr = EXTRACT_24BITS(p); if (llchdr != LLC_UI_HDR(LLCSAP_SNAP) && llchdr != LLC_UI_HDR(LLCSAP_ISONS) && llchdr != LLC_UI_HDR(LLCSAP_IP)) { /* * XXX - assume 802.6 MAC header from Fore driver. * * Unfortunately, the above list doesn't check for * all known SAPs, doesn't check for headers where * the source and destination SAP aren't the same, * and doesn't check for non-UI frames. It also * runs the risk of an 802.6 MAC header that happens * to begin with one of those values being * incorrectly treated as an 802.2 header. * * So is that Fore driver still around? And, if so, * is it still putting 802.6 MAC headers on ATM * packets? If so, could it be changed to use a * new DLT_IEEE802_6 value if we added it? */ if (caplen < 20 || length < 20) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } if (ndo->ndo_eflag) ND_PRINT((ndo, "%08x%08x %08x%08x ", EXTRACT_32BITS(p), EXTRACT_32BITS(p+4), EXTRACT_32BITS(p+8), EXTRACT_32BITS(p+12))); p += 20; length -= 20; caplen -= 20; hdrlen += 20; } hdrlen += atm_llc_print(ndo, p, length, caplen); return (hdrlen); } /* * ATM signalling. 
*/ static const struct tok msgtype2str[] = { { CALL_PROCEED, "Call_proceeding" }, { CONNECT, "Connect" }, { CONNECT_ACK, "Connect_ack" }, { SETUP, "Setup" }, { RELEASE, "Release" }, { RELEASE_DONE, "Release_complete" }, { RESTART, "Restart" }, { RESTART_ACK, "Restart_ack" }, { STATUS, "Status" }, { STATUS_ENQ, "Status_enquiry" }, { ADD_PARTY, "Add_party" }, { ADD_PARTY_ACK, "Add_party_ack" }, { ADD_PARTY_REJ, "Add_party_reject" }, { DROP_PARTY, "Drop_party" }, { DROP_PARTY_ACK, "Drop_party_ack" }, { 0, NULL } }; static void sig_print(netdissect_options *ndo, const u_char *p) { uint32_t call_ref; ND_TCHECK(p[PROTO_POS]); if (p[PROTO_POS] == Q2931) { /* * protocol:Q.2931 for User to Network Interface * (UNI 3.1) signalling */ ND_PRINT((ndo, "Q.2931")); ND_TCHECK(p[MSG_TYPE_POS]); ND_PRINT((ndo, ":%s ", tok2str(msgtype2str, "msgtype#%d", p[MSG_TYPE_POS]))); /* * The call reference comes before the message type, * so if we know we have the message type, which we * do from the caplen test above, we also know we have * the call reference. */ call_ref = EXTRACT_24BITS(&p[CALL_REF_POS]); ND_PRINT((ndo, "CALL_REF:0x%06x", call_ref)); } else { /* SSCOP with some unknown protocol atop it */ ND_PRINT((ndo, "SSCOP, proto %d ", p[PROTO_POS])); } return; trunc: ND_PRINT((ndo, " %s", tstr)); } /* * Print an ATM PDU (such as an AAL5 PDU). */ void atm_print(netdissect_options *ndo, u_int vpi, u_int vci, u_int traftype, const u_char *p, u_int length, u_int caplen) { if (ndo->ndo_eflag) ND_PRINT((ndo, "VPI:%u VCI:%u ", vpi, vci)); if (vpi == 0) { switch (vci) { case VCI_PPC: sig_print(ndo, p); return; case VCI_BCC: ND_PRINT((ndo, "broadcast sig: ")); return; case VCI_OAMF4SC: /* fall through */ case VCI_OAMF4EC: oam_print(ndo, p, length, ATM_OAM_HEC); return; case VCI_METAC: ND_PRINT((ndo, "meta: ")); return; case VCI_ILMIC: ND_PRINT((ndo, "ilmi: ")); snmp_print(ndo, p, length); return; } } switch (traftype) { case ATM_LLC: default: /* * Assumes traffic is LLC if unknown. 
*/ atm_llc_print(ndo, p, length, caplen); break; case ATM_LANE: lane_print(ndo, p, length, caplen); break; } } struct oam_fm_loopback_t { uint8_t loopback_indicator; uint8_t correlation_tag[4]; uint8_t loopback_id[12]; uint8_t source_id[12]; uint8_t unused[16]; }; struct oam_fm_ais_rdi_t { uint8_t failure_type; uint8_t failure_location[16]; uint8_t unused[28]; }; void oam_print (netdissect_options *ndo, const u_char *p, u_int length, u_int hec) { uint32_t cell_header; uint16_t vpi, vci, cksum, cksum_shouldbe, idx; uint8_t cell_type, func_type, payload, clp; union { const struct oam_fm_loopback_t *oam_fm_loopback; const struct oam_fm_ais_rdi_t *oam_fm_ais_rdi; } oam_ptr; ND_TCHECK(*(p+ATM_HDR_LEN_NOHEC+hec)); cell_header = EXTRACT_32BITS(p+hec); cell_type = ((*(p+ATM_HDR_LEN_NOHEC+hec))>>4) & 0x0f; func_type = (*(p+ATM_HDR_LEN_NOHEC+hec)) & 0x0f; vpi = (cell_header>>20)&0xff; vci = (cell_header>>4)&0xffff; payload = (cell_header>>1)&0x7; clp = cell_header&0x1; ND_PRINT((ndo, "%s, vpi %u, vci %u, payload [ %s ], clp %u, length %u", tok2str(oam_f_values, "OAM F5", vci), vpi, vci, tok2str(atm_pty_values, "Unknown", payload), clp, length)); if (!ndo->ndo_vflag) { return; } ND_PRINT((ndo, "\n\tcell-type %s (%u)", tok2str(oam_celltype_values, "unknown", cell_type), cell_type)); if (oam_functype_values[cell_type] == NULL) ND_PRINT((ndo, ", func-type unknown (%u)", func_type)); else ND_PRINT((ndo, ", func-type %s (%u)", tok2str(oam_functype_values[cell_type],"none",func_type), func_type)); p += ATM_HDR_LEN_NOHEC + hec; switch (cell_type << 4 | func_type) { case (OAM_CELLTYPE_FM << 4 | OAM_FM_FUNCTYPE_LOOPBACK): oam_ptr.oam_fm_loopback = (const struct oam_fm_loopback_t *)(p + OAM_CELLTYPE_FUNCTYPE_LEN); ND_TCHECK(*oam_ptr.oam_fm_loopback); ND_PRINT((ndo, "\n\tLoopback-Indicator %s, Correlation-Tag 0x%08x", tok2str(oam_fm_loopback_indicator_values, "Unknown", oam_ptr.oam_fm_loopback->loopback_indicator & OAM_FM_LOOPBACK_INDICATOR_MASK), EXTRACT_32BITS(&oam_ptr.oam_fm_loopback->correlation_tag))); ND_PRINT((ndo, "\n\tLocation-ID ")); for (idx = 0; idx < sizeof(oam_ptr.oam_fm_loopback->loopback_id); idx++) { if (idx % 2) { ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(&oam_ptr.oam_fm_loopback->loopback_id[idx]))); } } ND_PRINT((ndo, "\n\tSource-ID ")); for (idx = 0; idx < sizeof(oam_ptr.oam_fm_loopback->source_id); idx++) { if (idx % 2) { ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(&oam_ptr.oam_fm_loopback->source_id[idx]))); } } break; case (OAM_CELLTYPE_FM << 4 | OAM_FM_FUNCTYPE_AIS): case (OAM_CELLTYPE_FM << 4 | OAM_FM_FUNCTYPE_RDI): oam_ptr.oam_fm_ais_rdi = (const struct oam_fm_ais_rdi_t *)(p + OAM_CELLTYPE_FUNCTYPE_LEN); ND_TCHECK(*oam_ptr.oam_fm_ais_rdi); ND_PRINT((ndo, "\n\tFailure-type 0x%02x", oam_ptr.oam_fm_ais_rdi->failure_type)); ND_PRINT((ndo, "\n\tLocation-ID ")); for (idx = 0; idx < sizeof(oam_ptr.oam_fm_ais_rdi->failure_location); idx++) { if (idx % 2) { ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(&oam_ptr.oam_fm_ais_rdi->failure_location[idx]))); } } break; case (OAM_CELLTYPE_FM << 4 | OAM_FM_FUNCTYPE_CONTCHECK): /* FIXME */ break; default: break; } /* crc10 checksum verification */ ND_TCHECK2(*(p + OAM_CELLTYPE_FUNCTYPE_LEN + OAM_FUNCTION_SPECIFIC_LEN), 2); cksum = EXTRACT_16BITS(p + OAM_CELLTYPE_FUNCTYPE_LEN + OAM_FUNCTION_SPECIFIC_LEN) & OAM_CRC10_MASK; cksum_shouldbe = verify_crc10_cksum(0, p, OAM_PAYLOAD_LEN); ND_PRINT((ndo, "\n\tcksum 0x%03x (%scorrect)", cksum, cksum_shouldbe == 0 ? "" : "in")); return; trunc: ND_PRINT((ndo, "[|oam]")); return; }
atm_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int length = h->len; uint32_t llchdr; u_int hdrlen = 0; if (caplen < 1 || length < 1) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* Cisco Style NLPID ? */ if (*p == LLC_UI) { if (ndo->ndo_eflag) ND_PRINT((ndo, "CNLPID ")); isoclns_print(ndo, p + 1, length - 1, caplen - 1); return hdrlen; } /* * Must have at least a DSAP, an SSAP, and the first byte of the * control field. */ if (caplen < 3 || length < 3) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* * Extract the presumed LLC header into a variable, for quick * testing. * Then check for a header that's neither a header for a SNAP * packet nor an RFC 2684 routed NLPID-formatted PDU nor * an 802.2-but-no-SNAP IP packet. */ llchdr = EXTRACT_24BITS(p); if (llchdr != LLC_UI_HDR(LLCSAP_SNAP) && llchdr != LLC_UI_HDR(LLCSAP_ISONS) && llchdr != LLC_UI_HDR(LLCSAP_IP)) { /* * XXX - assume 802.6 MAC header from Fore driver. * * Unfortunately, the above list doesn't check for * all known SAPs, doesn't check for headers where * the source and destination SAP aren't the same, * and doesn't check for non-UI frames. It also * runs the risk of an 802.6 MAC header that happens * to begin with one of those values being * incorrectly treated as an 802.2 header. * * So is that Fore driver still around? And, if so, * is it still putting 802.6 MAC headers on ATM * packets? If so, could it be changed to use a * new DLT_IEEE802_6 value if we added it? */ if (caplen < 20 || length < 20) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } if (ndo->ndo_eflag) ND_PRINT((ndo, "%08x%08x %08x%08x ", EXTRACT_32BITS(p), EXTRACT_32BITS(p+4), EXTRACT_32BITS(p+8), EXTRACT_32BITS(p+12))); p += 20; length -= 20; caplen -= 20; hdrlen += 20; } hdrlen += atm_llc_print(ndo, p, length, caplen); return (hdrlen); }
atm_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int length = h->len; uint32_t llchdr; u_int hdrlen = 0; if (caplen < 1 || length < 1) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* Cisco Style NLPID ? */ if (*p == LLC_UI) { if (ndo->ndo_eflag) ND_PRINT((ndo, "CNLPID ")); isoclns_print(ndo, p + 1, length - 1); return hdrlen; } /* * Must have at least a DSAP, an SSAP, and the first byte of the * control field. */ if (caplen < 3 || length < 3) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* * Extract the presumed LLC header into a variable, for quick * testing. * Then check for a header that's neither a header for a SNAP * packet nor an RFC 2684 routed NLPID-formatted PDU nor * an 802.2-but-no-SNAP IP packet. */ llchdr = EXTRACT_24BITS(p); if (llchdr != LLC_UI_HDR(LLCSAP_SNAP) && llchdr != LLC_UI_HDR(LLCSAP_ISONS) && llchdr != LLC_UI_HDR(LLCSAP_IP)) { /* * XXX - assume 802.6 MAC header from Fore driver. * * Unfortunately, the above list doesn't check for * all known SAPs, doesn't check for headers where * the source and destination SAP aren't the same, * and doesn't check for non-UI frames. It also * runs the risk of an 802.6 MAC header that happens * to begin with one of those values being * incorrectly treated as an 802.2 header. * * So is that Fore driver still around? And, if so, * is it still putting 802.6 MAC headers on ATM * packets? If so, could it be changed to use a * new DLT_IEEE802_6 value if we added it? */ if (caplen < 20 || length < 20) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } if (ndo->ndo_eflag) ND_PRINT((ndo, "%08x%08x %08x%08x ", EXTRACT_32BITS(p), EXTRACT_32BITS(p+4), EXTRACT_32BITS(p+8), EXTRACT_32BITS(p+12))); p += 20; length -= 20; caplen -= 20; hdrlen += 20; } hdrlen += atm_llc_print(ndo, p, length, caplen); return (hdrlen); }
{'added': [(265, ' isoclns_print(ndo, p + 1, length - 1);')], 'deleted': [(265, ' isoclns_print(ndo, p + 1, length - 1, caplen - 1);')]}
1
1
304
1,824
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12897
['CWE-125']
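The CVE-2017-12897 fix recorded above is a single-argument change: the atm_if_print() call to isoclns_print() drops the trailing caplen - 1 argument, so the CLNS printer receives only the on-the-wire length and bounds its own reads against the captured data. The sketch below illustrates that callee-side bounding style; it is an assumption-laden stand-in, not the tcpdump implementation, and the names ctx, snapend and read_u8 are invented for illustration (tcpdump's equivalent is the ndo_snapend pointer consulted by the ND_TCHECK macros).

/*
 * Sketch (assumed, not tcpdump code): every read is checked against the end
 * of captured data held in the context, so no second, possibly inconsistent
 * caplen parameter is needed from the caller.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct ctx {
    const uint8_t *snapend;      /* one past the last captured byte */
};

static int read_u8(const struct ctx *ndo, const uint8_t *p, uint8_t *out)
{
    if (p >= ndo->snapend)       /* would read beyond captured data */
        return -1;
    *out = *p;
    return 0;
}

int main(void)
{
    uint8_t buf[] = { 0x81 };                /* only 1 byte captured */
    struct ctx ndo = { buf + sizeof(buf) };
    uint8_t v;
    if (read_u8(&ndo, buf, &v) == 0)
        printf("NLPID 0x%02x\n", v);
    if (read_u8(&ndo, buf + 1, &v) < 0)      /* past the capture: rejected */
        printf("[|clns]\n");
    return 0;
}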
stm32h7xx_eth_driver.c
stm32h7xxEthInitGpio
/** * @file stm32h7xx_eth_driver.c * @brief STM32H7 Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "stm32h7xx.h" #include "stm32h7xx_hal.h" #include "core/net.h" #include "drivers/mac/stm32h7xx_eth_driver.h" #include "debug.h" //Underlying network interface static NetInterface *nicDriverInterface; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer #pragma data_alignment = 4 #pragma location = STM32H7XX_ETH_RAM_SECTION static uint8_t txBuffer[STM32H7XX_ETH_TX_BUFFER_COUNT][STM32H7XX_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 4 #pragma location = STM32H7XX_ETH_RAM_SECTION static uint8_t rxBuffer[STM32H7XX_ETH_RX_BUFFER_COUNT][STM32H7XX_ETH_RX_BUFFER_SIZE]; //Transmit DMA descriptors #pragma data_alignment = 4 #pragma location = STM32H7XX_ETH_RAM_SECTION static Stm32h7xxTxDmaDesc txDmaDesc[STM32H7XX_ETH_TX_BUFFER_COUNT]; //Receive DMA descriptors #pragma data_alignment = 4 #pragma location = STM32H7XX_ETH_RAM_SECTION static Stm32h7xxRxDmaDesc rxDmaDesc[STM32H7XX_ETH_RX_BUFFER_COUNT]; //Keil MDK-ARM or GCC compiler? 
#else //Transmit buffer static uint8_t txBuffer[STM32H7XX_ETH_TX_BUFFER_COUNT][STM32H7XX_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(STM32H7XX_ETH_RAM_SECTION))); //Receive buffer static uint8_t rxBuffer[STM32H7XX_ETH_RX_BUFFER_COUNT][STM32H7XX_ETH_RX_BUFFER_SIZE] __attribute__((aligned(4), __section__(STM32H7XX_ETH_RAM_SECTION))); //Transmit DMA descriptors static Stm32h7xxTxDmaDesc txDmaDesc[STM32H7XX_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(STM32H7XX_ETH_RAM_SECTION))); //Receive DMA descriptors static Stm32h7xxRxDmaDesc rxDmaDesc[STM32H7XX_ETH_RX_BUFFER_COUNT] __attribute__((aligned(4), __section__(STM32H7XX_ETH_RAM_SECTION))); #endif //Current transmit descriptor static uint_t txIndex; //Current receive descriptor static uint_t rxIndex; /** * @brief STM32H7 Ethernet MAC driver **/ const NicDriver stm32h7xxEthDriver = { NIC_TYPE_ETHERNET, ETH_MTU, stm32h7xxEthInit, stm32h7xxEthTick, stm32h7xxEthEnableIrq, stm32h7xxEthDisableIrq, stm32h7xxEthEventHandler, stm32h7xxEthSendPacket, stm32h7xxEthUpdateMacAddrFilter, stm32h7xxEthUpdateMacConfig, stm32h7xxEthWritePhyReg, stm32h7xxEthReadPhyReg, TRUE, TRUE, TRUE, FALSE }; /** * @brief STM32H7 Ethernet MAC initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t stm32h7xxEthInit(NetInterface *interface) { error_t error; //Debug message TRACE_INFO("Initializing STM32H7 Ethernet MAC...\r\n"); //Save underlying network interface nicDriverInterface = interface; //GPIO configuration stm32h7xxEthInitGpio(interface); //Enable Ethernet MAC clock __HAL_RCC_ETH1MAC_CLK_ENABLE(); __HAL_RCC_ETH1TX_CLK_ENABLE(); __HAL_RCC_ETH1RX_CLK_ENABLE(); //Reset Ethernet MAC peripheral __HAL_RCC_ETH1MAC_FORCE_RESET(); __HAL_RCC_ETH1MAC_RELEASE_RESET(); //Perform a software reset ETH->DMAMR |= ETH_DMAMR_SWR; //Wait for the reset to complete while((ETH->DMAMR & ETH_DMAMR_SWR) != 0) { } //Adjust MDC clock range depending on HCLK frequency ETH->MACMDIOAR = ETH_MACMDIOAR_CR_DIV124; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? 
if(error) { return error; } //Use default MAC configuration ETH->MACCR = ETH_MACCR_RESERVED15 | ETH_MACCR_DO; //Set the MAC address of the station ETH->MACA0LR = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); ETH->MACA0HR = interface->macAddr.w[2]; //The MAC supports 3 additional addresses for unicast perfect filtering ETH->MACA1LR = 0; ETH->MACA1HR = 0; ETH->MACA2LR = 0; ETH->MACA2HR = 0; ETH->MACA3LR = 0; ETH->MACA3HR = 0; //Initialize hash table ETH->MACHT0R = 0; ETH->MACHT1R = 0; //Configure the receive filter ETH->MACPFR = ETH_MACPFR_HPF | ETH_MACPFR_HMC; //Disable flow control ETH->MACTFCR = 0; ETH->MACRFCR = 0; //Configure DMA operating mode ETH->DMAMR = ETH_DMAMR_INTM_0 | ETH_DMAMR_PR_1_1; //Configure system bus mode ETH->DMASBMR |= ETH_DMASBMR_AAL; //The DMA takes the descriptor table as contiguous ETH->DMACCR = ETH_DMACCR_DSL_0BIT; //Configure TX features ETH->DMACTCR = ETH_DMACTCR_TPBL_1PBL; //Configure RX features ETH->DMACRCR = ETH_DMACRCR_RPBL_1PBL; ETH->DMACRCR |= (STM32H7XX_ETH_RX_BUFFER_SIZE << 1) & ETH_DMACRCR_RBSZ; //Enable store and forward mode ETH->MTLTQOMR |= ETH_MTLTQOMR_TSF; ETH->MTLRQOMR |= ETH_MTLRQOMR_RSF; //Initialize DMA descriptor lists stm32h7xxEthInitDmaDesc(interface); //Prevent interrupts from being generated when the transmit statistic //counters reach half their maximum value ETH->MMCTIMR = ETH_MMCTIMR_TXLPITRCIM | ETH_MMCTIMR_TXLPIUSCIM | ETH_MMCTIMR_TXGPKTIM | ETH_MMCTIMR_TXMCOLGPIM | ETH_MMCTIMR_TXSCOLGPIM; //Prevent interrupts from being generated when the receive statistic //counters reach half their maximum value ETH->MMCRIMR = ETH_MMCRIMR_RXLPITRCIM | ETH_MMCRIMR_RXLPIUSCIM | ETH_MMCRIMR_RXUCGPIM | ETH_MMCRIMR_RXALGNERPIM | ETH_MMCRIMR_RXCRCERPIM; //Disable MAC interrupts ETH->MACIER = 0; //Enable the desired DMA interrupts ETH->DMACIER = ETH_DMACIER_NIE | ETH_DMACIER_RIE | ETH_DMACIER_TIE; //Set priority grouping (4 bits for pre-emption priority, no bits for subpriority) NVIC_SetPriorityGrouping(STM32H7XX_ETH_IRQ_PRIORITY_GROUPING); //Configure Ethernet interrupt priority NVIC_SetPriority(ETH_IRQn, NVIC_EncodePriority(STM32H7XX_ETH_IRQ_PRIORITY_GROUPING, STM32H7XX_ETH_IRQ_GROUP_PRIORITY, STM32H7XX_ETH_IRQ_SUB_PRIORITY)); //Enable MAC transmission and reception ETH->MACCR |= ETH_MACCR_TE | ETH_MACCR_RE; //Enable DMA transmission and reception ETH->DMACTCR |= ETH_DMACTCR_ST; ETH->DMACRCR |= ETH_DMACRCR_SR; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } //STM32F743I-EVAL, STM32F747I-EVAL, STM32H745I-Discovery, STM32H747I-Discovery, //STM32H750B-DK, Nucleo-H743ZI, Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation board? #if defined(USE_STM32H743I_EVAL) || defined(USE_STM32H747I_EVAL) || \ defined(USE_STM32H745I_DISCO) || defined(USE_STM32H747I_DISCO) || \ defined(USE_STM32H750B_DISCO) || defined(USE_STM32H7XX_NUCLEO_144) || \ defined(USE_STM32H7XX_NUCLEO_144_MB1363) || \ defined(USE_STM32H7XX_NUCLEO_144_MB1364) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void stm32h7xxEthInitGpio(NetInterface *interface) { GPIO_InitTypeDef GPIO_InitStructure; //STM32F743I-EVAL, STM32F747I-EVAL or STM32H747I-Discovery evaluation board? 
#if defined(USE_STM32H743I_EVAL) || defined(USE_STM32H747I_EVAL) || \ defined(USE_STM32H747I_DISCO) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure RMII_TX_EN (PG11), ETH_RMII_TXD1 (PG12) and ETH_RMII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); //STM32H745I-Discovery or STM32H750B-DK evaluation board? #elif defined(USE_STM32H745I_DISCO) || defined(USE_STM32H750B_DISCO) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOE_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); __HAL_RCC_GPIOH_CLK_ENABLE(); __HAL_RCC_GPIOI_CLK_ENABLE(); //Select MII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_MII); //Configure MII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_MII_RX_CLK (PA1), ETH_MDIO (PA2) and ETH_MII_RX_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_MII_RXD2 (PB0), ETH_MII_RXD3 (PB1) and ETH_MII_RX_ER (PB2) GPIO_InitStructure.Pin = GPIO_PIN_0 | GPIO_PIN_1 | GPIO_PIN_2; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_MII_TXD2 (PC2), ETH_MII_TX_CLK (PC3), //ETH_MII_RXD0 (PC4) and ETH_MII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_3 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure ETH_MII_TXD3 (PE2) GPIO_InitStructure.Pin = GPIO_PIN_2; HAL_GPIO_Init(GPIOE, &GPIO_InitStructure); //Configure ETH_MII_TX_EN (PG11), ETH_MII_TXD1 (PG12) and ETH_MII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); //Configure ETH_MII_CRS (PH2) and ETH_MII_COL (PH3) //GPIO_InitStructure.Pin = GPIO_PIN_2 | GPIO_PIN_3; //HAL_GPIO_Init(GPIOH, &GPIO_InitStructure); //Configure ETH_MII_RX_ER (PI10) GPIO_InitStructure.Pin = GPIO_PIN_10; HAL_GPIO_Init(GPIOI, &GPIO_InitStructure); //Nucleo-H743ZI, Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation board? 
#elif defined(USE_STM32H7XX_NUCLEO_144) || defined(USE_STM32H7XX_NUCLEO_144_MB1363) || \ defined(USE_STM32H7XX_NUCLEO_144_MB1364) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_RMII_TXD1 (PB13) GPIO_InitStructure.Pin = GPIO_PIN_13; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure RMII_TX_EN (PG11) and ETH_RMII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); #endif } #endif /** * @brief Initialize DMA descriptor lists * @param[in] interface Underlying network interface **/ void stm32h7xxEthInitDmaDesc(NetInterface *interface) { uint_t i; //Initialize TX DMA descriptor list for(i = 0; i < STM32H7XX_ETH_TX_BUFFER_COUNT; i++) { //The descriptor is initially owned by the application txDmaDesc[i].tdes0 = 0; txDmaDesc[i].tdes1 = 0; txDmaDesc[i].tdes2 = 0; txDmaDesc[i].tdes3 = 0; } //Initialize TX descriptor index txIndex = 0; //Initialize RX DMA descriptor list for(i = 0; i < STM32H7XX_ETH_RX_BUFFER_COUNT; i++) { //The descriptor is initially owned by the DMA rxDmaDesc[i].rdes0 = (uint32_t) rxBuffer[i]; rxDmaDesc[i].rdes1 = 0; rxDmaDesc[i].rdes2 = 0; rxDmaDesc[i].rdes3 = ETH_RDES3_OWN | ETH_RDES3_IOC | ETH_RDES3_BUF1V; } //Initialize RX descriptor index rxIndex = 0; //Start location of the TX descriptor list ETH->DMACTDLAR = (uint32_t) &txDmaDesc[0]; //Length of the transmit descriptor ring ETH->DMACTDRLR = STM32H7XX_ETH_TX_BUFFER_COUNT - 1; //Start location of the RX descriptor list ETH->DMACRDLAR = (uint32_t) &rxDmaDesc[0]; //Length of the receive descriptor ring ETH->DMACRDRLR = STM32H7XX_ETH_RX_BUFFER_COUNT - 1; } /** * @brief STM32H7 Ethernet MAC timer handler * * This routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void stm32h7xxEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void stm32h7xxEthEnableIrq(NetInterface *interface) { //Enable Ethernet MAC interrupts NVIC_EnableIRQ(ETH_IRQn); //Valid Ethernet PHY or switch driver? 
if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void stm32h7xxEthDisableIrq(NetInterface *interface) { //Disable Ethernet MAC interrupts NVIC_DisableIRQ(ETH_IRQn); //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief STM32H7 Ethernet MAC interrupt service routine **/ void ETH_IRQHandler(void) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read DMA status register status = ETH->DMACSR; //Packet transmitted? if((status & ETH_DMACSR_TI) != 0) { //Clear TI interrupt flag ETH->DMACSR = ETH_DMACSR_TI; //Check whether the TX buffer is available for writing if((txDmaDesc[txIndex].tdes3 & ETH_TDES3_OWN) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface->nicTxEvent); } } //Packet received? if((status & ETH_DMACSR_RI) != 0) { //Disable RIE interrupt ETH->DMACIER &= ~ETH_DMACIER_RIE; //Set event flag nicDriverInterface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Clear NIS interrupt flag ETH->DMACSR = ETH_DMACSR_NIS; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief STM32H7 Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void stm32h7xxEthEventHandler(NetInterface *interface) { error_t error; //Packet received? if((ETH->DMACSR & ETH_DMACSR_RI) != 0) { //Clear interrupt flag ETH->DMACSR = ETH_DMACSR_RI; //Process all pending packets do { //Read incoming packet error = stm32h7xxEthReceivePacket(interface); //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); } //Re-enable DMA interrupts ETH->DMACIER = ETH_DMACIER_NIE | ETH_DMACIER_RIE | ETH_DMACIER_TIE; } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t stm32h7xxEthSendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[STM32H7XX_ETH_TX_BUFFER_SIZE]; size_t length; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > STM32H7XX_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txDmaDesc[txIndex].tdes3 & ETH_TDES3_OWN) != 0) { return ERROR_FAILURE; } //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy(txBuffer[txIndex], temp, (length + 3) & ~3UL); //Set the start address of the buffer txDmaDesc[txIndex].tdes0 = (uint32_t) txBuffer[txIndex]; //Write the number of bytes to send txDmaDesc[txIndex].tdes2 = ETH_TDES2_IOC | (length & ETH_TDES2_B1L); //Give the ownership of the descriptor to the DMA txDmaDesc[txIndex].tdes3 = ETH_TDES3_OWN | ETH_TDES3_FD | ETH_TDES3_LD; //Data synchronization barrier __DSB(); //Clear TBU flag to resume processing ETH->DMACSR = ETH_DMACSR_TBU; //Instruct the DMA to poll the transmit descriptor list ETH->DMACTDTPR = 0; //Increment index and wrap around if necessary if(++txIndex >= STM32H7XX_ETH_TX_BUFFER_COUNT) { txIndex = 0; } //Check whether the next buffer is available for writing if((txDmaDesc[txIndex].tdes3 & ETH_TDES3_OWN) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t stm32h7xxEthReceivePacket(NetInterface *interface) { static uint8_t temp[STM32H7XX_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; NetRxAncillary ancillary; //The current buffer is available for reading? 
if((rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_OWN) == 0) { //FD and LD flags should be set if((rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_FD) != 0 && (rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_LD) != 0) { //Make sure no error occurred if((rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_ES) == 0) { //Retrieve the length of the frame n = rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_PL; //Limit the number of data to read n = MIN(n, STM32H7XX_ETH_RX_BUFFER_SIZE); //Copy data from the receive buffer osMemcpy(temp, rxBuffer[rxIndex], (n + 3) & ~3UL); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, temp, n, &ancillary); //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Set the start address of the buffer rxDmaDesc[rxIndex].rdes0 = (uint32_t) rxBuffer[rxIndex]; //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rdes3 = ETH_RDES3_OWN | ETH_RDES3_IOC | ETH_RDES3_BUF1V; //Increment index and wrap around if necessary if(++rxIndex >= STM32H7XX_ETH_RX_BUFFER_COUNT) { rxIndex = 0; } } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Clear RBU flag to resume processing ETH->DMACSR = ETH_DMACSR_RBU; //Instruct the DMA to poll the receive descriptor list ETH->DMACRDTPR = 0; //Return status code return error; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t stm32h7xxEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t j; uint_t k; uint32_t crc; uint32_t hashTable[2]; MacAddr unicastMacAddr[3]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Set the MAC address of the station ETH->MACA0LR = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); ETH->MACA0HR = interface->macAddr.w[2]; //The MAC supports 3 additional addresses for unicast perfect filtering unicastMacAddr[0] = MAC_UNSPECIFIED_ADDR; unicastMacAddr[1] = MAC_UNSPECIFIED_ADDR; unicastMacAddr[2] = MAC_UNSPECIFIED_ADDR; //The hash table is used for multicast address filtering hashTable[0] = 0; hashTable[1] = 0; //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0, j = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? if(entry->refCount > 0) { //Multicast address? 
if(macIsMulticastAddr(&entry->addr)) { //Compute CRC over the current MAC address crc = stm32h7xxEthCalcCrc(&entry->addr, sizeof(MacAddr)); //The upper 6 bits in the CRC register are used to index the //contents of the hash table k = (crc >> 26) & 0x3F; //Update hash table contents hashTable[k / 32] |= (1 << (k % 32)); } else { //Up to 3 additional MAC addresses can be specified if(j < 3) { //Save the unicast address unicastMacAddr[j++] = entry->addr; } } } } //Configure the first unicast address filter if(j >= 1) { //When the AE bit is set, the entry is used for perfect filtering ETH->MACA1LR = unicastMacAddr[0].w[0] | (unicastMacAddr[0].w[1] << 16); ETH->MACA1HR = unicastMacAddr[0].w[2] | ETH_MACAHR_AE; } else { //When the AE bit is cleared, the entry is ignored ETH->MACA1LR = 0; ETH->MACA1HR = 0; } //Configure the second unicast address filter if(j >= 2) { //When the AE bit is set, the entry is used for perfect filtering ETH->MACA2LR = unicastMacAddr[1].w[0] | (unicastMacAddr[1].w[1] << 16); ETH->MACA2HR = unicastMacAddr[1].w[2] | ETH_MACAHR_AE; } else { //When the AE bit is cleared, the entry is ignored ETH->MACA2LR = 0; ETH->MACA2HR = 0; } //Configure the third unicast address filter if(j >= 3) { //When the AE bit is set, the entry is used for perfect filtering ETH->MACA3LR = unicastMacAddr[2].w[0] | (unicastMacAddr[2].w[1] << 16); ETH->MACA3HR = unicastMacAddr[2].w[2] | ETH_MACAHR_AE; } else { //When the AE bit is cleared, the entry is ignored ETH->MACA3LR = 0; ETH->MACA3HR = 0; } //Configure the multicast address filter ETH->MACHT0R = hashTable[0]; ETH->MACHT1R = hashTable[1]; //Debug message TRACE_DEBUG(" MACHT0R = %08" PRIX32 "\r\n", ETH->MACHT0R); TRACE_DEBUG(" MACHT1R = %08" PRIX32 "\r\n", ETH->MACHT1R); //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t stm32h7xxEthUpdateMacConfig(NetInterface *interface) { uint32_t config; //Read current MAC configuration config = ETH->MACCR; //10BASE-T or 100BASE-TX operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { config |= ETH_MACCR_FES; } else { config &= ~ETH_MACCR_FES; } //Half-duplex or full-duplex mode? if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { config |= ETH_MACCR_DM; } else { config &= ~ETH_MACCR_DM; } //Update MAC configuration register ETH->MACCR = config; //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void stm32h7xxEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { uint32_t temp; //Valid opcode? 
if(opcode == SMI_OPCODE_WRITE) { //Take care not to alter MDC clock configuration temp = ETH->MACMDIOAR & ETH_MACMDIOAR_CR; //Set up a write operation temp |= ETH_MACMDIOAR_MOC_WR | ETH_MACMDIOAR_MB; //PHY address temp |= (phyAddr << 21) & ETH_MACMDIOAR_PA; //Register address temp |= (regAddr << 16) & ETH_MACMDIOAR_RDA; //Data to be written in the PHY register ETH->MACMDIODR = data & ETH_MACMDIODR_MD; //Start a write operation ETH->MACMDIOAR = temp; //Wait for the write to complete while((ETH->MACMDIOAR & ETH_MACMDIOAR_MB) != 0) { } } else { //The MAC peripheral only supports standard Clause 22 opcodes } } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t stm32h7xxEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; uint32_t temp; //Valid opcode? if(opcode == SMI_OPCODE_READ) { //Take care not to alter MDC clock configuration temp = ETH->MACMDIOAR & ETH_MACMDIOAR_CR; //Set up a read operation temp |= ETH_MACMDIOAR_MOC_RD | ETH_MACMDIOAR_MB; //PHY address temp |= (phyAddr << 21) & ETH_MACMDIOAR_PA; //Register address temp |= (regAddr << 16) & ETH_MACMDIOAR_RDA; //Start a read operation ETH->MACMDIOAR = temp; //Wait for the read to complete while((ETH->MACMDIOAR & ETH_MACMDIOAR_MB) != 0) { } //Get register value data = ETH->MACMDIODR & ETH_MACMDIODR_MD; } else { //The MAC peripheral only supports standard Clause 22 opcodes data = 0; } //Return the value of the PHY register return data; } /** * @brief CRC calculation * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t stm32h7xxEthCalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //The message is processed bit by bit for(j = 0; j < 8; j++) { //Update CRC value if((((crc >> 31) ^ (p[i] >> j)) & 0x01) != 0) { crc = (crc << 1) ^ 0x04C11DB7; } else { crc = crc << 1; } } } //Return CRC value return ~crc; }
/** * @file stm32h7xx_eth_driver.c * @brief STM32H7 Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "stm32h7xx.h" #include "stm32h7xx_hal.h" #include "core/net.h" #include "drivers/mac/stm32h7xx_eth_driver.h" #include "debug.h" //Underlying network interface static NetInterface *nicDriverInterface; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer #pragma data_alignment = 4 #pragma location = STM32H7XX_ETH_RAM_SECTION static uint8_t txBuffer[STM32H7XX_ETH_TX_BUFFER_COUNT][STM32H7XX_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 4 #pragma location = STM32H7XX_ETH_RAM_SECTION static uint8_t rxBuffer[STM32H7XX_ETH_RX_BUFFER_COUNT][STM32H7XX_ETH_RX_BUFFER_SIZE]; //Transmit DMA descriptors #pragma data_alignment = 4 #pragma location = STM32H7XX_ETH_RAM_SECTION static Stm32h7xxTxDmaDesc txDmaDesc[STM32H7XX_ETH_TX_BUFFER_COUNT]; //Receive DMA descriptors #pragma data_alignment = 4 #pragma location = STM32H7XX_ETH_RAM_SECTION static Stm32h7xxRxDmaDesc rxDmaDesc[STM32H7XX_ETH_RX_BUFFER_COUNT]; //Keil MDK-ARM or GCC compiler? 
#else //Transmit buffer static uint8_t txBuffer[STM32H7XX_ETH_TX_BUFFER_COUNT][STM32H7XX_ETH_TX_BUFFER_SIZE] __attribute__((aligned(4), __section__(STM32H7XX_ETH_RAM_SECTION))); //Receive buffer static uint8_t rxBuffer[STM32H7XX_ETH_RX_BUFFER_COUNT][STM32H7XX_ETH_RX_BUFFER_SIZE] __attribute__((aligned(4), __section__(STM32H7XX_ETH_RAM_SECTION))); //Transmit DMA descriptors static Stm32h7xxTxDmaDesc txDmaDesc[STM32H7XX_ETH_TX_BUFFER_COUNT] __attribute__((aligned(4), __section__(STM32H7XX_ETH_RAM_SECTION))); //Receive DMA descriptors static Stm32h7xxRxDmaDesc rxDmaDesc[STM32H7XX_ETH_RX_BUFFER_COUNT] __attribute__((aligned(4), __section__(STM32H7XX_ETH_RAM_SECTION))); #endif //Current transmit descriptor static uint_t txIndex; //Current receive descriptor static uint_t rxIndex; /** * @brief STM32H7 Ethernet MAC driver **/ const NicDriver stm32h7xxEthDriver = { NIC_TYPE_ETHERNET, ETH_MTU, stm32h7xxEthInit, stm32h7xxEthTick, stm32h7xxEthEnableIrq, stm32h7xxEthDisableIrq, stm32h7xxEthEventHandler, stm32h7xxEthSendPacket, stm32h7xxEthUpdateMacAddrFilter, stm32h7xxEthUpdateMacConfig, stm32h7xxEthWritePhyReg, stm32h7xxEthReadPhyReg, TRUE, TRUE, TRUE, FALSE }; /** * @brief STM32H7 Ethernet MAC initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t stm32h7xxEthInit(NetInterface *interface) { error_t error; //Debug message TRACE_INFO("Initializing STM32H7 Ethernet MAC...\r\n"); //Save underlying network interface nicDriverInterface = interface; //GPIO configuration stm32h7xxEthInitGpio(interface); //Enable Ethernet MAC clock __HAL_RCC_ETH1MAC_CLK_ENABLE(); __HAL_RCC_ETH1TX_CLK_ENABLE(); __HAL_RCC_ETH1RX_CLK_ENABLE(); //Reset Ethernet MAC peripheral __HAL_RCC_ETH1MAC_FORCE_RESET(); __HAL_RCC_ETH1MAC_RELEASE_RESET(); //Perform a software reset ETH->DMAMR |= ETH_DMAMR_SWR; //Wait for the reset to complete while((ETH->DMAMR & ETH_DMAMR_SWR) != 0) { } //Adjust MDC clock range depending on HCLK frequency ETH->MACMDIOAR = ETH_MACMDIOAR_CR_DIV124; //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? 
if(error) { return error; } //Use default MAC configuration ETH->MACCR = ETH_MACCR_RESERVED15 | ETH_MACCR_DO; //Set the MAC address of the station ETH->MACA0LR = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); ETH->MACA0HR = interface->macAddr.w[2]; //The MAC supports 3 additional addresses for unicast perfect filtering ETH->MACA1LR = 0; ETH->MACA1HR = 0; ETH->MACA2LR = 0; ETH->MACA2HR = 0; ETH->MACA3LR = 0; ETH->MACA3HR = 0; //Initialize hash table ETH->MACHT0R = 0; ETH->MACHT1R = 0; //Configure the receive filter ETH->MACPFR = ETH_MACPFR_HPF | ETH_MACPFR_HMC; //Disable flow control ETH->MACTFCR = 0; ETH->MACRFCR = 0; //Configure DMA operating mode ETH->DMAMR = ETH_DMAMR_INTM_0 | ETH_DMAMR_PR_1_1; //Configure system bus mode ETH->DMASBMR |= ETH_DMASBMR_AAL; //The DMA takes the descriptor table as contiguous ETH->DMACCR = ETH_DMACCR_DSL_0BIT; //Configure TX features ETH->DMACTCR = ETH_DMACTCR_TPBL_1PBL; //Configure RX features ETH->DMACRCR = ETH_DMACRCR_RPBL_1PBL; ETH->DMACRCR |= (STM32H7XX_ETH_RX_BUFFER_SIZE << 1) & ETH_DMACRCR_RBSZ; //Enable store and forward mode ETH->MTLTQOMR |= ETH_MTLTQOMR_TSF; ETH->MTLRQOMR |= ETH_MTLRQOMR_RSF; //Initialize DMA descriptor lists stm32h7xxEthInitDmaDesc(interface); //Prevent interrupts from being generated when the transmit statistic //counters reach half their maximum value ETH->MMCTIMR = ETH_MMCTIMR_TXLPITRCIM | ETH_MMCTIMR_TXLPIUSCIM | ETH_MMCTIMR_TXGPKTIM | ETH_MMCTIMR_TXMCOLGPIM | ETH_MMCTIMR_TXSCOLGPIM; //Prevent interrupts from being generated when the receive statistic //counters reach half their maximum value ETH->MMCRIMR = ETH_MMCRIMR_RXLPITRCIM | ETH_MMCRIMR_RXLPIUSCIM | ETH_MMCRIMR_RXUCGPIM | ETH_MMCRIMR_RXALGNERPIM | ETH_MMCRIMR_RXCRCERPIM; //Disable MAC interrupts ETH->MACIER = 0; //Enable the desired DMA interrupts ETH->DMACIER = ETH_DMACIER_NIE | ETH_DMACIER_RIE | ETH_DMACIER_TIE; //Set priority grouping (4 bits for pre-emption priority, no bits for subpriority) NVIC_SetPriorityGrouping(STM32H7XX_ETH_IRQ_PRIORITY_GROUPING); //Configure Ethernet interrupt priority NVIC_SetPriority(ETH_IRQn, NVIC_EncodePriority(STM32H7XX_ETH_IRQ_PRIORITY_GROUPING, STM32H7XX_ETH_IRQ_GROUP_PRIORITY, STM32H7XX_ETH_IRQ_SUB_PRIORITY)); //Enable MAC transmission and reception ETH->MACCR |= ETH_MACCR_TE | ETH_MACCR_RE; //Enable DMA transmission and reception ETH->DMACTCR |= ETH_DMACTCR_ST; ETH->DMACRCR |= ETH_DMACRCR_SR; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; } //STM32F743I-EVAL, STM32F747I-EVAL, STM32H735G-DK, STM32H745I-Discovery, //STM32H747I-Discovery, STM32H750B-DK, Nucleo-H723ZG, Nucleo-H743ZI, //Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation board? #if defined(USE_STM32H743I_EVAL) || defined(USE_STM32H747I_EVAL) || \ defined(USE_STM32H735G_DK) || defined(USE_STM32H745I_DISCO) || \ defined(USE_STM32H747I_DISCO) || defined(USE_STM32H750B_DISCO) || \ defined(USE_NUCLEO_H723ZG) || defined(USE_NUCLEO_H743ZI) || \ defined(USE_NUCLEO_H743ZI2) || defined(USE_NUCLEO_H745ZI_Q) /** * @brief GPIO configuration * @param[in] interface Underlying network interface **/ void stm32h7xxEthInitGpio(NetInterface *interface) { GPIO_InitTypeDef GPIO_InitStructure; //STM32F743I-EVAL, STM32F747I-EVAL or STM32H747I-Discovery evaluation board? 
#if defined(USE_STM32H743I_EVAL) || defined(USE_STM32H747I_EVAL) || \ defined(USE_STM32H747I_DISCO) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure RMII_TX_EN (PG11), ETH_RMII_TXD1 (PG12) and ETH_RMII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); //STM32H735G-DK evaluation board? #elif defined(USE_STM32H735G_DK) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure RMII_RX_ER (PB10), RMII_TX_EN (PB11), ETH_RMII_TXD1 (PB12) //and ETH_RMII_TXD0 (PB13) GPIO_InitStructure.Pin = GPIO_PIN_10 | GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //STM32H745I-Discovery or STM32H750B-DK evaluation board? 
#elif defined(USE_STM32H745I_DISCO) || defined(USE_STM32H750B_DISCO) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOE_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); __HAL_RCC_GPIOH_CLK_ENABLE(); __HAL_RCC_GPIOI_CLK_ENABLE(); //Select MII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_MII); //Configure MII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_MII_RX_CLK (PA1), ETH_MDIO (PA2) and ETH_MII_RX_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_MII_RXD2 (PB0), ETH_MII_RXD3 (PB1) and ETH_MII_RX_ER (PB2) GPIO_InitStructure.Pin = GPIO_PIN_0 | GPIO_PIN_1 | GPIO_PIN_2; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_MII_TXD2 (PC2), ETH_MII_TX_CLK (PC3), //ETH_MII_RXD0 (PC4) and ETH_MII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_3 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure ETH_MII_TXD3 (PE2) GPIO_InitStructure.Pin = GPIO_PIN_2; HAL_GPIO_Init(GPIOE, &GPIO_InitStructure); //Configure ETH_MII_TX_EN (PG11), ETH_MII_TXD1 (PG12) and ETH_MII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); //Configure ETH_MII_CRS (PH2) and ETH_MII_COL (PH3) //GPIO_InitStructure.Pin = GPIO_PIN_2 | GPIO_PIN_3; //HAL_GPIO_Init(GPIOH, &GPIO_InitStructure); //Configure ETH_MII_RX_ER (PI10) GPIO_InitStructure.Pin = GPIO_PIN_10; HAL_GPIO_Init(GPIOI, &GPIO_InitStructure); //Nucleo-H723ZG, Nucleo-H743ZI, Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation //board? 
#elif defined(USE_NUCLEO_H723ZG) || defined(USE_NUCLEO_H743ZI) || \ defined(USE_NUCLEO_H743ZI2) || defined(USE_NUCLEO_H745ZI_Q) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_RMII_TXD1 (PB13) GPIO_InitStructure.Pin = GPIO_PIN_13; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure RMII_TX_EN (PG11) and ETH_RMII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); #endif } #endif /** * @brief Initialize DMA descriptor lists * @param[in] interface Underlying network interface **/ void stm32h7xxEthInitDmaDesc(NetInterface *interface) { uint_t i; //Initialize TX DMA descriptor list for(i = 0; i < STM32H7XX_ETH_TX_BUFFER_COUNT; i++) { //The descriptor is initially owned by the application txDmaDesc[i].tdes0 = 0; txDmaDesc[i].tdes1 = 0; txDmaDesc[i].tdes2 = 0; txDmaDesc[i].tdes3 = 0; } //Initialize TX descriptor index txIndex = 0; //Initialize RX DMA descriptor list for(i = 0; i < STM32H7XX_ETH_RX_BUFFER_COUNT; i++) { //The descriptor is initially owned by the DMA rxDmaDesc[i].rdes0 = (uint32_t) rxBuffer[i]; rxDmaDesc[i].rdes1 = 0; rxDmaDesc[i].rdes2 = 0; rxDmaDesc[i].rdes3 = ETH_RDES3_OWN | ETH_RDES3_IOC | ETH_RDES3_BUF1V; } //Initialize RX descriptor index rxIndex = 0; //Start location of the TX descriptor list ETH->DMACTDLAR = (uint32_t) &txDmaDesc[0]; //Length of the transmit descriptor ring ETH->DMACTDRLR = STM32H7XX_ETH_TX_BUFFER_COUNT - 1; //Start location of the RX descriptor list ETH->DMACRDLAR = (uint32_t) &rxDmaDesc[0]; //Length of the receive descriptor ring ETH->DMACRDRLR = STM32H7XX_ETH_RX_BUFFER_COUNT - 1; } /** * @brief STM32H7 Ethernet MAC timer handler * * This routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void stm32h7xxEthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void stm32h7xxEthEnableIrq(NetInterface *interface) { //Enable Ethernet MAC interrupts NVIC_EnableIRQ(ETH_IRQn); //Valid Ethernet PHY or switch driver? 
if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void stm32h7xxEthDisableIrq(NetInterface *interface) { //Disable Ethernet MAC interrupts NVIC_DisableIRQ(ETH_IRQn); //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief STM32H7 Ethernet MAC interrupt service routine **/ void ETH_IRQHandler(void) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read DMA status register status = ETH->DMACSR; //Packet transmitted? if((status & ETH_DMACSR_TI) != 0) { //Clear TI interrupt flag ETH->DMACSR = ETH_DMACSR_TI; //Check whether the TX buffer is available for writing if((txDmaDesc[txIndex].tdes3 & ETH_TDES3_OWN) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface->nicTxEvent); } } //Packet received? if((status & ETH_DMACSR_RI) != 0) { //Disable RIE interrupt ETH->DMACIER &= ~ETH_DMACIER_RIE; //Set event flag nicDriverInterface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Clear NIS interrupt flag ETH->DMACSR = ETH_DMACSR_NIS; //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief STM32H7 Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void stm32h7xxEthEventHandler(NetInterface *interface) { error_t error; //Packet received? if((ETH->DMACSR & ETH_DMACSR_RI) != 0) { //Clear interrupt flag ETH->DMACSR = ETH_DMACSR_RI; //Process all pending packets do { //Read incoming packet error = stm32h7xxEthReceivePacket(interface); //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); } //Re-enable DMA interrupts ETH->DMACIER = ETH_DMACIER_NIE | ETH_DMACIER_RIE | ETH_DMACIER_TIE; } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t stm32h7xxEthSendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { static uint8_t temp[STM32H7XX_ETH_TX_BUFFER_SIZE]; size_t length; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > STM32H7XX_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txDmaDesc[txIndex].tdes3 & ETH_TDES3_OWN) != 0) { return ERROR_FAILURE; } //Copy user data to the transmit buffer netBufferRead(temp, buffer, offset, length); osMemcpy(txBuffer[txIndex], temp, (length + 3) & ~3UL); //Set the start address of the buffer txDmaDesc[txIndex].tdes0 = (uint32_t) txBuffer[txIndex]; //Write the number of bytes to send txDmaDesc[txIndex].tdes2 = ETH_TDES2_IOC | (length & ETH_TDES2_B1L); //Give the ownership of the descriptor to the DMA txDmaDesc[txIndex].tdes3 = ETH_TDES3_OWN | ETH_TDES3_FD | ETH_TDES3_LD; //Data synchronization barrier __DSB(); //Clear TBU flag to resume processing ETH->DMACSR = ETH_DMACSR_TBU; //Instruct the DMA to poll the transmit descriptor list ETH->DMACTDTPR = 0; //Increment index and wrap around if necessary if(++txIndex >= STM32H7XX_ETH_TX_BUFFER_COUNT) { txIndex = 0; } //Check whether the next buffer is available for writing if((txDmaDesc[txIndex].tdes3 & ETH_TDES3_OWN) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Data successfully written return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t stm32h7xxEthReceivePacket(NetInterface *interface) { static uint8_t temp[STM32H7XX_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; NetRxAncillary ancillary; //The current buffer is available for reading? 
if((rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_OWN) == 0) { //FD and LD flags should be set if((rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_FD) != 0 && (rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_LD) != 0) { //Make sure no error occurred if((rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_ES) == 0) { //Retrieve the length of the frame n = rxDmaDesc[rxIndex].rdes3 & ETH_RDES3_PL; //Limit the number of data to read n = MIN(n, STM32H7XX_ETH_RX_BUFFER_SIZE); //Copy data from the receive buffer osMemcpy(temp, rxBuffer[rxIndex], (n + 3) & ~3UL); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, temp, n, &ancillary); //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Set the start address of the buffer rxDmaDesc[rxIndex].rdes0 = (uint32_t) rxBuffer[rxIndex]; //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rdes3 = ETH_RDES3_OWN | ETH_RDES3_IOC | ETH_RDES3_BUF1V; //Increment index and wrap around if necessary if(++rxIndex >= STM32H7XX_ETH_RX_BUFFER_COUNT) { rxIndex = 0; } } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Clear RBU flag to resume processing ETH->DMACSR = ETH_DMACSR_RBU; //Instruct the DMA to poll the receive descriptor list ETH->DMACRDTPR = 0; //Return status code return error; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t stm32h7xxEthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t j; uint_t k; uint32_t crc; uint32_t hashTable[2]; MacAddr unicastMacAddr[3]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Set the MAC address of the station ETH->MACA0LR = interface->macAddr.w[0] | (interface->macAddr.w[1] << 16); ETH->MACA0HR = interface->macAddr.w[2]; //The MAC supports 3 additional addresses for unicast perfect filtering unicastMacAddr[0] = MAC_UNSPECIFIED_ADDR; unicastMacAddr[1] = MAC_UNSPECIFIED_ADDR; unicastMacAddr[2] = MAC_UNSPECIFIED_ADDR; //The hash table is used for multicast address filtering hashTable[0] = 0; hashTable[1] = 0; //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0, j = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? if(entry->refCount > 0) { //Multicast address? 
if(macIsMulticastAddr(&entry->addr)) { //Compute CRC over the current MAC address crc = stm32h7xxEthCalcCrc(&entry->addr, sizeof(MacAddr)); //The upper 6 bits in the CRC register are used to index the //contents of the hash table k = (crc >> 26) & 0x3F; //Update hash table contents hashTable[k / 32] |= (1 << (k % 32)); } else { //Up to 3 additional MAC addresses can be specified if(j < 3) { //Save the unicast address unicastMacAddr[j++] = entry->addr; } } } } //Configure the first unicast address filter if(j >= 1) { //When the AE bit is set, the entry is used for perfect filtering ETH->MACA1LR = unicastMacAddr[0].w[0] | (unicastMacAddr[0].w[1] << 16); ETH->MACA1HR = unicastMacAddr[0].w[2] | ETH_MACAHR_AE; } else { //When the AE bit is cleared, the entry is ignored ETH->MACA1LR = 0; ETH->MACA1HR = 0; } //Configure the second unicast address filter if(j >= 2) { //When the AE bit is set, the entry is used for perfect filtering ETH->MACA2LR = unicastMacAddr[1].w[0] | (unicastMacAddr[1].w[1] << 16); ETH->MACA2HR = unicastMacAddr[1].w[2] | ETH_MACAHR_AE; } else { //When the AE bit is cleared, the entry is ignored ETH->MACA2LR = 0; ETH->MACA2HR = 0; } //Configure the third unicast address filter if(j >= 3) { //When the AE bit is set, the entry is used for perfect filtering ETH->MACA3LR = unicastMacAddr[2].w[0] | (unicastMacAddr[2].w[1] << 16); ETH->MACA3HR = unicastMacAddr[2].w[2] | ETH_MACAHR_AE; } else { //When the AE bit is cleared, the entry is ignored ETH->MACA3LR = 0; ETH->MACA3HR = 0; } //Configure the multicast address filter ETH->MACHT0R = hashTable[0]; ETH->MACHT1R = hashTable[1]; //Debug message TRACE_DEBUG(" MACHT0R = %08" PRIX32 "\r\n", ETH->MACHT0R); TRACE_DEBUG(" MACHT1R = %08" PRIX32 "\r\n", ETH->MACHT1R); //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t stm32h7xxEthUpdateMacConfig(NetInterface *interface) { uint32_t config; //Read current MAC configuration config = ETH->MACCR; //10BASE-T or 100BASE-TX operation mode? if(interface->linkSpeed == NIC_LINK_SPEED_100MBPS) { config |= ETH_MACCR_FES; } else { config &= ~ETH_MACCR_FES; } //Half-duplex or full-duplex mode? if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { config |= ETH_MACCR_DM; } else { config &= ~ETH_MACCR_DM; } //Update MAC configuration register ETH->MACCR = config; //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void stm32h7xxEthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { uint32_t temp; //Valid opcode? 
if(opcode == SMI_OPCODE_WRITE) { //Take care not to alter MDC clock configuration temp = ETH->MACMDIOAR & ETH_MACMDIOAR_CR; //Set up a write operation temp |= ETH_MACMDIOAR_MOC_WR | ETH_MACMDIOAR_MB; //PHY address temp |= (phyAddr << 21) & ETH_MACMDIOAR_PA; //Register address temp |= (regAddr << 16) & ETH_MACMDIOAR_RDA; //Data to be written in the PHY register ETH->MACMDIODR = data & ETH_MACMDIODR_MD; //Start a write operation ETH->MACMDIOAR = temp; //Wait for the write to complete while((ETH->MACMDIOAR & ETH_MACMDIOAR_MB) != 0) { } } else { //The MAC peripheral only supports standard Clause 22 opcodes } } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t stm32h7xxEthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; uint32_t temp; //Valid opcode? if(opcode == SMI_OPCODE_READ) { //Take care not to alter MDC clock configuration temp = ETH->MACMDIOAR & ETH_MACMDIOAR_CR; //Set up a read operation temp |= ETH_MACMDIOAR_MOC_RD | ETH_MACMDIOAR_MB; //PHY address temp |= (phyAddr << 21) & ETH_MACMDIOAR_PA; //Register address temp |= (regAddr << 16) & ETH_MACMDIOAR_RDA; //Start a read operation ETH->MACMDIOAR = temp; //Wait for the read to complete while((ETH->MACMDIOAR & ETH_MACMDIOAR_MB) != 0) { } //Get register value data = ETH->MACMDIODR & ETH_MACMDIODR_MD; } else { //The MAC peripheral only supports standard Clause 22 opcodes data = 0; } //Return the value of the PHY register return data; } /** * @brief CRC calculation * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t stm32h7xxEthCalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //The message is processed bit by bit for(j = 0; j < 8; j++) { //Update CRC value if((((crc >> 31) ^ (p[i] >> j)) & 0x01) != 0) { crc = (crc << 1) ^ 0x04C11DB7; } else { crc = crc << 1; } } } //Return CRC value return ~crc; }
void stm32h7xxEthInitGpio(NetInterface *interface) { GPIO_InitTypeDef GPIO_InitStructure; //STM32F743I-EVAL, STM32F747I-EVAL or STM32H747I-Discovery evaluation board? #if defined(USE_STM32H743I_EVAL) || defined(USE_STM32H747I_EVAL) || \ defined(USE_STM32H747I_DISCO) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure RMII_TX_EN (PG11), ETH_RMII_TXD1 (PG12) and ETH_RMII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); //STM32H745I-Discovery or STM32H750B-DK evaluation board? #elif defined(USE_STM32H745I_DISCO) || defined(USE_STM32H750B_DISCO) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOE_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); __HAL_RCC_GPIOH_CLK_ENABLE(); __HAL_RCC_GPIOI_CLK_ENABLE(); //Select MII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_MII); //Configure MII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_MII_RX_CLK (PA1), ETH_MDIO (PA2) and ETH_MII_RX_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_MII_RXD2 (PB0), ETH_MII_RXD3 (PB1) and ETH_MII_RX_ER (PB2) GPIO_InitStructure.Pin = GPIO_PIN_0 | GPIO_PIN_1 | GPIO_PIN_2; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_MII_TXD2 (PC2), ETH_MII_TX_CLK (PC3), //ETH_MII_RXD0 (PC4) and ETH_MII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_3 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure ETH_MII_TXD3 (PE2) GPIO_InitStructure.Pin = GPIO_PIN_2; HAL_GPIO_Init(GPIOE, &GPIO_InitStructure); //Configure ETH_MII_TX_EN (PG11), ETH_MII_TXD1 (PG12) and ETH_MII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); //Configure ETH_MII_CRS (PH2) and ETH_MII_COL (PH3) //GPIO_InitStructure.Pin = GPIO_PIN_2 | GPIO_PIN_3; //HAL_GPIO_Init(GPIOH, &GPIO_InitStructure); //Configure ETH_MII_RX_ER (PI10) GPIO_InitStructure.Pin = GPIO_PIN_10; HAL_GPIO_Init(GPIOI, &GPIO_InitStructure); //Nucleo-H743ZI, Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation board? 
#elif defined(USE_STM32H7XX_NUCLEO_144) || defined(USE_STM32H7XX_NUCLEO_144_MB1363) || \ defined(USE_STM32H7XX_NUCLEO_144_MB1364) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_RMII_TXD1 (PB13) GPIO_InitStructure.Pin = GPIO_PIN_13; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure RMII_TX_EN (PG11) and ETH_RMII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); #endif }
void stm32h7xxEthInitGpio(NetInterface *interface) { GPIO_InitTypeDef GPIO_InitStructure; //STM32F743I-EVAL, STM32F747I-EVAL or STM32H747I-Discovery evaluation board? #if defined(USE_STM32H743I_EVAL) || defined(USE_STM32H747I_EVAL) || \ defined(USE_STM32H747I_DISCO) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure RMII_TX_EN (PG11), ETH_RMII_TXD1 (PG12) and ETH_RMII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); //STM32H735G-DK evaluation board? #elif defined(USE_STM32H735G_DK) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure RMII_RX_ER (PB10), RMII_TX_EN (PB11), ETH_RMII_TXD1 (PB12) //and ETH_RMII_TXD0 (PB13) GPIO_InitStructure.Pin = GPIO_PIN_10 | GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //STM32H745I-Discovery or STM32H750B-DK evaluation board? 
#elif defined(USE_STM32H745I_DISCO) || defined(USE_STM32H750B_DISCO) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOE_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); __HAL_RCC_GPIOH_CLK_ENABLE(); __HAL_RCC_GPIOI_CLK_ENABLE(); //Select MII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_MII); //Configure MII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_MII_RX_CLK (PA1), ETH_MDIO (PA2) and ETH_MII_RX_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_MII_RXD2 (PB0), ETH_MII_RXD3 (PB1) and ETH_MII_RX_ER (PB2) GPIO_InitStructure.Pin = GPIO_PIN_0 | GPIO_PIN_1 | GPIO_PIN_2; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_MII_TXD2 (PC2), ETH_MII_TX_CLK (PC3), //ETH_MII_RXD0 (PC4) and ETH_MII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_3 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure ETH_MII_TXD3 (PE2) GPIO_InitStructure.Pin = GPIO_PIN_2; HAL_GPIO_Init(GPIOE, &GPIO_InitStructure); //Configure ETH_MII_TX_EN (PG11), ETH_MII_TXD1 (PG12) and ETH_MII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); //Configure ETH_MII_CRS (PH2) and ETH_MII_COL (PH3) //GPIO_InitStructure.Pin = GPIO_PIN_2 | GPIO_PIN_3; //HAL_GPIO_Init(GPIOH, &GPIO_InitStructure); //Configure ETH_MII_RX_ER (PI10) GPIO_InitStructure.Pin = GPIO_PIN_10; HAL_GPIO_Init(GPIOI, &GPIO_InitStructure); //Nucleo-H723ZG, Nucleo-H743ZI, Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation //board? #elif defined(USE_NUCLEO_H723ZG) || defined(USE_NUCLEO_H743ZI) || \ defined(USE_NUCLEO_H743ZI2) || defined(USE_NUCLEO_H745ZI_Q) //Enable SYSCFG clock __HAL_RCC_SYSCFG_CLK_ENABLE(); //Enable GPIO clocks __HAL_RCC_GPIOA_CLK_ENABLE(); __HAL_RCC_GPIOB_CLK_ENABLE(); __HAL_RCC_GPIOC_CLK_ENABLE(); __HAL_RCC_GPIOG_CLK_ENABLE(); //Select RMII interface mode HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII); //Configure RMII pins GPIO_InitStructure.Mode = GPIO_MODE_AF_PP; GPIO_InitStructure.Pull = GPIO_NOPULL; GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH; GPIO_InitStructure.Alternate = GPIO_AF11_ETH; //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7; HAL_GPIO_Init(GPIOA, &GPIO_InitStructure); //Configure ETH_RMII_TXD1 (PB13) GPIO_InitStructure.Pin = GPIO_PIN_13; HAL_GPIO_Init(GPIOB, &GPIO_InitStructure); //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5) GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5; HAL_GPIO_Init(GPIOC, &GPIO_InitStructure); //Configure RMII_TX_EN (PG11) and ETH_RMII_TXD0 (PG13) GPIO_InitStructure.Pin = GPIO_PIN_11 | GPIO_PIN_13; HAL_GPIO_Init(GPIOG, &GPIO_InitStructure); #endif }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (258, '//STM32F743I-EVAL, STM32F747I-EVAL, STM32H735G-DK, STM32H745I-Discovery,'), (259, '//STM32H747I-Discovery, STM32H750B-DK, Nucleo-H723ZG, Nucleo-H743ZI,'), (260, '//Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation board?'), (262, ' defined(USE_STM32H735G_DK) || defined(USE_STM32H745I_DISCO) || \\'), (263, ' defined(USE_STM32H747I_DISCO) || defined(USE_STM32H750B_DISCO) || \\'), (264, ' defined(USE_NUCLEO_H723ZG) || defined(USE_NUCLEO_H743ZI) || \\'), (265, ' defined(USE_NUCLEO_H743ZI2) || defined(USE_NUCLEO_H745ZI_Q)'), (308, '//STM32H735G-DK evaluation board?'), (309, '#elif defined(USE_STM32H735G_DK)'), (310, ' //Enable SYSCFG clock'), (311, ' __HAL_RCC_SYSCFG_CLK_ENABLE();'), (312, ''), (313, ' //Enable GPIO clocks'), (314, ' __HAL_RCC_GPIOA_CLK_ENABLE();'), (315, ' __HAL_RCC_GPIOB_CLK_ENABLE();'), (316, ' __HAL_RCC_GPIOC_CLK_ENABLE();'), (317, ''), (318, ' //Select RMII interface mode'), (319, ' HAL_SYSCFG_ETHInterfaceSelect(SYSCFG_ETH_RMII);'), (320, ''), (321, ' //Configure RMII pins'), (322, ' GPIO_InitStructure.Mode = GPIO_MODE_AF_PP;'), (323, ' GPIO_InitStructure.Pull = GPIO_NOPULL;'), (324, ' GPIO_InitStructure.Speed = GPIO_SPEED_FREQ_VERY_HIGH;'), (325, ' GPIO_InitStructure.Alternate = GPIO_AF11_ETH;'), (326, ''), (327, ' //Configure ETH_RMII_REF_CLK (PA1), ETH_MDIO (PA2) and ETH_RMII_CRS_DV (PA7)'), (328, ' GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_7;'), (329, ' HAL_GPIO_Init(GPIOA, &GPIO_InitStructure);'), (330, ''), (331, ' //Configure RMII_RX_ER (PB10), RMII_TX_EN (PB11), ETH_RMII_TXD1 (PB12)'), (332, ' //and ETH_RMII_TXD0 (PB13)'), (333, ' GPIO_InitStructure.Pin = GPIO_PIN_10 | GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_13;'), (334, ' HAL_GPIO_Init(GPIOB, &GPIO_InitStructure);'), (335, ''), (336, ' //Configure ETH_MDC (PC1), ETH_RMII_RXD0 (PC4) and ETH_RMII_RXD1 (PC5)'), (337, ' GPIO_InitStructure.Pin = GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5;'), (338, ' HAL_GPIO_Init(GPIOC, &GPIO_InitStructure);'), (339, ''), (392, '//Nucleo-H723ZG, Nucleo-H743ZI, Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation'), (393, '//board?'), (394, '#elif defined(USE_NUCLEO_H723ZG) || defined(USE_NUCLEO_H743ZI) || \\'), (395, ' defined(USE_NUCLEO_H743ZI2) || defined(USE_NUCLEO_H745ZI_Q)')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (258, '//STM32F743I-EVAL, STM32F747I-EVAL, STM32H745I-Discovery, STM32H747I-Discovery,'), (259, '//STM32H750B-DK, Nucleo-H743ZI, Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation board?'), (261, ' defined(USE_STM32H745I_DISCO) || defined(USE_STM32H747I_DISCO) || \\'), (262, ' defined(USE_STM32H750B_DISCO) || defined(USE_STM32H7XX_NUCLEO_144) || \\'), (263, ' defined(USE_STM32H7XX_NUCLEO_144_MB1363) || \\'), (264, ' defined(USE_STM32H7XX_NUCLEO_144_MB1364)'), (359, '//Nucleo-H743ZI, Nucleo-H743ZI2 or Nucleo-H745ZI-Q evaluation board?'), (360, '#elif defined(USE_STM32H7XX_NUCLEO_144) || defined(USE_STM32H7XX_NUCLEO_144_MB1363) || \\'), (361, ' defined(USE_STM32H7XX_NUCLEO_144_MB1364)')]}
45
11
531
2891
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
openssl.c
ossl_connect_step1
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2021, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* * Source file for all OpenSSL-specific code for the TLS/SSL layer. No code * but vtls.c should ever call or use these functions. */ #include "curl_setup.h" #ifdef USE_OPENSSL #include <limits.h> /* Wincrypt must be included before anything that could include OpenSSL. */ #if defined(USE_WIN32_CRYPTO) #include <wincrypt.h> /* Undefine wincrypt conflicting symbols for BoringSSL. */ #undef X509_NAME #undef X509_EXTENSIONS #undef PKCS7_ISSUER_AND_SERIAL #undef PKCS7_SIGNER_INFO #undef OCSP_REQUEST #undef OCSP_RESPONSE #endif #include "urldata.h" #include "sendf.h" #include "formdata.h" /* for the boundary function */ #include "url.h" /* for the ssl config check function */ #include "inet_pton.h" #include "openssl.h" #include "connect.h" #include "slist.h" #include "select.h" #include "vtls.h" #include "keylog.h" #include "strcase.h" #include "hostcheck.h" #include "multiif.h" #include "strerror.h" #include "curl_printf.h" #include <openssl/ssl.h> #include <openssl/rand.h> #include <openssl/x509v3.h> #ifndef OPENSSL_NO_DSA #include <openssl/dsa.h> #endif #include <openssl/dh.h> #include <openssl/err.h> #include <openssl/md5.h> #include <openssl/conf.h> #include <openssl/bn.h> #include <openssl/rsa.h> #include <openssl/bio.h> #include <openssl/buffer.h> #include <openssl/pkcs12.h> #ifdef USE_AMISSL #include "amigaos.h" #endif #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_OCSP) #include <openssl/ocsp.h> #endif #if (OPENSSL_VERSION_NUMBER >= 0x0090700fL) && /* 0.9.7 or later */ \ !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_UI_CONSOLE) #define USE_OPENSSL_ENGINE #include <openssl/engine.h> #endif #include "warnless.h" #include "non-ascii.h" /* for Curl_convert_from_utf8 prototype */ /* The last #include files should be: */ #include "curl_memory.h" #include "memdebug.h" /* Uncomment the ALLOW_RENEG line to a real #define if you want to allow TLS renegotiations when built with BoringSSL. Renegotiating is non-compliant with HTTP/2 and "an extremely dangerous protocol feature". Beware. 
#define ALLOW_RENEG 1 */ #ifndef OPENSSL_VERSION_NUMBER #error "OPENSSL_VERSION_NUMBER not defined" #endif #ifdef USE_OPENSSL_ENGINE #include <openssl/ui.h> #endif #if OPENSSL_VERSION_NUMBER >= 0x00909000L #define SSL_METHOD_QUAL const #else #define SSL_METHOD_QUAL #endif #if (OPENSSL_VERSION_NUMBER >= 0x10000000L) #define HAVE_ERR_REMOVE_THREAD_STATE 1 #endif #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && /* OpenSSL 1.1.0+ */ \ !(defined(LIBRESSL_VERSION_NUMBER) && \ LIBRESSL_VERSION_NUMBER < 0x20700000L) #define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER #define HAVE_X509_GET0_EXTENSIONS 1 /* added in 1.1.0 -pre1 */ #define HAVE_OPAQUE_EVP_PKEY 1 /* since 1.1.0 -pre3 */ #define HAVE_OPAQUE_RSA_DSA_DH 1 /* since 1.1.0 -pre5 */ #define CONST_EXTS const #define HAVE_ERR_REMOVE_THREAD_STATE_DEPRECATED 1 /* funny typecast define due to difference in API */ #ifdef LIBRESSL_VERSION_NUMBER #define ARG2_X509_signature_print (X509_ALGOR *) #else #define ARG2_X509_signature_print #endif #else /* For OpenSSL before 1.1.0 */ #define ASN1_STRING_get0_data(x) ASN1_STRING_data(x) #define X509_get0_notBefore(x) X509_get_notBefore(x) #define X509_get0_notAfter(x) X509_get_notAfter(x) #define CONST_EXTS /* nope */ #ifndef LIBRESSL_VERSION_NUMBER #define OpenSSL_version_num() SSLeay() #endif #endif #if (OPENSSL_VERSION_NUMBER >= 0x1000200fL) && /* 1.0.2 or later */ \ !(defined(LIBRESSL_VERSION_NUMBER) && \ LIBRESSL_VERSION_NUMBER < 0x20700000L) #define HAVE_X509_GET0_SIGNATURE 1 #endif #if (OPENSSL_VERSION_NUMBER >= 0x1000200fL) /* 1.0.2 or later */ #define HAVE_SSL_GET_SHUTDOWN 1 #endif #if OPENSSL_VERSION_NUMBER >= 0x10002003L && \ OPENSSL_VERSION_NUMBER <= 0x10002FFFL && \ !defined(OPENSSL_NO_COMP) #define HAVE_SSL_COMP_FREE_COMPRESSION_METHODS 1 #endif #if (OPENSSL_VERSION_NUMBER < 0x0090808fL) /* not present in older OpenSSL */ #define OPENSSL_load_builtin_modules(x) #endif /* * Whether SSL_CTX_set_keylog_callback is available. * OpenSSL: supported since 1.1.1 https://github.com/openssl/openssl/pull/2287 * BoringSSL: supported since d28f59c27bac (committed 2015-11-19) * LibreSSL: unsupported in at least 2.7.2 (explicitly check for it since it * lies and pretends to be OpenSSL 2.0.0). */ #if (OPENSSL_VERSION_NUMBER >= 0x10101000L && \ !defined(LIBRESSL_VERSION_NUMBER)) || \ defined(OPENSSL_IS_BORINGSSL) #define HAVE_KEYLOG_CALLBACK #endif /* Whether SSL_CTX_set_ciphersuites is available. * OpenSSL: supported since 1.1.1 (commit a53b5be6a05) * BoringSSL: no * LibreSSL: no */ #if ((OPENSSL_VERSION_NUMBER >= 0x10101000L) && \ !defined(LIBRESSL_VERSION_NUMBER) && \ !defined(OPENSSL_IS_BORINGSSL)) #define HAVE_SSL_CTX_SET_CIPHERSUITES #define HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH /* SET_EC_CURVES available under the same preconditions: see * https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set1_groups.html */ #define HAVE_SSL_CTX_SET_EC_CURVES #endif #if defined(LIBRESSL_VERSION_NUMBER) #define OSSL_PACKAGE "LibreSSL" #elif defined(OPENSSL_IS_BORINGSSL) #define OSSL_PACKAGE "BoringSSL" #else #define OSSL_PACKAGE "OpenSSL" #endif #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* up2date versions of OpenSSL maintain the default reasonably secure without * breaking compatibility, so it is better not to override the default by curl */ #define DEFAULT_CIPHER_SELECTION NULL #else /* ... 
but it is not the case with old versions of OpenSSL */ #define DEFAULT_CIPHER_SELECTION \ "ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH" #endif #ifdef HAVE_OPENSSL_SRP /* the function exists */ #ifdef USE_TLS_SRP /* the functionality is not disabled */ #define USE_OPENSSL_SRP #endif #endif struct ssl_backend_data { struct Curl_easy *logger; /* transfer handle to pass trace logs to, only using sockindex 0 */ /* these ones requires specific SSL-types */ SSL_CTX* ctx; SSL* handle; X509* server_cert; #ifndef HAVE_KEYLOG_CALLBACK /* Set to true once a valid keylog entry has been created to avoid dupes. */ bool keylog_done; #endif }; /* * Number of bytes to read from the random number seed file. This must be * a finite value (because some entropy "files" like /dev/urandom have * an infinite length), but must be large enough to provide enough * entropy to properly seed OpenSSL's PRNG. */ #define RAND_LOAD_LENGTH 1024 #ifdef HAVE_KEYLOG_CALLBACK static void ossl_keylog_callback(const SSL *ssl, const char *line) { (void)ssl; Curl_tls_keylog_write_line(line); } #else /* * ossl_log_tls12_secret is called by libcurl to make the CLIENT_RANDOMs if the * OpenSSL being used doesn't have native support for doing that. */ static void ossl_log_tls12_secret(const SSL *ssl, bool *keylog_done) { const SSL_SESSION *session = SSL_get_session(ssl); unsigned char client_random[SSL3_RANDOM_SIZE]; unsigned char master_key[SSL_MAX_MASTER_KEY_LENGTH]; int master_key_length = 0; if(!session || *keylog_done) return; #if OPENSSL_VERSION_NUMBER >= 0x10100000L && \ !(defined(LIBRESSL_VERSION_NUMBER) && \ LIBRESSL_VERSION_NUMBER < 0x20700000L) /* ssl->s3 is not checked in openssl 1.1.0-pre6, but let's assume that * we have a valid SSL context if we have a non-NULL session. */ SSL_get_client_random(ssl, client_random, SSL3_RANDOM_SIZE); master_key_length = (int) SSL_SESSION_get_master_key(session, master_key, SSL_MAX_MASTER_KEY_LENGTH); #else if(ssl->s3 && session->master_key_length > 0) { master_key_length = session->master_key_length; memcpy(master_key, session->master_key, session->master_key_length); memcpy(client_random, ssl->s3->client_random, SSL3_RANDOM_SIZE); } #endif /* The handshake has not progressed sufficiently yet, or this is a TLS 1.3 * session (when curl was built with older OpenSSL headers and running with * newer OpenSSL runtime libraries). 
*/ if(master_key_length <= 0) return; *keylog_done = true; Curl_tls_keylog_write("CLIENT_RANDOM", client_random, master_key, master_key_length); } #endif /* !HAVE_KEYLOG_CALLBACK */ static const char *SSL_ERROR_to_str(int err) { switch(err) { case SSL_ERROR_NONE: return "SSL_ERROR_NONE"; case SSL_ERROR_SSL: return "SSL_ERROR_SSL"; case SSL_ERROR_WANT_READ: return "SSL_ERROR_WANT_READ"; case SSL_ERROR_WANT_WRITE: return "SSL_ERROR_WANT_WRITE"; case SSL_ERROR_WANT_X509_LOOKUP: return "SSL_ERROR_WANT_X509_LOOKUP"; case SSL_ERROR_SYSCALL: return "SSL_ERROR_SYSCALL"; case SSL_ERROR_ZERO_RETURN: return "SSL_ERROR_ZERO_RETURN"; case SSL_ERROR_WANT_CONNECT: return "SSL_ERROR_WANT_CONNECT"; case SSL_ERROR_WANT_ACCEPT: return "SSL_ERROR_WANT_ACCEPT"; #if defined(SSL_ERROR_WANT_ASYNC) case SSL_ERROR_WANT_ASYNC: return "SSL_ERROR_WANT_ASYNC"; #endif #if defined(SSL_ERROR_WANT_ASYNC_JOB) case SSL_ERROR_WANT_ASYNC_JOB: return "SSL_ERROR_WANT_ASYNC_JOB"; #endif #if defined(SSL_ERROR_WANT_EARLY) case SSL_ERROR_WANT_EARLY: return "SSL_ERROR_WANT_EARLY"; #endif default: return "SSL_ERROR unknown"; } } /* Return error string for last OpenSSL error */ static char *ossl_strerror(unsigned long error, char *buf, size_t size) { if(size) *buf = '\0'; #ifdef OPENSSL_IS_BORINGSSL ERR_error_string_n((uint32_t)error, buf, size); #else ERR_error_string_n(error, buf, size); #endif if(size > 1 && !*buf) { strncpy(buf, (error ? "Unknown error" : "No error"), size); buf[size - 1] = '\0'; } return buf; } /* Return an extra data index for the transfer data. * This index can be used with SSL_get_ex_data() and SSL_set_ex_data(). */ static int ossl_get_ssl_data_index(void) { static int ssl_ex_data_data_index = -1; if(ssl_ex_data_data_index < 0) { ssl_ex_data_data_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); } return ssl_ex_data_data_index; } /* Return an extra data index for the connection data. * This index can be used with SSL_get_ex_data() and SSL_set_ex_data(). */ static int ossl_get_ssl_conn_index(void) { static int ssl_ex_data_conn_index = -1; if(ssl_ex_data_conn_index < 0) { ssl_ex_data_conn_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); } return ssl_ex_data_conn_index; } /* Return an extra data index for the sockindex. * This index can be used with SSL_get_ex_data() and SSL_set_ex_data(). */ static int ossl_get_ssl_sockindex_index(void) { static int sockindex_index = -1; if(sockindex_index < 0) { sockindex_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); } return sockindex_index; } /* Return an extra data index for proxy boolean. * This index can be used with SSL_get_ex_data() and SSL_set_ex_data(). */ static int ossl_get_proxy_index(void) { static int proxy_index = -1; if(proxy_index < 0) { proxy_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); } return proxy_index; } static int passwd_callback(char *buf, int num, int encrypting, void *global_passwd) { DEBUGASSERT(0 == encrypting); if(!encrypting) { int klen = curlx_uztosi(strlen((char *)global_passwd)); if(num > klen) { memcpy(buf, global_passwd, klen + 1); return klen; } } return 0; } /* * rand_enough() returns TRUE if we have seeded the random engine properly. */ static bool rand_enough(void) { return (0 != RAND_status()) ? 
TRUE : FALSE; } static CURLcode ossl_seed(struct Curl_easy *data) { /* we have the "SSL is seeded" boolean static to prevent multiple time-consuming seedings in vain */ static bool ssl_seeded = FALSE; char fname[256]; if(ssl_seeded) return CURLE_OK; if(rand_enough()) { /* OpenSSL 1.1.0+ will return here */ ssl_seeded = TRUE; return CURLE_OK; } #ifndef RANDOM_FILE /* if RANDOM_FILE isn't defined, we only perform this if an option tells us to! */ if(data->set.str[STRING_SSL_RANDOM_FILE]) #define RANDOM_FILE "" /* doesn't matter won't be used */ #endif { /* let the option override the define */ RAND_load_file((data->set.str[STRING_SSL_RANDOM_FILE]? data->set.str[STRING_SSL_RANDOM_FILE]: RANDOM_FILE), RAND_LOAD_LENGTH); if(rand_enough()) return CURLE_OK; } #if defined(HAVE_RAND_EGD) /* only available in OpenSSL 0.9.5 and later */ /* EGD_SOCKET is set at configure time or not at all */ #ifndef EGD_SOCKET /* If we don't have the define set, we only do this if the egd-option is set */ if(data->set.str[STRING_SSL_EGDSOCKET]) #define EGD_SOCKET "" /* doesn't matter won't be used */ #endif { /* If there's an option and a define, the option overrides the define */ int ret = RAND_egd(data->set.str[STRING_SSL_EGDSOCKET]? data->set.str[STRING_SSL_EGDSOCKET]:EGD_SOCKET); if(-1 != ret) { if(rand_enough()) return CURLE_OK; } } #endif /* fallback to a custom seeding of the PRNG using a hash based on a current time */ do { unsigned char randb[64]; size_t len = sizeof(randb); size_t i, i_max; for(i = 0, i_max = len / sizeof(struct curltime); i < i_max; ++i) { struct curltime tv = Curl_now(); Curl_wait_ms(1); tv.tv_sec *= i + 1; tv.tv_usec *= (unsigned int)i + 2; tv.tv_sec ^= ((Curl_now().tv_sec + Curl_now().tv_usec) * (i + 3)) << 8; tv.tv_usec ^= (unsigned int) ((Curl_now().tv_sec + Curl_now().tv_usec) * (i + 4)) << 16; memcpy(&randb[i * sizeof(struct curltime)], &tv, sizeof(struct curltime)); } RAND_add(randb, (int)len, (double)len/2); } while(!rand_enough()); /* generates a default path for the random seed file */ fname[0] = 0; /* blank it first */ RAND_file_name(fname, sizeof(fname)); if(fname[0]) { /* we got a file name to try */ RAND_load_file(fname, RAND_LOAD_LENGTH); if(rand_enough()) return CURLE_OK; } infof(data, "libcurl is now using a weak random seed!\n"); return (rand_enough() ? CURLE_OK : CURLE_SSL_CONNECT_ERROR /* confusing error code */); } #ifndef SSL_FILETYPE_ENGINE #define SSL_FILETYPE_ENGINE 42 #endif #ifndef SSL_FILETYPE_PKCS12 #define SSL_FILETYPE_PKCS12 43 #endif static int do_file_type(const char *type) { if(!type || !type[0]) return SSL_FILETYPE_PEM; if(strcasecompare(type, "PEM")) return SSL_FILETYPE_PEM; if(strcasecompare(type, "DER")) return SSL_FILETYPE_ASN1; if(strcasecompare(type, "ENG")) return SSL_FILETYPE_ENGINE; if(strcasecompare(type, "P12")) return SSL_FILETYPE_PKCS12; return -1; } #ifdef USE_OPENSSL_ENGINE /* * Supply default password to the engine user interface conversation. * The password is passed by OpenSSL engine from ENGINE_load_private_key() * last argument to the ui and can be obtained by UI_get0_user_data(ui) here. */ static int ssl_ui_reader(UI *ui, UI_STRING *uis) { const char *password; switch(UI_get_string_type(uis)) { case UIT_PROMPT: case UIT_VERIFY: password = (const char *)UI_get0_user_data(ui); if(password && (UI_get_input_flags(uis) & UI_INPUT_FLAG_DEFAULT_PWD)) { UI_set_result(ui, uis, password); return 1; } default: break; } return (UI_method_get_reader(UI_OpenSSL()))(ui, uis); } /* * Suppress interactive request for a default password if available. 
*/ static int ssl_ui_writer(UI *ui, UI_STRING *uis) { switch(UI_get_string_type(uis)) { case UIT_PROMPT: case UIT_VERIFY: if(UI_get0_user_data(ui) && (UI_get_input_flags(uis) & UI_INPUT_FLAG_DEFAULT_PWD)) { return 1; } default: break; } return (UI_method_get_writer(UI_OpenSSL()))(ui, uis); } /* * Check if a given string is a PKCS#11 URI */ static bool is_pkcs11_uri(const char *string) { return (string && strncasecompare(string, "pkcs11:", 7)); } #endif static CURLcode ossl_set_engine(struct Curl_easy *data, const char *engine); static int SSL_CTX_use_certificate_blob(SSL_CTX *ctx, const struct curl_blob *blob, int type, const char *key_passwd) { int ret = 0; X509 *x = NULL; /* the typecast of blob->len is fine since it is guaranteed to never be larger than CURL_MAX_INPUT_LENGTH */ BIO *in = BIO_new_mem_buf(blob->data, (int)(blob->len)); if(!in) return CURLE_OUT_OF_MEMORY; if(type == SSL_FILETYPE_ASN1) { /* j = ERR_R_ASN1_LIB; */ x = d2i_X509_bio(in, NULL); } else if(type == SSL_FILETYPE_PEM) { /* ERR_R_PEM_LIB; */ x = PEM_read_bio_X509(in, NULL, passwd_callback, (void *)key_passwd); } else { ret = 0; goto end; } if(!x) { ret = 0; goto end; } ret = SSL_CTX_use_certificate(ctx, x); end: X509_free(x); BIO_free(in); return ret; } static int SSL_CTX_use_PrivateKey_blob(SSL_CTX *ctx, const struct curl_blob *blob, int type, const char *key_passwd) { int ret = 0; EVP_PKEY *pkey = NULL; BIO *in = BIO_new_mem_buf(blob->data, (int)(blob->len)); if(!in) return CURLE_OUT_OF_MEMORY; if(type == SSL_FILETYPE_PEM) pkey = PEM_read_bio_PrivateKey(in, NULL, passwd_callback, (void *)key_passwd); else if(type == SSL_FILETYPE_ASN1) pkey = d2i_PrivateKey_bio(in, NULL); else { ret = 0; goto end; } if(!pkey) { ret = 0; goto end; } ret = SSL_CTX_use_PrivateKey(ctx, pkey); EVP_PKEY_free(pkey); end: BIO_free(in); return ret; } static int SSL_CTX_use_certificate_chain_blob(SSL_CTX *ctx, const struct curl_blob *blob, const char *key_passwd) { /* SSL_CTX_add1_chain_cert introduced in OpenSSL 1.0.2 */ #if (OPENSSL_VERSION_NUMBER >= 0x1000200fL) && /* OpenSSL 1.0.2 or later */ \ !(defined(LIBRESSL_VERSION_NUMBER) && \ (LIBRESSL_VERSION_NUMBER < 0x2090100fL)) /* LibreSSL 2.9.1 or later */ int ret = 0; X509 *x = NULL; void *passwd_callback_userdata = (void *)key_passwd; BIO *in = BIO_new_mem_buf(blob->data, (int)(blob->len)); if(!in) return CURLE_OUT_OF_MEMORY; ERR_clear_error(); x = PEM_read_bio_X509_AUX(in, NULL, passwd_callback, (void *)key_passwd); if(!x) { ret = 0; goto end; } ret = SSL_CTX_use_certificate(ctx, x); if(ERR_peek_error() != 0) ret = 0; if(ret) { X509 *ca; unsigned long err; if(!SSL_CTX_clear_chain_certs(ctx)) { ret = 0; goto end; } while((ca = PEM_read_bio_X509(in, NULL, passwd_callback, passwd_callback_userdata)) != NULL) { if(!SSL_CTX_add0_chain_cert(ctx, ca)) { X509_free(ca); ret = 0; goto end; } } err = ERR_peek_last_error(); if((ERR_GET_LIB(err) == ERR_LIB_PEM) && (ERR_GET_REASON(err) == PEM_R_NO_START_LINE)) ERR_clear_error(); else ret = 0; } end: X509_free(x); BIO_free(in); return ret; #else (void)ctx; /* unused */ (void)blob; /* unused */ (void)key_passwd; /* unused */ return 0; #endif } static int cert_stuff(struct Curl_easy *data, SSL_CTX* ctx, char *cert_file, const struct curl_blob *cert_blob, const char *cert_type, char *key_file, const struct curl_blob *key_blob, const char *key_type, char *key_passwd) { char error_buffer[256]; bool check_privkey = TRUE; int file_type = do_file_type(cert_type); if(cert_file || cert_blob || (file_type == SSL_FILETYPE_ENGINE)) { SSL *ssl; X509 *x509; int 
cert_done = 0; int cert_use_result; if(key_passwd) { /* set the password in the callback userdata */ SSL_CTX_set_default_passwd_cb_userdata(ctx, key_passwd); /* Set passwd callback: */ SSL_CTX_set_default_passwd_cb(ctx, passwd_callback); } switch(file_type) { case SSL_FILETYPE_PEM: /* SSL_CTX_use_certificate_chain_file() only works on PEM files */ cert_use_result = cert_blob ? SSL_CTX_use_certificate_chain_blob(ctx, cert_blob, key_passwd) : SSL_CTX_use_certificate_chain_file(ctx, cert_file); if(cert_use_result != 1) { failf(data, "could not load PEM client certificate, " OSSL_PACKAGE " error %s, " "(no key found, wrong pass phrase, or wrong file format?)", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); return 0; } break; case SSL_FILETYPE_ASN1: /* SSL_CTX_use_certificate_file() works with either PEM or ASN1, but we use the case above for PEM so this can only be performed with ASN1 files. */ cert_use_result = cert_blob ? SSL_CTX_use_certificate_blob(ctx, cert_blob, file_type, key_passwd) : SSL_CTX_use_certificate_file(ctx, cert_file, file_type); if(cert_use_result != 1) { failf(data, "could not load ASN1 client certificate, " OSSL_PACKAGE " error %s, " "(no key found, wrong pass phrase, or wrong file format?)", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); return 0; } break; case SSL_FILETYPE_ENGINE: #if defined(USE_OPENSSL_ENGINE) && defined(ENGINE_CTRL_GET_CMD_FROM_NAME) { /* Implicitly use pkcs11 engine if none was provided and the * cert_file is a PKCS#11 URI */ if(!data->state.engine) { if(is_pkcs11_uri(cert_file)) { if(ossl_set_engine(data, "pkcs11") != CURLE_OK) { return 0; } } } if(data->state.engine) { const char *cmd_name = "LOAD_CERT_CTRL"; struct { const char *cert_id; X509 *cert; } params; params.cert_id = cert_file; params.cert = NULL; /* Does the engine supports LOAD_CERT_CTRL ? */ if(!ENGINE_ctrl(data->state.engine, ENGINE_CTRL_GET_CMD_FROM_NAME, 0, (void *)cmd_name, NULL)) { failf(data, "ssl engine does not support loading certificates"); return 0; } /* Load the certificate from the engine */ if(!ENGINE_ctrl_cmd(data->state.engine, cmd_name, 0, &params, NULL, 1)) { failf(data, "ssl engine cannot load client cert with id" " '%s' [%s]", cert_file, ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return 0; } if(!params.cert) { failf(data, "ssl engine didn't initialized the certificate " "properly."); return 0; } if(SSL_CTX_use_certificate(ctx, params.cert) != 1) { failf(data, "unable to set client certificate"); X509_free(params.cert); return 0; } X509_free(params.cert); /* we don't need the handle any more... 
*/ } else { failf(data, "crypto engine not set, can't load certificate"); return 0; } } break; #else failf(data, "file type ENG for certificate not implemented"); return 0; #endif case SSL_FILETYPE_PKCS12: { BIO *cert_bio = NULL; PKCS12 *p12 = NULL; EVP_PKEY *pri; STACK_OF(X509) *ca = NULL; if(cert_blob) { cert_bio = BIO_new_mem_buf(cert_blob->data, (int)(cert_blob->len)); if(!cert_bio) { failf(data, "BIO_new_mem_buf NULL, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); return 0; } } else { cert_bio = BIO_new(BIO_s_file()); if(!cert_bio) { failf(data, "BIO_new return NULL, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); return 0; } if(BIO_read_filename(cert_bio, cert_file) <= 0) { failf(data, "could not open PKCS12 file '%s'", cert_file); BIO_free(cert_bio); return 0; } } p12 = d2i_PKCS12_bio(cert_bio, NULL); BIO_free(cert_bio); if(!p12) { failf(data, "error reading PKCS12 file '%s'", cert_blob ? "(memory blob)" : cert_file); return 0; } PKCS12_PBE_add(); if(!PKCS12_parse(p12, key_passwd, &pri, &x509, &ca)) { failf(data, "could not parse PKCS12 file, check password, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); PKCS12_free(p12); return 0; } PKCS12_free(p12); if(SSL_CTX_use_certificate(ctx, x509) != 1) { failf(data, "could not load PKCS12 client certificate, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); goto fail; } if(SSL_CTX_use_PrivateKey(ctx, pri) != 1) { failf(data, "unable to use private key from PKCS12 file '%s'", cert_file); goto fail; } if(!SSL_CTX_check_private_key (ctx)) { failf(data, "private key from PKCS12 file '%s' " "does not match certificate in same file", cert_file); goto fail; } /* Set Certificate Verification chain */ if(ca) { while(sk_X509_num(ca)) { /* * Note that sk_X509_pop() is used below to make sure the cert is * removed from the stack properly before getting passed to * SSL_CTX_add_extra_chain_cert(), which takes ownership. Previously * we used sk_X509_value() instead, but then we'd clean it in the * subsequent sk_X509_pop_free() call. */ X509 *x = sk_X509_pop(ca); if(!SSL_CTX_add_client_CA(ctx, x)) { X509_free(x); failf(data, "cannot add certificate to client CA list"); goto fail; } if(!SSL_CTX_add_extra_chain_cert(ctx, x)) { X509_free(x); failf(data, "cannot add certificate to certificate chain"); goto fail; } } } cert_done = 1; fail: EVP_PKEY_free(pri); X509_free(x509); #ifdef USE_AMISSL sk_X509_pop_free(ca, Curl_amiga_X509_free); #else sk_X509_pop_free(ca, X509_free); #endif if(!cert_done) return 0; /* failure! */ break; } default: failf(data, "not supported file type '%s' for certificate", cert_type); return 0; } if((!key_file) && (!key_blob)) { key_file = cert_file; key_blob = cert_blob; } else file_type = do_file_type(key_type); switch(file_type) { case SSL_FILETYPE_PEM: if(cert_done) break; /* FALLTHROUGH */ case SSL_FILETYPE_ASN1: cert_use_result = key_blob ? 
SSL_CTX_use_PrivateKey_blob(ctx, key_blob, file_type, key_passwd) : SSL_CTX_use_PrivateKey_file(ctx, key_file, file_type); if(cert_use_result != 1) { failf(data, "unable to set private key file: '%s' type %s", key_file?key_file:"(memory blob)", key_type?key_type:"PEM"); return 0; } break; case SSL_FILETYPE_ENGINE: #ifdef USE_OPENSSL_ENGINE { /* XXXX still needs some work */ EVP_PKEY *priv_key = NULL; /* Implicitly use pkcs11 engine if none was provided and the * key_file is a PKCS#11 URI */ if(!data->state.engine) { if(is_pkcs11_uri(key_file)) { if(ossl_set_engine(data, "pkcs11") != CURLE_OK) { return 0; } } } if(data->state.engine) { UI_METHOD *ui_method = UI_create_method((char *)"curl user interface"); if(!ui_method) { failf(data, "unable do create " OSSL_PACKAGE " user-interface method"); return 0; } UI_method_set_opener(ui_method, UI_method_get_opener(UI_OpenSSL())); UI_method_set_closer(ui_method, UI_method_get_closer(UI_OpenSSL())); UI_method_set_reader(ui_method, ssl_ui_reader); UI_method_set_writer(ui_method, ssl_ui_writer); /* the typecast below was added to please mingw32 */ priv_key = (EVP_PKEY *) ENGINE_load_private_key(data->state.engine, key_file, ui_method, key_passwd); UI_destroy_method(ui_method); if(!priv_key) { failf(data, "failed to load private key from crypto engine"); return 0; } if(SSL_CTX_use_PrivateKey(ctx, priv_key) != 1) { failf(data, "unable to set private key"); EVP_PKEY_free(priv_key); return 0; } EVP_PKEY_free(priv_key); /* we don't need the handle any more... */ } else { failf(data, "crypto engine not set, can't load private key"); return 0; } } break; #else failf(data, "file type ENG for private key not supported"); return 0; #endif case SSL_FILETYPE_PKCS12: if(!cert_done) { failf(data, "file type P12 for private key not supported"); return 0; } break; default: failf(data, "not supported file type for private key"); return 0; } ssl = SSL_new(ctx); if(!ssl) { failf(data, "unable to create an SSL structure"); return 0; } x509 = SSL_get_certificate(ssl); /* This version was provided by Evan Jordan and is supposed to not leak memory as the previous version: */ if(x509) { EVP_PKEY *pktmp = X509_get_pubkey(x509); EVP_PKEY_copy_parameters(pktmp, SSL_get_privatekey(ssl)); EVP_PKEY_free(pktmp); } #if !defined(OPENSSL_NO_RSA) && !defined(OPENSSL_IS_BORINGSSL) { /* If RSA is used, don't check the private key if its flags indicate * it doesn't support it. */ EVP_PKEY *priv_key = SSL_get_privatekey(ssl); int pktype; #ifdef HAVE_OPAQUE_EVP_PKEY pktype = EVP_PKEY_id(priv_key); #else pktype = priv_key->type; #endif if(pktype == EVP_PKEY_RSA) { RSA *rsa = EVP_PKEY_get1_RSA(priv_key); if(RSA_flags(rsa) & RSA_METHOD_FLAG_NO_CHECK) check_privkey = FALSE; RSA_free(rsa); /* Decrement reference count */ } } #endif SSL_free(ssl); /* If we are using DSA, we can copy the parameters from * the private key */ if(check_privkey == TRUE) { /* Now we know that a key and cert have been set against * the SSL context */ if(!SSL_CTX_check_private_key(ctx)) { failf(data, "Private key does not match the certificate public key"); return 0; } } } return 1; } /* returns non-zero on failure */ static int x509_name_oneline(X509_NAME *a, char *buf, size_t size) { BIO *bio_out = BIO_new(BIO_s_mem()); BUF_MEM *biomem; int rc; if(!bio_out) return 1; /* alloc failed! 
*/ rc = X509_NAME_print_ex(bio_out, a, 0, XN_FLAG_SEP_SPLUS_SPC); BIO_get_mem_ptr(bio_out, &biomem); if((size_t)biomem->length < size) size = biomem->length; else size--; /* don't overwrite the buffer end */ memcpy(buf, biomem->data, size); buf[size] = 0; BIO_free(bio_out); return !rc; } /** * Global SSL init * * @retval 0 error initializing SSL * @retval 1 SSL initialized successfully */ static int ossl_init(void) { #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && \ !defined(LIBRESSL_VERSION_NUMBER) const uint64_t flags = #ifdef OPENSSL_INIT_ENGINE_ALL_BUILTIN /* not present in BoringSSL */ OPENSSL_INIT_ENGINE_ALL_BUILTIN | #endif #ifdef CURL_DISABLE_OPENSSL_AUTO_LOAD_CONFIG OPENSSL_INIT_NO_LOAD_CONFIG | #else OPENSSL_INIT_LOAD_CONFIG | #endif 0; OPENSSL_init_ssl(flags, NULL); #else OPENSSL_load_builtin_modules(); #ifdef USE_OPENSSL_ENGINE ENGINE_load_builtin_engines(); #endif /* CONF_MFLAGS_DEFAULT_SECTION was introduced some time between 0.9.8b and 0.9.8e */ #ifndef CONF_MFLAGS_DEFAULT_SECTION #define CONF_MFLAGS_DEFAULT_SECTION 0x0 #endif #ifndef CURL_DISABLE_OPENSSL_AUTO_LOAD_CONFIG CONF_modules_load_file(NULL, NULL, CONF_MFLAGS_DEFAULT_SECTION| CONF_MFLAGS_IGNORE_MISSING_FILE); #endif /* Lets get nice error messages */ SSL_load_error_strings(); /* Init the global ciphers and digests */ if(!SSLeay_add_ssl_algorithms()) return 0; OpenSSL_add_all_algorithms(); #endif Curl_tls_keylog_open(); /* Initialize the extra data indexes */ if(ossl_get_ssl_data_index() < 0 || ossl_get_ssl_conn_index() < 0 || ossl_get_ssl_sockindex_index() < 0 || ossl_get_proxy_index() < 0) return 0; return 1; } /* Global cleanup */ static void ossl_cleanup(void) { #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && \ !defined(LIBRESSL_VERSION_NUMBER) /* OpenSSL 1.1 deprecates all these cleanup functions and turns them into no-ops in OpenSSL 1.0 compatibility mode */ #else /* Free ciphers and digests lists */ EVP_cleanup(); #ifdef USE_OPENSSL_ENGINE /* Free engine list */ ENGINE_cleanup(); #endif /* Free OpenSSL error strings */ ERR_free_strings(); /* Free thread local error state, destroying hash upon zero refcount */ #ifdef HAVE_ERR_REMOVE_THREAD_STATE ERR_remove_thread_state(NULL); #else ERR_remove_state(0); #endif /* Free all memory allocated by all configuration modules */ CONF_modules_free(); #ifdef HAVE_SSL_COMP_FREE_COMPRESSION_METHODS SSL_COMP_free_compression_methods(); #endif #endif Curl_tls_keylog_close(); } /* * This function is used to determine connection status. * * Return codes: * 1 means the connection is still in place * 0 means the connection has been closed * -1 means the connection status is unknown */ static int ossl_check_cxn(struct connectdata *conn) { /* SSL_peek takes data out of the raw recv buffer without peeking so we use recv MSG_PEEK instead. 
Bug #795 */ #ifdef MSG_PEEK char buf; ssize_t nread; nread = recv((RECV_TYPE_ARG1)conn->sock[FIRSTSOCKET], (RECV_TYPE_ARG2)&buf, (RECV_TYPE_ARG3)1, (RECV_TYPE_ARG4)MSG_PEEK); if(nread == 0) return 0; /* connection has been closed */ if(nread == 1) return 1; /* connection still in place */ else if(nread == -1) { int err = SOCKERRNO; if(err == EINPROGRESS || #if defined(EAGAIN) && (EAGAIN != EWOULDBLOCK) err == EAGAIN || #endif err == EWOULDBLOCK) return 1; /* connection still in place */ if(err == ECONNRESET || #ifdef ECONNABORTED err == ECONNABORTED || #endif #ifdef ENETDOWN err == ENETDOWN || #endif #ifdef ENETRESET err == ENETRESET || #endif #ifdef ESHUTDOWN err == ESHUTDOWN || #endif #ifdef ETIMEDOUT err == ETIMEDOUT || #endif err == ENOTCONN) return 0; /* connection has been closed */ } #endif return -1; /* connection status unknown */ } /* Selects an OpenSSL crypto engine */ static CURLcode ossl_set_engine(struct Curl_easy *data, const char *engine) { #ifdef USE_OPENSSL_ENGINE ENGINE *e; #if OPENSSL_VERSION_NUMBER >= 0x00909000L e = ENGINE_by_id(engine); #else /* avoid memory leak */ for(e = ENGINE_get_first(); e; e = ENGINE_get_next(e)) { const char *e_id = ENGINE_get_id(e); if(!strcmp(engine, e_id)) break; } #endif if(!e) { failf(data, "SSL Engine '%s' not found", engine); return CURLE_SSL_ENGINE_NOTFOUND; } if(data->state.engine) { ENGINE_finish(data->state.engine); ENGINE_free(data->state.engine); data->state.engine = NULL; } if(!ENGINE_init(e)) { char buf[256]; ENGINE_free(e); failf(data, "Failed to initialise SSL Engine '%s': %s", engine, ossl_strerror(ERR_get_error(), buf, sizeof(buf))); return CURLE_SSL_ENGINE_INITFAILED; } data->state.engine = e; return CURLE_OK; #else (void)engine; failf(data, "SSL Engine not supported"); return CURLE_SSL_ENGINE_NOTFOUND; #endif } /* Sets engine as default for all SSL operations */ static CURLcode ossl_set_engine_default(struct Curl_easy *data) { #ifdef USE_OPENSSL_ENGINE if(data->state.engine) { if(ENGINE_set_default(data->state.engine, ENGINE_METHOD_ALL) > 0) { infof(data, "set default crypto engine '%s'\n", ENGINE_get_id(data->state.engine)); } else { failf(data, "set default crypto engine '%s' failed", ENGINE_get_id(data->state.engine)); return CURLE_SSL_ENGINE_SETFAILED; } } #else (void) data; #endif return CURLE_OK; } /* Return list of OpenSSL crypto engine names. */ static struct curl_slist *ossl_engines_list(struct Curl_easy *data) { struct curl_slist *list = NULL; #ifdef USE_OPENSSL_ENGINE struct curl_slist *beg; ENGINE *e; for(e = ENGINE_get_first(); e; e = ENGINE_get_next(e)) { beg = curl_slist_append(list, ENGINE_get_id(e)); if(!beg) { curl_slist_free_all(list); return NULL; } list = beg; } #endif (void) data; return list; } #define set_logger(conn, data) \ conn->ssl[0].backend->logger = data static void ossl_closeone(struct Curl_easy *data, struct connectdata *conn, struct ssl_connect_data *connssl) { struct ssl_backend_data *backend = connssl->backend; if(backend->handle) { set_logger(conn, data); (void)SSL_shutdown(backend->handle); SSL_set_connect_state(backend->handle); SSL_free(backend->handle); backend->handle = NULL; } if(backend->ctx) { SSL_CTX_free(backend->ctx); backend->ctx = NULL; } } /* * This function is called when an SSL connection is closed. 
*/ static void ossl_close(struct Curl_easy *data, struct connectdata *conn, int sockindex) { ossl_closeone(data, conn, &conn->ssl[sockindex]); #ifndef CURL_DISABLE_PROXY ossl_closeone(data, conn, &conn->proxy_ssl[sockindex]); #endif } /* * This function is called to shut down the SSL layer but keep the * socket open (CCC - Clear Command Channel) */ static int ossl_shutdown(struct Curl_easy *data, struct connectdata *conn, int sockindex) { int retval = 0; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; char buf[256]; /* We will use this for the OpenSSL error buffer, so it has to be at least 256 bytes long. */ unsigned long sslerror; ssize_t nread; int buffsize; int err; bool done = FALSE; struct ssl_backend_data *backend = connssl->backend; #ifndef CURL_DISABLE_FTP /* This has only been tested on the proftpd server, and the mod_tls code sends a close notify alert without waiting for a close notify alert in response. Thus we wait for a close notify alert from the server, but we do not send one. Let's hope other servers do the same... */ if(data->set.ftp_ccc == CURLFTPSSL_CCC_ACTIVE) (void)SSL_shutdown(backend->handle); #endif if(backend->handle) { buffsize = (int)sizeof(buf); while(!done) { int what = SOCKET_READABLE(conn->sock[sockindex], SSL_SHUTDOWN_TIMEOUT); if(what > 0) { ERR_clear_error(); /* Something to read, let's do it and hope that it is the close notify alert from the server */ nread = (ssize_t)SSL_read(backend->handle, buf, buffsize); err = SSL_get_error(backend->handle, (int)nread); switch(err) { case SSL_ERROR_NONE: /* this is not an error */ case SSL_ERROR_ZERO_RETURN: /* no more data */ /* This is the expected response. There was no data but only the close notify alert */ done = TRUE; break; case SSL_ERROR_WANT_READ: /* there's data pending, re-invoke SSL_read() */ infof(data, "SSL_ERROR_WANT_READ\n"); break; case SSL_ERROR_WANT_WRITE: /* SSL wants a write. Really odd. Let's bail out. */ infof(data, "SSL_ERROR_WANT_WRITE\n"); done = TRUE; break; default: /* openssl/ssl.h says "look at error stack/return value/errno" */ sslerror = ERR_get_error(); failf(data, OSSL_PACKAGE " SSL_read on shutdown: %s, errno %d", (sslerror ? ossl_strerror(sslerror, buf, sizeof(buf)) : SSL_ERROR_to_str(err)), SOCKERRNO); done = TRUE; break; } } else if(0 == what) { /* timeout */ failf(data, "SSL shutdown timeout"); done = TRUE; } else { /* anything that gets here is fatally bad */ failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO); retval = -1; done = TRUE; } } /* while()-loop for the select() */ if(data->set.verbose) { #ifdef HAVE_SSL_GET_SHUTDOWN switch(SSL_get_shutdown(backend->handle)) { case SSL_SENT_SHUTDOWN: infof(data, "SSL_get_shutdown() returned SSL_SENT_SHUTDOWN\n"); break; case SSL_RECEIVED_SHUTDOWN: infof(data, "SSL_get_shutdown() returned SSL_RECEIVED_SHUTDOWN\n"); break; case SSL_SENT_SHUTDOWN|SSL_RECEIVED_SHUTDOWN: infof(data, "SSL_get_shutdown() returned SSL_SENT_SHUTDOWN|" "SSL_RECEIVED__SHUTDOWN\n"); break; } #endif } SSL_free(backend->handle); backend->handle = NULL; } return retval; } static void ossl_session_free(void *ptr) { /* free the ID */ SSL_SESSION_free(ptr); } /* * This function is called when the 'data' struct is going away. Close * down everything and free all resources! 
*/ static void ossl_close_all(struct Curl_easy *data) { #ifdef USE_OPENSSL_ENGINE if(data->state.engine) { ENGINE_finish(data->state.engine); ENGINE_free(data->state.engine); data->state.engine = NULL; } #else (void)data; #endif #if !defined(HAVE_ERR_REMOVE_THREAD_STATE_DEPRECATED) && \ defined(HAVE_ERR_REMOVE_THREAD_STATE) /* OpenSSL 1.0.1 and 1.0.2 build an error queue that is stored per-thread so we need to clean it here in case the thread will be killed. All OpenSSL code should extract the error in association with the error so clearing this queue here should be harmless at worst. */ ERR_remove_thread_state(NULL); #endif } /* ====================================================== */ /* * Match subjectAltName against the host name. This requires a conversion * in CURL_DOES_CONVERSIONS builds. */ static bool subj_alt_hostcheck(struct Curl_easy *data, const char *match_pattern, const char *hostname, const char *dispname) #ifdef CURL_DOES_CONVERSIONS { bool res = FALSE; /* Curl_cert_hostcheck uses host encoding, but we get ASCII from OpenSSl. */ char *match_pattern2 = strdup(match_pattern); if(match_pattern2) { if(Curl_convert_from_network(data, match_pattern2, strlen(match_pattern2)) == CURLE_OK) { if(Curl_cert_hostcheck(match_pattern2, hostname)) { res = TRUE; infof(data, " subjectAltName: host \"%s\" matched cert's \"%s\"\n", dispname, match_pattern2); } } free(match_pattern2); } else { failf(data, "SSL: out of memory when allocating temporary for subjectAltName"); } return res; } #else { #ifdef CURL_DISABLE_VERBOSE_STRINGS (void)dispname; (void)data; #endif if(Curl_cert_hostcheck(match_pattern, hostname)) { infof(data, " subjectAltName: host \"%s\" matched cert's \"%s\"\n", dispname, match_pattern); return TRUE; } return FALSE; } #endif /* Quote from RFC2818 section 3.1 "Server Identity" If a subjectAltName extension of type dNSName is present, that MUST be used as the identity. Otherwise, the (most specific) Common Name field in the Subject field of the certificate MUST be used. Although the use of the Common Name is existing practice, it is deprecated and Certification Authorities are encouraged to use the dNSName instead. Matching is performed using the matching rules specified by [RFC2459]. If more than one identity of a given type is present in the certificate (e.g., more than one dNSName name, a match in any one of the set is considered acceptable.) Names may contain the wildcard character * which is considered to match any single domain name component or component fragment. E.g., *.a.com matches foo.a.com but not bar.foo.a.com. f*.com matches foo.com but not bar.com. In some cases, the URI is specified as an IP address rather than a hostname. In this case, the iPAddress subjectAltName must be present in the certificate and must exactly match the IP in the URI. 
*/ static CURLcode verifyhost(struct Curl_easy *data, struct connectdata *conn, X509 *server_cert) { bool matched = FALSE; int target = GEN_DNS; /* target type, GEN_DNS or GEN_IPADD */ size_t addrlen = 0; STACK_OF(GENERAL_NAME) *altnames; #ifdef ENABLE_IPV6 struct in6_addr addr; #else struct in_addr addr; #endif CURLcode result = CURLE_OK; bool dNSName = FALSE; /* if a dNSName field exists in the cert */ bool iPAddress = FALSE; /* if a iPAddress field exists in the cert */ const char * const hostname = SSL_HOST_NAME(); const char * const dispname = SSL_HOST_DISPNAME(); #ifdef ENABLE_IPV6 if(conn->bits.ipv6_ip && Curl_inet_pton(AF_INET6, hostname, &addr)) { target = GEN_IPADD; addrlen = sizeof(struct in6_addr); } else #endif if(Curl_inet_pton(AF_INET, hostname, &addr)) { target = GEN_IPADD; addrlen = sizeof(struct in_addr); } /* get a "list" of alternative names */ altnames = X509_get_ext_d2i(server_cert, NID_subject_alt_name, NULL, NULL); if(altnames) { #ifdef OPENSSL_IS_BORINGSSL size_t numalts; size_t i; #else int numalts; int i; #endif bool dnsmatched = FALSE; bool ipmatched = FALSE; /* get amount of alternatives, RFC2459 claims there MUST be at least one, but we don't depend on it... */ numalts = sk_GENERAL_NAME_num(altnames); /* loop through all alternatives - until a dnsmatch */ for(i = 0; (i < numalts) && !dnsmatched; i++) { /* get a handle to alternative name number i */ const GENERAL_NAME *check = sk_GENERAL_NAME_value(altnames, i); if(check->type == GEN_DNS) dNSName = TRUE; else if(check->type == GEN_IPADD) iPAddress = TRUE; /* only check alternatives of the same type the target is */ if(check->type == target) { /* get data and length */ const char *altptr = (char *)ASN1_STRING_get0_data(check->d.ia5); size_t altlen = (size_t) ASN1_STRING_length(check->d.ia5); switch(target) { case GEN_DNS: /* name/pattern comparison */ /* The OpenSSL man page explicitly says: "In general it cannot be assumed that the data returned by ASN1_STRING_data() is null terminated or does not contain embedded nulls." But also that "The actual format of the data will depend on the actual string type itself: for example for an IA5String the data will be ASCII" It has been however verified that in 0.9.6 and 0.9.7, IA5String is always null-terminated. */ if((altlen == strlen(altptr)) && /* if this isn't true, there was an embedded zero in the name string and we cannot match it. */ subj_alt_hostcheck(data, altptr, hostname, dispname)) { dnsmatched = TRUE; } break; case GEN_IPADD: /* IP address comparison */ /* compare alternative IP address if the data chunk is the same size our server IP address is */ if((altlen == addrlen) && !memcmp(altptr, &addr, altlen)) { ipmatched = TRUE; infof(data, " subjectAltName: host \"%s\" matched cert's IP address!\n", dispname); } break; } } } GENERAL_NAMES_free(altnames); if(dnsmatched || ipmatched) matched = TRUE; } if(matched) /* an alternative name matched */ ; else if(dNSName || iPAddress) { infof(data, " subjectAltName does not match %s\n", dispname); failf(data, "SSL: no alternative certificate subject name matches " "target host name '%s'", dispname); result = CURLE_PEER_FAILED_VERIFICATION; } else { /* we have to look to the last occurrence of a commonName in the distinguished one to get the most significant one. 
*/ int j, i = -1; /* The following is done because of a bug in 0.9.6b */ unsigned char *nulstr = (unsigned char *)""; unsigned char *peer_CN = nulstr; X509_NAME *name = X509_get_subject_name(server_cert); if(name) while((j = X509_NAME_get_index_by_NID(name, NID_commonName, i)) >= 0) i = j; /* we have the name entry and we will now convert this to a string that we can use for comparison. Doing this we support BMPstring, UTF8 etc. */ if(i >= 0) { ASN1_STRING *tmp = X509_NAME_ENTRY_get_data(X509_NAME_get_entry(name, i)); /* In OpenSSL 0.9.7d and earlier, ASN1_STRING_to_UTF8 fails if the input is already UTF-8 encoded. We check for this case and copy the raw string manually to avoid the problem. This code can be made conditional in the future when OpenSSL has been fixed. */ if(tmp) { if(ASN1_STRING_type(tmp) == V_ASN1_UTF8STRING) { j = ASN1_STRING_length(tmp); if(j >= 0) { peer_CN = OPENSSL_malloc(j + 1); if(peer_CN) { memcpy(peer_CN, ASN1_STRING_get0_data(tmp), j); peer_CN[j] = '\0'; } } } else /* not a UTF8 name */ j = ASN1_STRING_to_UTF8(&peer_CN, tmp); if(peer_CN && (curlx_uztosi(strlen((char *)peer_CN)) != j)) { /* there was a terminating zero before the end of string, this cannot match and we return failure! */ failf(data, "SSL: illegal cert name field"); result = CURLE_PEER_FAILED_VERIFICATION; } } } if(peer_CN == nulstr) peer_CN = NULL; else { /* convert peer_CN from UTF8 */ CURLcode rc = Curl_convert_from_utf8(data, (char *)peer_CN, strlen((char *)peer_CN)); /* Curl_convert_from_utf8 calls failf if unsuccessful */ if(rc) { OPENSSL_free(peer_CN); return rc; } } if(result) /* error already detected, pass through */ ; else if(!peer_CN) { failf(data, "SSL: unable to obtain common name from peer certificate"); result = CURLE_PEER_FAILED_VERIFICATION; } else if(!Curl_cert_hostcheck((const char *)peer_CN, hostname)) { failf(data, "SSL: certificate subject name '%s' does not match " "target host name '%s'", peer_CN, dispname); result = CURLE_PEER_FAILED_VERIFICATION; } else { infof(data, " common name: %s (matched)\n", peer_CN); } if(peer_CN) OPENSSL_free(peer_CN); } return result; } #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) static CURLcode verifystatus(struct Curl_easy *data, struct ssl_connect_data *connssl) { int i, ocsp_status; unsigned char *status; const unsigned char *p; CURLcode result = CURLE_OK; OCSP_RESPONSE *rsp = NULL; OCSP_BASICRESP *br = NULL; X509_STORE *st = NULL; STACK_OF(X509) *ch = NULL; struct ssl_backend_data *backend = connssl->backend; X509 *cert; OCSP_CERTID *id = NULL; int cert_status, crl_reason; ASN1_GENERALIZEDTIME *rev, *thisupd, *nextupd; int ret; long len = SSL_get_tlsext_status_ocsp_resp(backend->handle, &status); if(!status) { failf(data, "No OCSP response received"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } p = status; rsp = d2i_OCSP_RESPONSE(NULL, &p, len); if(!rsp) { failf(data, "Invalid OCSP response"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } ocsp_status = OCSP_response_status(rsp); if(ocsp_status != OCSP_RESPONSE_STATUS_SUCCESSFUL) { failf(data, "Invalid OCSP response status: %s (%d)", OCSP_response_status_str(ocsp_status), ocsp_status); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } br = OCSP_response_get1_basic(rsp); if(!br) { failf(data, "Invalid OCSP response"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } ch = SSL_get_peer_cert_chain(backend->handle); st = SSL_CTX_get_cert_store(backend->ctx); #if ((OPENSSL_VERSION_NUMBER <= 0x1000201fL) /* Fixed after 1.0.2a 
*/ || \ (defined(LIBRESSL_VERSION_NUMBER) && \ LIBRESSL_VERSION_NUMBER <= 0x2040200fL)) /* The authorized responder cert in the OCSP response MUST be signed by the peer cert's issuer (see RFC6960 section 4.2.2.2). If that's a root cert, no problem, but if it's an intermediate cert OpenSSL has a bug where it expects this issuer to be present in the chain embedded in the OCSP response. So we add it if necessary. */ /* First make sure the peer cert chain includes both a peer and an issuer, and the OCSP response contains a responder cert. */ if(sk_X509_num(ch) >= 2 && sk_X509_num(br->certs) >= 1) { X509 *responder = sk_X509_value(br->certs, sk_X509_num(br->certs) - 1); /* Find issuer of responder cert and add it to the OCSP response chain */ for(i = 0; i < sk_X509_num(ch); i++) { X509 *issuer = sk_X509_value(ch, i); if(X509_check_issued(issuer, responder) == X509_V_OK) { if(!OCSP_basic_add1_cert(br, issuer)) { failf(data, "Could not add issuer cert to OCSP response"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } } } } #endif if(OCSP_basic_verify(br, ch, st, 0) <= 0) { failf(data, "OCSP response verification failed"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } /* Compute the certificate's ID */ cert = SSL_get_peer_certificate(backend->handle); if(!cert) { failf(data, "Error getting peer certificate"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } for(i = 0; i < sk_X509_num(ch); i++) { X509 *issuer = sk_X509_value(ch, i); if(X509_check_issued(issuer, cert) == X509_V_OK) { id = OCSP_cert_to_id(EVP_sha1(), cert, issuer); break; } } X509_free(cert); if(!id) { failf(data, "Error computing OCSP ID"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } /* Find the single OCSP response corresponding to the certificate ID */ ret = OCSP_resp_find_status(br, id, &cert_status, &crl_reason, &rev, &thisupd, &nextupd); OCSP_CERTID_free(id); if(ret != 1) { failf(data, "Could not find certificate ID in OCSP response"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } /* Validate the corresponding single OCSP response */ if(!OCSP_check_validity(thisupd, nextupd, 300L, -1L)) { failf(data, "OCSP response has expired"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } infof(data, "SSL certificate status: %s (%d)\n", OCSP_cert_status_str(cert_status), cert_status); switch(cert_status) { case V_OCSP_CERTSTATUS_GOOD: break; case V_OCSP_CERTSTATUS_REVOKED: result = CURLE_SSL_INVALIDCERTSTATUS; failf(data, "SSL certificate revocation reason: %s (%d)", OCSP_crl_reason_str(crl_reason), crl_reason); goto end; case V_OCSP_CERTSTATUS_UNKNOWN: default: result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } end: if(br) OCSP_BASICRESP_free(br); OCSP_RESPONSE_free(rsp); return result; } #endif #endif /* USE_OPENSSL */ /* The SSL_CTRL_SET_MSG_CALLBACK doesn't exist in ancient OpenSSL versions and thus this cannot be done there. 
*/ #ifdef SSL_CTRL_SET_MSG_CALLBACK static const char *ssl_msg_type(int ssl_ver, int msg) { #ifdef SSL2_VERSION_MAJOR if(ssl_ver == SSL2_VERSION_MAJOR) { switch(msg) { case SSL2_MT_ERROR: return "Error"; case SSL2_MT_CLIENT_HELLO: return "Client hello"; case SSL2_MT_CLIENT_MASTER_KEY: return "Client key"; case SSL2_MT_CLIENT_FINISHED: return "Client finished"; case SSL2_MT_SERVER_HELLO: return "Server hello"; case SSL2_MT_SERVER_VERIFY: return "Server verify"; case SSL2_MT_SERVER_FINISHED: return "Server finished"; case SSL2_MT_REQUEST_CERTIFICATE: return "Request CERT"; case SSL2_MT_CLIENT_CERTIFICATE: return "Client CERT"; } } else #endif if(ssl_ver == SSL3_VERSION_MAJOR) { switch(msg) { case SSL3_MT_HELLO_REQUEST: return "Hello request"; case SSL3_MT_CLIENT_HELLO: return "Client hello"; case SSL3_MT_SERVER_HELLO: return "Server hello"; #ifdef SSL3_MT_NEWSESSION_TICKET case SSL3_MT_NEWSESSION_TICKET: return "Newsession Ticket"; #endif case SSL3_MT_CERTIFICATE: return "Certificate"; case SSL3_MT_SERVER_KEY_EXCHANGE: return "Server key exchange"; case SSL3_MT_CLIENT_KEY_EXCHANGE: return "Client key exchange"; case SSL3_MT_CERTIFICATE_REQUEST: return "Request CERT"; case SSL3_MT_SERVER_DONE: return "Server finished"; case SSL3_MT_CERTIFICATE_VERIFY: return "CERT verify"; case SSL3_MT_FINISHED: return "Finished"; #ifdef SSL3_MT_CERTIFICATE_STATUS case SSL3_MT_CERTIFICATE_STATUS: return "Certificate Status"; #endif #ifdef SSL3_MT_ENCRYPTED_EXTENSIONS case SSL3_MT_ENCRYPTED_EXTENSIONS: return "Encrypted Extensions"; #endif #ifdef SSL3_MT_END_OF_EARLY_DATA case SSL3_MT_END_OF_EARLY_DATA: return "End of early data"; #endif #ifdef SSL3_MT_KEY_UPDATE case SSL3_MT_KEY_UPDATE: return "Key update"; #endif #ifdef SSL3_MT_NEXT_PROTO case SSL3_MT_NEXT_PROTO: return "Next protocol"; #endif #ifdef SSL3_MT_MESSAGE_HASH case SSL3_MT_MESSAGE_HASH: return "Message hash"; #endif } } return "Unknown"; } static const char *tls_rt_type(int type) { switch(type) { #ifdef SSL3_RT_HEADER case SSL3_RT_HEADER: return "TLS header"; #endif case SSL3_RT_CHANGE_CIPHER_SPEC: return "TLS change cipher"; case SSL3_RT_ALERT: return "TLS alert"; case SSL3_RT_HANDSHAKE: return "TLS handshake"; case SSL3_RT_APPLICATION_DATA: return "TLS app data"; default: return "TLS Unknown"; } } /* * Our callback from the SSL/TLS layers. */ static void ossl_trace(int direction, int ssl_ver, int content_type, const void *buf, size_t len, SSL *ssl, void *userp) { char unknown[32]; const char *verstr = NULL; struct connectdata *conn = userp; struct ssl_connect_data *connssl = &conn->ssl[0]; struct ssl_backend_data *backend = connssl->backend; struct Curl_easy *data = backend->logger; if(!conn || !data || !data->set.fdebug || (direction != 0 && direction != 1)) return; switch(ssl_ver) { #ifdef SSL2_VERSION /* removed in recent versions */ case SSL2_VERSION: verstr = "SSLv2"; break; #endif #ifdef SSL3_VERSION case SSL3_VERSION: verstr = "SSLv3"; break; #endif case TLS1_VERSION: verstr = "TLSv1.0"; break; #ifdef TLS1_1_VERSION case TLS1_1_VERSION: verstr = "TLSv1.1"; break; #endif #ifdef TLS1_2_VERSION case TLS1_2_VERSION: verstr = "TLSv1.2"; break; #endif #ifdef TLS1_3_VERSION case TLS1_3_VERSION: verstr = "TLSv1.3"; break; #endif case 0: break; default: msnprintf(unknown, sizeof(unknown), "(%x)", ssl_ver); verstr = unknown; break; } /* Log progress for interesting records only (like Handshake or Alert), skip * all raw record headers (content_type == SSL3_RT_HEADER or ssl_ver == 0). 
* For TLS 1.3, skip notification of the decrypted inner Content Type. */ if(ssl_ver #ifdef SSL3_RT_INNER_CONTENT_TYPE && content_type != SSL3_RT_INNER_CONTENT_TYPE #endif ) { const char *msg_name, *tls_rt_name; char ssl_buf[1024]; int msg_type, txt_len; /* the info given when the version is zero is not that useful for us */ ssl_ver >>= 8; /* check the upper 8 bits only below */ /* SSLv2 doesn't seem to have TLS record-type headers, so OpenSSL * always pass-up content-type as 0. But the interesting message-type * is at 'buf[0]'. */ if(ssl_ver == SSL3_VERSION_MAJOR && content_type) tls_rt_name = tls_rt_type(content_type); else tls_rt_name = ""; if(content_type == SSL3_RT_CHANGE_CIPHER_SPEC) { msg_type = *(char *)buf; msg_name = "Change cipher spec"; } else if(content_type == SSL3_RT_ALERT) { msg_type = (((char *)buf)[0] << 8) + ((char *)buf)[1]; msg_name = SSL_alert_desc_string_long(msg_type); } else { msg_type = *(char *)buf; msg_name = ssl_msg_type(ssl_ver, msg_type); } txt_len = msnprintf(ssl_buf, sizeof(ssl_buf), "%s (%s), %s, %s (%d):\n", verstr, direction?"OUT":"IN", tls_rt_name, msg_name, msg_type); if(0 <= txt_len && (unsigned)txt_len < sizeof(ssl_buf)) { Curl_debug(data, CURLINFO_TEXT, ssl_buf, (size_t)txt_len); } } Curl_debug(data, (direction == 1) ? CURLINFO_SSL_DATA_OUT : CURLINFO_SSL_DATA_IN, (char *)buf, len); (void) ssl; } #endif #ifdef USE_OPENSSL /* ====================================================== */ #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME # define use_sni(x) sni = (x) #else # define use_sni(x) Curl_nop_stmt #endif /* Check for OpenSSL 1.0.2 which has ALPN support. */ #undef HAS_ALPN #if OPENSSL_VERSION_NUMBER >= 0x10002000L \ && !defined(OPENSSL_NO_TLSEXT) # define HAS_ALPN 1 #endif /* Check for OpenSSL 1.0.1 which has NPN support. */ #undef HAS_NPN #if OPENSSL_VERSION_NUMBER >= 0x10001000L \ && !defined(OPENSSL_NO_TLSEXT) \ && !defined(OPENSSL_NO_NEXTPROTONEG) # define HAS_NPN 1 #endif #ifdef HAS_NPN /* * in is a list of length prefixed strings. this function has to select * the protocol we want to use from the list and write its string into out. */ static int select_next_protocol(unsigned char **out, unsigned char *outlen, const unsigned char *in, unsigned int inlen, const char *key, unsigned int keylen) { unsigned int i; for(i = 0; i + keylen <= inlen; i += in[i] + 1) { if(memcmp(&in[i + 1], key, keylen) == 0) { *out = (unsigned char *) &in[i + 1]; *outlen = in[i]; return 0; } } return -1; } static int select_next_proto_cb(SSL *ssl, unsigned char **out, unsigned char *outlen, const unsigned char *in, unsigned int inlen, void *arg) { struct Curl_easy *data = (struct Curl_easy *)arg; struct connectdata *conn = data->conn; (void)ssl; #ifdef USE_HTTP2 if(data->state.httpwant >= CURL_HTTP_VERSION_2 && !select_next_protocol(out, outlen, in, inlen, ALPN_H2, ALPN_H2_LENGTH)) { infof(data, "NPN, negotiated HTTP2 (%s)\n", ALPN_H2); conn->negnpn = CURL_HTTP_VERSION_2; return SSL_TLSEXT_ERR_OK; } #endif if(!select_next_protocol(out, outlen, in, inlen, ALPN_HTTP_1_1, ALPN_HTTP_1_1_LENGTH)) { infof(data, "NPN, negotiated HTTP1.1\n"); conn->negnpn = CURL_HTTP_VERSION_1_1; return SSL_TLSEXT_ERR_OK; } infof(data, "NPN, no overlap, use HTTP1.1\n"); *out = (unsigned char *)ALPN_HTTP_1_1; *outlen = ALPN_HTTP_1_1_LENGTH; conn->negnpn = CURL_HTTP_VERSION_1_1; return SSL_TLSEXT_ERR_OK; } #endif /* HAS_NPN */ #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* 1.1.0 */ static CURLcode set_ssl_version_min_max(SSL_CTX *ctx, struct connectdata *conn) { /* first, TLS min version... 
*/ long curl_ssl_version_min = SSL_CONN_CONFIG(version); long curl_ssl_version_max; /* convert cURL min SSL version option to OpenSSL constant */ #if defined(OPENSSL_IS_BORINGSSL) || defined(LIBRESSL_VERSION_NUMBER) uint16_t ossl_ssl_version_min = 0; uint16_t ossl_ssl_version_max = 0; #else long ossl_ssl_version_min = 0; long ossl_ssl_version_max = 0; #endif switch(curl_ssl_version_min) { case CURL_SSLVERSION_TLSv1: /* TLS 1.x */ case CURL_SSLVERSION_TLSv1_0: ossl_ssl_version_min = TLS1_VERSION; break; case CURL_SSLVERSION_TLSv1_1: ossl_ssl_version_min = TLS1_1_VERSION; break; case CURL_SSLVERSION_TLSv1_2: ossl_ssl_version_min = TLS1_2_VERSION; break; #ifdef TLS1_3_VERSION case CURL_SSLVERSION_TLSv1_3: ossl_ssl_version_min = TLS1_3_VERSION; break; #endif } /* CURL_SSLVERSION_DEFAULT means that no option was selected. We don't want to pass 0 to SSL_CTX_set_min_proto_version as it would enable all versions down to the lowest supported by the library. So we skip this, and stay with the OS default */ if(curl_ssl_version_min != CURL_SSLVERSION_DEFAULT) { if(!SSL_CTX_set_min_proto_version(ctx, ossl_ssl_version_min)) { return CURLE_SSL_CONNECT_ERROR; } } /* ... then, TLS max version */ curl_ssl_version_max = SSL_CONN_CONFIG(version_max); /* convert cURL max SSL version option to OpenSSL constant */ switch(curl_ssl_version_max) { case CURL_SSLVERSION_MAX_TLSv1_0: ossl_ssl_version_max = TLS1_VERSION; break; case CURL_SSLVERSION_MAX_TLSv1_1: ossl_ssl_version_max = TLS1_1_VERSION; break; case CURL_SSLVERSION_MAX_TLSv1_2: ossl_ssl_version_max = TLS1_2_VERSION; break; #ifdef TLS1_3_VERSION case CURL_SSLVERSION_MAX_TLSv1_3: ossl_ssl_version_max = TLS1_3_VERSION; break; #endif case CURL_SSLVERSION_MAX_NONE: /* none selected */ case CURL_SSLVERSION_MAX_DEFAULT: /* max selected */ default: /* SSL_CTX_set_max_proto_version states that: setting the maximum to 0 will enable protocol versions up to the highest version supported by the library */ ossl_ssl_version_max = 0; break; } if(!SSL_CTX_set_max_proto_version(ctx, ossl_ssl_version_max)) { return CURLE_SSL_CONNECT_ERROR; } return CURLE_OK; } #endif #ifdef OPENSSL_IS_BORINGSSL typedef uint32_t ctx_option_t; #else typedef long ctx_option_t; #endif #if (OPENSSL_VERSION_NUMBER < 0x10100000L) /* 1.1.0 */ static CURLcode set_ssl_version_min_max_legacy(ctx_option_t *ctx_options, struct Curl_easy *data, struct connectdata *conn, int sockindex) { long ssl_version = SSL_CONN_CONFIG(version); long ssl_version_max = SSL_CONN_CONFIG(version_max); (void) data; /* In case it's unused. 
*/ switch(ssl_version) { case CURL_SSLVERSION_TLSv1_3: #ifdef TLS1_3_VERSION { struct ssl_connect_data *connssl = &conn->ssl[sockindex]; SSL_CTX_set_max_proto_version(backend->ctx, TLS1_3_VERSION); *ctx_options |= SSL_OP_NO_TLSv1_2; } #else (void)sockindex; (void)ctx_options; failf(data, OSSL_PACKAGE " was built without TLS 1.3 support"); return CURLE_NOT_BUILT_IN; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_TLSv1_2: #if OPENSSL_VERSION_NUMBER >= 0x1000100FL *ctx_options |= SSL_OP_NO_TLSv1_1; #else failf(data, OSSL_PACKAGE " was built without TLS 1.2 support"); return CURLE_NOT_BUILT_IN; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_TLSv1_1: #if OPENSSL_VERSION_NUMBER >= 0x1000100FL *ctx_options |= SSL_OP_NO_TLSv1; #else failf(data, OSSL_PACKAGE " was built without TLS 1.1 support"); return CURLE_NOT_BUILT_IN; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_TLSv1_0: case CURL_SSLVERSION_TLSv1: break; } switch(ssl_version_max) { case CURL_SSLVERSION_MAX_TLSv1_0: #if OPENSSL_VERSION_NUMBER >= 0x1000100FL *ctx_options |= SSL_OP_NO_TLSv1_1; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_MAX_TLSv1_1: #if OPENSSL_VERSION_NUMBER >= 0x1000100FL *ctx_options |= SSL_OP_NO_TLSv1_2; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_MAX_TLSv1_2: #ifdef TLS1_3_VERSION *ctx_options |= SSL_OP_NO_TLSv1_3; #endif break; case CURL_SSLVERSION_MAX_TLSv1_3: #ifdef TLS1_3_VERSION break; #else failf(data, OSSL_PACKAGE " was built without TLS 1.3 support"); return CURLE_NOT_BUILT_IN; #endif } return CURLE_OK; } #endif /* The "new session" callback must return zero if the session can be removed * or non-zero if the session has been put into the session cache. */ static int ossl_new_session_cb(SSL *ssl, SSL_SESSION *ssl_sessionid) { int res = 0; struct connectdata *conn; struct Curl_easy *data; int sockindex; curl_socket_t *sockindex_ptr; int data_idx = ossl_get_ssl_data_index(); int connectdata_idx = ossl_get_ssl_conn_index(); int sockindex_idx = ossl_get_ssl_sockindex_index(); int proxy_idx = ossl_get_proxy_index(); bool isproxy; if(data_idx < 0 || connectdata_idx < 0 || sockindex_idx < 0 || proxy_idx < 0) return 0; conn = (struct connectdata*) SSL_get_ex_data(ssl, connectdata_idx); if(!conn) return 0; data = (struct Curl_easy *) SSL_get_ex_data(ssl, data_idx); /* The sockindex has been stored as a pointer to an array element */ sockindex_ptr = (curl_socket_t*) SSL_get_ex_data(ssl, sockindex_idx); sockindex = (int)(sockindex_ptr - conn->sock); isproxy = SSL_get_ex_data(ssl, proxy_idx) ? 
TRUE : FALSE; if(SSL_SET_OPTION(primary.sessionid)) { bool incache; void *old_ssl_sessionid = NULL; Curl_ssl_sessionid_lock(data); if(isproxy) incache = FALSE; else incache = !(Curl_ssl_getsessionid(data, conn, isproxy, &old_ssl_sessionid, NULL, sockindex)); if(incache) { if(old_ssl_sessionid != ssl_sessionid) { infof(data, "old SSL session ID is stale, removing\n"); Curl_ssl_delsessionid(data, old_ssl_sessionid); incache = FALSE; } } if(!incache) { if(!Curl_ssl_addsessionid(data, conn, isproxy, ssl_sessionid, 0 /* unknown size */, sockindex)) { /* the session has been put into the session cache */ res = 1; } else failf(data, "failed to store ssl session"); } Curl_ssl_sessionid_unlock(data); } return res; } static CURLcode load_cacert_from_memory(SSL_CTX *ctx, const struct curl_blob *ca_info_blob) { /* these need freed at the end */ BIO *cbio = NULL; STACK_OF(X509_INFO) *inf = NULL; /* everything else is just a reference */ int i, count = 0; X509_STORE *cts = NULL; X509_INFO *itmp = NULL; if(ca_info_blob->len > (size_t)INT_MAX) return CURLE_SSL_CACERT_BADFILE; cts = SSL_CTX_get_cert_store(ctx); if(!cts) return CURLE_OUT_OF_MEMORY; cbio = BIO_new_mem_buf(ca_info_blob->data, (int)ca_info_blob->len); if(!cbio) return CURLE_OUT_OF_MEMORY; inf = PEM_X509_INFO_read_bio(cbio, NULL, NULL, NULL); if(!inf) { BIO_free(cbio); return CURLE_SSL_CACERT_BADFILE; } /* add each entry from PEM file to x509_store */ for(i = 0; i < (int)sk_X509_INFO_num(inf); ++i) { itmp = sk_X509_INFO_value(inf, i); if(itmp->x509) { if(X509_STORE_add_cert(cts, itmp->x509)) { ++count; } else { /* set count to 0 to return an error */ count = 0; break; } } if(itmp->crl) { if(X509_STORE_add_crl(cts, itmp->crl)) { ++count; } else { /* set count to 0 to return an error */ count = 0; break; } } } sk_X509_INFO_pop_free(inf, X509_INFO_free); BIO_free(cbio); /* if we didn't end up importing anything, treat that as an error */ return (count > 0 ? CURLE_OK : CURLE_SSL_CACERT_BADFILE); } static CURLcode ossl_connect_step1(struct Curl_easy *data, struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; char *ciphers; SSL_METHOD_QUAL SSL_METHOD *req_method = NULL; X509_LOOKUP *lookup = NULL; curl_socket_t sockfd = conn->sock[sockindex]; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; ctx_option_t ctx_options = 0; #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME bool sni; const char * const hostname = SSL_HOST_NAME(); #ifdef ENABLE_IPV6 struct in6_addr addr; #else struct in_addr addr; #endif #endif const long int ssl_version = SSL_CONN_CONFIG(version); #ifdef USE_OPENSSL_SRP const enum CURL_TLSAUTH ssl_authtype = SSL_SET_OPTION(authtype); #endif char * const ssl_cert = SSL_SET_OPTION(primary.clientcert); const struct curl_blob *ssl_cert_blob = SSL_SET_OPTION(primary.cert_blob); const struct curl_blob *ca_info_blob = SSL_CONN_CONFIG(ca_info_blob); const char * const ssl_cert_type = SSL_SET_OPTION(cert_type); const char * const ssl_cafile = /* CURLOPT_CAINFO_BLOB overrides CURLOPT_CAINFO */ (ca_info_blob ? 
NULL : SSL_CONN_CONFIG(CAfile)); const char * const ssl_capath = SSL_CONN_CONFIG(CApath); const bool verifypeer = SSL_CONN_CONFIG(verifypeer); const char * const ssl_crlfile = SSL_SET_OPTION(CRLfile); char error_buffer[256]; struct ssl_backend_data *backend = connssl->backend; bool imported_native_ca = false; DEBUGASSERT(ssl_connect_1 == connssl->connecting_state); /* Make funny stuff to get random input */ result = ossl_seed(data); if(result) return result; SSL_SET_OPTION_LVALUE(certverifyresult) = !X509_V_OK; /* check to see if we've been told to use an explicit SSL/TLS version */ switch(ssl_version) { case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: case CURL_SSLVERSION_TLSv1_0: case CURL_SSLVERSION_TLSv1_1: case CURL_SSLVERSION_TLSv1_2: case CURL_SSLVERSION_TLSv1_3: /* it will be handled later with the context options */ #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) req_method = TLS_client_method(); #else req_method = SSLv23_client_method(); #endif use_sni(TRUE); break; case CURL_SSLVERSION_SSLv2: failf(data, "No SSLv2 support"); return CURLE_NOT_BUILT_IN; case CURL_SSLVERSION_SSLv3: failf(data, "No SSLv3 support"); return CURLE_NOT_BUILT_IN; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } if(backend->ctx) SSL_CTX_free(backend->ctx); backend->ctx = SSL_CTX_new(req_method); if(!backend->ctx) { failf(data, "SSL: couldn't create a context: %s", ossl_strerror(ERR_peek_error(), error_buffer, sizeof(error_buffer))); return CURLE_OUT_OF_MEMORY; } #ifdef SSL_MODE_RELEASE_BUFFERS SSL_CTX_set_mode(backend->ctx, SSL_MODE_RELEASE_BUFFERS); #endif #ifdef SSL_CTRL_SET_MSG_CALLBACK if(data->set.fdebug && data->set.verbose) { /* the SSL trace callback is only used for verbose logging */ SSL_CTX_set_msg_callback(backend->ctx, ossl_trace); SSL_CTX_set_msg_callback_arg(backend->ctx, conn); set_logger(conn, data); } #endif /* OpenSSL contains code to work-around lots of bugs and flaws in various SSL-implementations. SSL_CTX_set_options() is used to enabled those work-arounds. The man page for this option states that SSL_OP_ALL enables all the work-arounds and that "It is usually safe to use SSL_OP_ALL to enable the bug workaround options if compatibility with somewhat broken implementations is desired." The "-no_ticket" option was introduced in Openssl0.9.8j. It's a flag to disable "rfc4507bis session ticket support". rfc4507bis was later turned into the proper RFC5077 it seems: https://tools.ietf.org/html/rfc5077 The enabled extension concerns the session management. I wonder how often libcurl stops a connection and then resumes a TLS session. also, sending the session data is some overhead. .I suggest that you just use your proposed patch (which explicitly disables TICKET). If someone writes an application with libcurl and openssl who wants to enable the feature, one can do this in the SSL callback. SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG option enabling allowed proper interoperability with web server Netscape Enterprise Server 2.0.1 which was released back in 1996. Due to CVE-2010-4180, option SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG has become ineffective as of OpenSSL 0.9.8q and 1.0.0c. In order to mitigate CVE-2010-4180 when using previous OpenSSL versions we no longer enable this option regardless of OpenSSL version and SSL_OP_ALL definition. OpenSSL added a work-around for a SSL 3.0/TLS 1.0 CBC vulnerability (https://www.openssl.org/~bodo/tls-cbc.txt). 
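(That empty-fragment insertion is the countermeasure against the predictable-IV CBC weakness later demonstrated by the BEAST attack; curl keeps it enabled below unless the user opts out via the enable_beast option.)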
In 0.9.6e they added a bit to SSL_OP_ALL that _disables_ that work-around despite the fact that SSL_OP_ALL is documented to do "rather harmless" workarounds. In order to keep the secure work-around, the SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS bit must not be set. */ ctx_options = SSL_OP_ALL; #ifdef SSL_OP_NO_TICKET ctx_options |= SSL_OP_NO_TICKET; #endif #ifdef SSL_OP_NO_COMPRESSION ctx_options |= SSL_OP_NO_COMPRESSION; #endif #ifdef SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG /* mitigate CVE-2010-4180 */ ctx_options &= ~SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; #endif #ifdef SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS /* unless the user explicitly ask to allow the protocol vulnerability we use the work-around */ if(!SSL_SET_OPTION(enable_beast)) ctx_options &= ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS; #endif switch(ssl_version) { case CURL_SSLVERSION_SSLv2: case CURL_SSLVERSION_SSLv3: return CURLE_NOT_BUILT_IN; /* "--tlsv<x.y>" options mean TLS >= version <x.y> */ case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: /* TLS >= version 1.0 */ case CURL_SSLVERSION_TLSv1_0: /* TLS >= version 1.0 */ case CURL_SSLVERSION_TLSv1_1: /* TLS >= version 1.1 */ case CURL_SSLVERSION_TLSv1_2: /* TLS >= version 1.2 */ case CURL_SSLVERSION_TLSv1_3: /* TLS >= version 1.3 */ /* asking for any TLS version as the minimum, means no SSL versions allowed */ ctx_options |= SSL_OP_NO_SSLv2; ctx_options |= SSL_OP_NO_SSLv3; #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* 1.1.0 */ result = set_ssl_version_min_max(backend->ctx, conn); #else result = set_ssl_version_min_max_legacy(&ctx_options, data, conn, sockindex); #endif if(result != CURLE_OK) return result; break; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } SSL_CTX_set_options(backend->ctx, ctx_options); #ifdef HAS_NPN if(conn->bits.tls_enable_npn) SSL_CTX_set_next_proto_select_cb(backend->ctx, select_next_proto_cb, data); #endif #ifdef HAS_ALPN if(conn->bits.tls_enable_alpn) { int cur = 0; unsigned char protocols[128]; #ifdef USE_HTTP2 if(data->state.httpwant >= CURL_HTTP_VERSION_2 #ifndef CURL_DISABLE_PROXY && (!SSL_IS_PROXY() || !conn->bits.tunnel_proxy) #endif ) { protocols[cur++] = ALPN_H2_LENGTH; memcpy(&protocols[cur], ALPN_H2, ALPN_H2_LENGTH); cur += ALPN_H2_LENGTH; infof(data, "ALPN, offering %s\n", ALPN_H2); } #endif protocols[cur++] = ALPN_HTTP_1_1_LENGTH; memcpy(&protocols[cur], ALPN_HTTP_1_1, ALPN_HTTP_1_1_LENGTH); cur += ALPN_HTTP_1_1_LENGTH; infof(data, "ALPN, offering %s\n", ALPN_HTTP_1_1); /* expects length prefixed preference ordered list of protocols in wire * format */ if(SSL_CTX_set_alpn_protos(backend->ctx, protocols, cur)) { failf(data, "Error setting ALPN"); return CURLE_SSL_CONNECT_ERROR; } } #endif if(ssl_cert || ssl_cert_blob || ssl_cert_type) { if(!result && !cert_stuff(data, backend->ctx, ssl_cert, ssl_cert_blob, ssl_cert_type, SSL_SET_OPTION(key), SSL_SET_OPTION(key_blob), SSL_SET_OPTION(key_type), SSL_SET_OPTION(key_passwd))) result = CURLE_SSL_CERTPROBLEM; if(result) /* failf() is already done in cert_stuff() */ return result; } ciphers = SSL_CONN_CONFIG(cipher_list); if(!ciphers) ciphers = (char *)DEFAULT_CIPHER_SELECTION; if(ciphers) { if(!SSL_CTX_set_cipher_list(backend->ctx, ciphers)) { failf(data, "failed setting cipher list: %s", ciphers); return CURLE_SSL_CIPHER; } infof(data, "Cipher selection: %s\n", ciphers); } #ifdef HAVE_SSL_CTX_SET_CIPHERSUITES { char *ciphers13 = SSL_CONN_CONFIG(cipher_list13); if(ciphers13) { if(!SSL_CTX_set_ciphersuites(backend->ctx, ciphers13)) { 
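/* SSL_CTX_set_ciphersuites() takes a colon-separated list of TLS 1.3 suite
   names such as "TLS_AES_256_GCM_SHA384" and returns 0 if the list cannot be
   parsed or applied */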
failf(data, "failed setting TLS 1.3 cipher suite: %s", ciphers13); return CURLE_SSL_CIPHER; } infof(data, "TLS 1.3 cipher selection: %s\n", ciphers13); } } #endif #ifdef HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH /* OpenSSL 1.1.1 requires clients to opt-in for PHA */ SSL_CTX_set_post_handshake_auth(backend->ctx, 1); #endif #ifdef HAVE_SSL_CTX_SET_EC_CURVES { char *curves = SSL_CONN_CONFIG(curves); if(curves) { if(!SSL_CTX_set1_curves_list(backend->ctx, curves)) { failf(data, "failed setting curves list: '%s'", curves); return CURLE_SSL_CIPHER; } } } #endif #ifdef USE_OPENSSL_SRP if(ssl_authtype == CURL_TLSAUTH_SRP) { char * const ssl_username = SSL_SET_OPTION(username); infof(data, "Using TLS-SRP username: %s\n", ssl_username); if(!SSL_CTX_set_srp_username(backend->ctx, ssl_username)) { failf(data, "Unable to set SRP user name"); return CURLE_BAD_FUNCTION_ARGUMENT; } if(!SSL_CTX_set_srp_password(backend->ctx, SSL_SET_OPTION(password))) { failf(data, "failed setting SRP password"); return CURLE_BAD_FUNCTION_ARGUMENT; } if(!SSL_CONN_CONFIG(cipher_list)) { infof(data, "Setting cipher list SRP\n"); if(!SSL_CTX_set_cipher_list(backend->ctx, "SRP")) { failf(data, "failed setting SRP cipher list"); return CURLE_SSL_CIPHER; } } } #endif #if defined(USE_WIN32_CRYPTO) /* Import certificates from the Windows root certificate store if requested. https://stackoverflow.com/questions/9507184/ https://github.com/d3x0r/SACK/blob/master/src/netlib/ssl_layer.c#L1037 https://tools.ietf.org/html/rfc5280 */ if((SSL_CONN_CONFIG(verifypeer) || SSL_CONN_CONFIG(verifyhost)) && (SSL_SET_OPTION(native_ca_store))) { X509_STORE *store = SSL_CTX_get_cert_store(backend->ctx); HCERTSTORE hStore = CertOpenSystemStore(0, TEXT("ROOT")); if(hStore) { PCCERT_CONTEXT pContext = NULL; /* The array of enhanced key usage OIDs will vary per certificate and is declared outside of the loop so that rather than malloc/free each iteration we can grow it with realloc, when necessary. */ CERT_ENHKEY_USAGE *enhkey_usage = NULL; DWORD enhkey_usage_size = 0; /* This loop makes a best effort to import all valid certificates from the MS root store. If a certificate cannot be imported it is skipped. 'result' is used to store only hard-fail conditions (such as out of memory) that cause an early break. */ result = CURLE_OK; for(;;) { X509 *x509; FILETIME now; BYTE key_usage[2]; DWORD req_size; const unsigned char *encoded_cert; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) char cert_name[256]; #endif pContext = CertEnumCertificatesInStore(hStore, pContext); if(!pContext) break; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) if(!CertGetNameStringA(pContext, CERT_NAME_SIMPLE_DISPLAY_TYPE, 0, NULL, cert_name, sizeof(cert_name))) { strcpy(cert_name, "Unknown"); } infof(data, "SSL: Checking cert \"%s\"\n", cert_name); #endif encoded_cert = (const unsigned char *)pContext->pbCertEncoded; if(!encoded_cert) continue; GetSystemTimeAsFileTime(&now); if(CompareFileTime(&pContext->pCertInfo->NotBefore, &now) > 0 || CompareFileTime(&now, &pContext->pCertInfo->NotAfter) > 0) continue; /* If key usage exists check for signing attribute */ if(CertGetIntendedKeyUsage(pContext->dwCertEncodingType, pContext->pCertInfo, key_usage, sizeof(key_usage))) { if(!(key_usage[0] & CERT_KEY_CERT_SIGN_KEY_USAGE)) continue; } else if(GetLastError()) continue; /* If enhanced key usage exists check for server auth attribute. 
* * Note "In a Microsoft environment, a certificate might also have EKU * extended properties that specify valid uses for the certificate." * The call below checks both, and behavior varies depending on what is * found. For more details see CertGetEnhancedKeyUsage doc. */ if(CertGetEnhancedKeyUsage(pContext, 0, NULL, &req_size)) { if(req_size && req_size > enhkey_usage_size) { void *tmp = realloc(enhkey_usage, req_size); if(!tmp) { failf(data, "SSL: Out of memory allocating for OID list"); result = CURLE_OUT_OF_MEMORY; break; } enhkey_usage = (CERT_ENHKEY_USAGE *)tmp; enhkey_usage_size = req_size; } if(CertGetEnhancedKeyUsage(pContext, 0, enhkey_usage, &req_size)) { if(!enhkey_usage->cUsageIdentifier) { /* "If GetLastError returns CRYPT_E_NOT_FOUND, the certificate is good for all uses. If it returns zero, the certificate has no valid uses." */ if((HRESULT)GetLastError() != CRYPT_E_NOT_FOUND) continue; } else { DWORD i; bool found = false; for(i = 0; i < enhkey_usage->cUsageIdentifier; ++i) { if(!strcmp("1.3.6.1.5.5.7.3.1" /* OID server auth */, enhkey_usage->rgpszUsageIdentifier[i])) { found = true; break; } } if(!found) continue; } } else continue; } else continue; x509 = d2i_X509(NULL, &encoded_cert, pContext->cbCertEncoded); if(!x509) continue; /* Try to import the certificate. This may fail for legitimate reasons such as duplicate certificate, which is allowed by MS but not OpenSSL. */ if(X509_STORE_add_cert(store, x509) == 1) { #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) infof(data, "SSL: Imported cert \"%s\"\n", cert_name); #endif imported_native_ca = true; } X509_free(x509); } free(enhkey_usage); CertFreeCertificateContext(pContext); CertCloseStore(hStore, 0); if(result) return result; } if(imported_native_ca) infof(data, "successfully imported windows ca store\n"); else infof(data, "error importing windows ca store, continuing anyway\n"); } #endif if(ca_info_blob) { result = load_cacert_from_memory(backend->ctx, ca_info_blob); if(result) { if(result == CURLE_OUT_OF_MEMORY || (verifypeer && !imported_native_ca)) { failf(data, "error importing CA certificate blob"); return result; } /* Only warning if no certificate verification is required. */ infof(data, "error importing CA certificate blob, continuing anyway\n"); } } #if defined(OPENSSL_VERSION_MAJOR) && (OPENSSL_VERSION_MAJOR >= 3) /* OpenSSL 3.0.0 has deprecated SSL_CTX_load_verify_locations */ { if(ssl_cafile) { if(!SSL_CTX_load_verify_file(backend->ctx, ssl_cafile)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate file: %s", ssl_cafile); return CURLE_SSL_CACERT_BADFILE; } /* Continue with a warning if no certificate verif is required. */ infof(data, "error setting certificate file, continuing anyway\n"); } infof(data, " CAfile: %s\n", ssl_cafile); } if(ssl_capath) { if(!SSL_CTX_load_verify_dir(backend->ctx, ssl_capath)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate path: %s", ssl_capath); return CURLE_SSL_CACERT_BADFILE; } /* Continue with a warning if no certificate verif is required. */ infof(data, "error setting certificate path, continuing anyway\n"); } infof(data, " CApath: %s\n", ssl_capath); } } #else if(ssl_cafile || ssl_capath) { /* tell SSL where to find CA certificates that are used to verify the servers certificate. 
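SSL_CTX_load_verify_locations() accepts a CAfile (a PEM bundle), a CApath (a directory of hashed certificate files), or both, and fails when neither can be used.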
*/ if(!SSL_CTX_load_verify_locations(backend->ctx, ssl_cafile, ssl_capath)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate verify locations:" " CAfile: %s CApath: %s", ssl_cafile ? ssl_cafile : "none", ssl_capath ? ssl_capath : "none"); return CURLE_SSL_CACERT_BADFILE; } /* Just continue with a warning if no strict certificate verification is required. */ infof(data, "error setting certificate verify locations," " continuing anyway:\n"); } else { /* Everything is fine. */ infof(data, "successfully set certificate verify locations:\n"); } infof(data, " CAfile: %s\n", ssl_cafile ? ssl_cafile : "none"); infof(data, " CApath: %s\n", ssl_capath ? ssl_capath : "none"); } #endif #ifdef CURL_CA_FALLBACK if(verifypeer && !ca_info_blob && !ssl_cafile && !ssl_capath && !imported_native_ca) { /* verifying the peer without any CA certificates won't work so use openssl's built in default as fallback */ SSL_CTX_set_default_verify_paths(backend->ctx); } #endif if(ssl_crlfile) { /* tell SSL where to find CRL file that is used to check certificate * revocation */ lookup = X509_STORE_add_lookup(SSL_CTX_get_cert_store(backend->ctx), X509_LOOKUP_file()); if(!lookup || (!X509_load_crl_file(lookup, ssl_crlfile, X509_FILETYPE_PEM)) ) { failf(data, "error loading CRL file: %s", ssl_crlfile); return CURLE_SSL_CRL_BADFILE; } /* Everything is fine. */ infof(data, "successfully load CRL file:\n"); X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL); infof(data, " CRLfile: %s\n", ssl_crlfile); } if(verifypeer) { /* Try building a chain using issuers in the trusted store first to avoid problems with server-sent legacy intermediates. Newer versions of OpenSSL do alternate chain checking by default but we do not know how to determine that in a reliable manner. https://rt.openssl.org/Ticket/Display.html?id=3621&user=guest&pass=guest */ #if defined(X509_V_FLAG_TRUSTED_FIRST) X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_TRUSTED_FIRST); #endif #ifdef X509_V_FLAG_PARTIAL_CHAIN if(!SSL_SET_OPTION(no_partialchain) && !ssl_crlfile) { /* Have intermediate certificates in the trust store be treated as trust-anchors, in the same way as self-signed root CA certificates are. This allows users to verify servers using the intermediate cert only, instead of needing the whole chain. Due to OpenSSL bug https://github.com/openssl/openssl/issues/5081 we cannot do partial chains with CRL check. */ X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_PARTIAL_CHAIN); } #endif } /* SSL always tries to verify the peer, this only says whether it should * fail to connect if the verification fails, or if it should continue * anyway. In the latter case the result of the verification is checked with * SSL_get_verify_result() below. */ SSL_CTX_set_verify(backend->ctx, verifypeer ? SSL_VERIFY_PEER : SSL_VERIFY_NONE, NULL); /* Enable logging of secrets to the file specified in env SSLKEYLOGFILE. */ #ifdef HAVE_KEYLOG_CALLBACK if(Curl_tls_keylog_enabled()) { SSL_CTX_set_keylog_callback(backend->ctx, ossl_keylog_callback); } #endif /* Enable the session cache because it's a prerequisite for the "new session" * callback. Use the "external storage" mode to avoid that OpenSSL creates * an internal session cache. 
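* SSL_SESS_CACHE_NO_INTERNAL turns off both internal lookup and internal storage, so the callback (and curl's own session cache) is the only place sessions are retained.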
*/ SSL_CTX_set_session_cache_mode(backend->ctx, SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_NO_INTERNAL); SSL_CTX_sess_set_new_cb(backend->ctx, ossl_new_session_cb); /* give application a chance to interfere with SSL set up. */ if(data->set.ssl.fsslctx) { Curl_set_in_callback(data, true); result = (*data->set.ssl.fsslctx)(data, backend->ctx, data->set.ssl.fsslctxp); Curl_set_in_callback(data, false); if(result) { failf(data, "error signaled by ssl ctx callback"); return result; } } /* Lets make an SSL structure */ if(backend->handle) SSL_free(backend->handle); backend->handle = SSL_new(backend->ctx); if(!backend->handle) { failf(data, "SSL: couldn't create a context (handle)!"); return CURLE_OUT_OF_MEMORY; } #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) if(SSL_CONN_CONFIG(verifystatus)) SSL_set_tlsext_status_type(backend->handle, TLSEXT_STATUSTYPE_ocsp); #endif #if defined(OPENSSL_IS_BORINGSSL) && defined(ALLOW_RENEG) SSL_set_renegotiate_mode(backend->handle, ssl_renegotiate_freely); #endif SSL_set_connect_state(backend->handle); backend->server_cert = 0x0; #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME if((0 == Curl_inet_pton(AF_INET, hostname, &addr)) && #ifdef ENABLE_IPV6 (0 == Curl_inet_pton(AF_INET6, hostname, &addr)) && #endif sni) { size_t nlen = strlen(hostname); if((long)nlen >= data->set.buffer_size) /* this is seriously messed up */ return CURLE_SSL_CONNECT_ERROR; /* RFC 6066 section 3 says the SNI field is case insensitive, but browsers send the data lowercase and subsequently there are now numerous servers out there that don't work unless the name is lowercased */ Curl_strntolower(data->state.buffer, hostname, nlen); data->state.buffer[nlen] = 0; if(!SSL_set_tlsext_host_name(backend->handle, data->state.buffer)) infof(data, "WARNING: failed to configure server name indication (SNI) " "TLS extension\n"); } #endif /* Check if there's a cached ID we can/should use here! */ if(SSL_SET_OPTION(primary.sessionid)) { void *ssl_sessionid = NULL; int data_idx = ossl_get_ssl_data_index(); int connectdata_idx = ossl_get_ssl_conn_index(); int sockindex_idx = ossl_get_ssl_sockindex_index(); int proxy_idx = ossl_get_proxy_index(); if(data_idx >= 0 && connectdata_idx >= 0 && sockindex_idx >= 0 && proxy_idx >= 0) { /* Store the data needed for the "new session" callback. * The sockindex is stored as a pointer to an array element. */ SSL_set_ex_data(backend->handle, data_idx, data); SSL_set_ex_data(backend->handle, connectdata_idx, conn); SSL_set_ex_data(backend->handle, sockindex_idx, conn->sock + sockindex); #ifndef CURL_DISABLE_PROXY SSL_set_ex_data(backend->handle, proxy_idx, SSL_IS_PROXY() ? (void *) 1: NULL); #else SSL_set_ex_data(backend->handle, proxy_idx, NULL); #endif } Curl_ssl_sessionid_lock(data); if(!Curl_ssl_getsessionid(data, conn, SSL_IS_PROXY() ? TRUE : FALSE, &ssl_sessionid, NULL, sockindex)) { /* we got a session id, use it! 
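SSL_set_session() must be called before the handshake is started for the cached session to be offered for resumption.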
*/ if(!SSL_set_session(backend->handle, ssl_sessionid)) { Curl_ssl_sessionid_unlock(data); failf(data, "SSL: SSL_set_session failed: %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return CURLE_SSL_CONNECT_ERROR; } /* Informational message */ infof(data, "SSL re-using session ID\n"); } Curl_ssl_sessionid_unlock(data); } #ifndef CURL_DISABLE_PROXY if(conn->proxy_ssl[sockindex].use) { BIO *const bio = BIO_new(BIO_f_ssl()); SSL *handle = conn->proxy_ssl[sockindex].backend->handle; DEBUGASSERT(ssl_connection_complete == conn->proxy_ssl[sockindex].state); DEBUGASSERT(handle != NULL); DEBUGASSERT(bio != NULL); BIO_set_ssl(bio, handle, FALSE); SSL_set_bio(backend->handle, bio, bio); } else #endif if(!SSL_set_fd(backend->handle, (int)sockfd)) { /* pass the raw socket into the SSL layers */ failf(data, "SSL: SSL_set_fd failed: %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return CURLE_SSL_CONNECT_ERROR; } connssl->connecting_state = ssl_connect_2; return CURLE_OK; } static CURLcode ossl_connect_step2(struct Curl_easy *data, struct connectdata *conn, int sockindex) { int err; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; struct ssl_backend_data *backend = connssl->backend; DEBUGASSERT(ssl_connect_2 == connssl->connecting_state || ssl_connect_2_reading == connssl->connecting_state || ssl_connect_2_writing == connssl->connecting_state); ERR_clear_error(); err = SSL_connect(backend->handle); #ifndef HAVE_KEYLOG_CALLBACK if(Curl_tls_keylog_enabled()) { /* If key logging is enabled, wait for the handshake to complete and then * proceed with logging secrets (for TLS 1.2 or older). */ ossl_log_tls12_secret(backend->handle, &backend->keylog_done); } #endif /* 1 is fine 0 is "not successful but was shut down controlled" <0 is "handshake was not successful, because a fatal error occurred" */ if(1 != err) { int detail = SSL_get_error(backend->handle, err); if(SSL_ERROR_WANT_READ == detail) { connssl->connecting_state = ssl_connect_2_reading; return CURLE_OK; } if(SSL_ERROR_WANT_WRITE == detail) { connssl->connecting_state = ssl_connect_2_writing; return CURLE_OK; } #ifdef SSL_ERROR_WANT_ASYNC if(SSL_ERROR_WANT_ASYNC == detail) { connssl->connecting_state = ssl_connect_2; return CURLE_OK; } #endif else { /* untreated error */ unsigned long errdetail; char error_buffer[256]=""; CURLcode result; long lerr; int lib; int reason; /* the connection failed, we're not waiting for anything else. */ connssl->connecting_state = ssl_connect_2; /* Get the earliest error code from the thread's error queue and removes the entry. 
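ERR_get_error() returns 0 when the queue is empty; that case is handled separately below.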
*/ errdetail = ERR_get_error(); /* Extract which lib and reason */ lib = ERR_GET_LIB(errdetail); reason = ERR_GET_REASON(errdetail); if((lib == ERR_LIB_SSL) && ((reason == SSL_R_CERTIFICATE_VERIFY_FAILED) || (reason == SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED))) { result = CURLE_PEER_FAILED_VERIFICATION; lerr = SSL_get_verify_result(backend->handle); if(lerr != X509_V_OK) { SSL_SET_OPTION_LVALUE(certverifyresult) = lerr; msnprintf(error_buffer, sizeof(error_buffer), "SSL certificate problem: %s", X509_verify_cert_error_string(lerr)); } else /* strcpy() is fine here as long as the string fits within error_buffer */ strcpy(error_buffer, "SSL certificate verification failed"); } #if (OPENSSL_VERSION_NUMBER >= 0x10101000L && \ !defined(LIBRESSL_VERSION_NUMBER) && \ !defined(OPENSSL_IS_BORINGSSL)) /* SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED is only available on OpenSSL version above v1.1.1, not Libre SSL nor BoringSSL */ else if((lib == ERR_LIB_SSL) && (reason == SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED)) { /* If client certificate is required, communicate the error to client */ result = CURLE_SSL_CLIENTCERT; ossl_strerror(errdetail, error_buffer, sizeof(error_buffer)); } #endif else { result = CURLE_SSL_CONNECT_ERROR; ossl_strerror(errdetail, error_buffer, sizeof(error_buffer)); } /* detail is already set to the SSL error above */ /* If we e.g. use SSLv2 request-method and the server doesn't like us * (RST connection etc.), OpenSSL gives no explanation whatsoever and * the SO_ERROR is also lost. */ if(CURLE_SSL_CONNECT_ERROR == result && errdetail == 0) { const char * const hostname = SSL_HOST_NAME(); const long int port = SSL_HOST_PORT(); char extramsg[80]=""; int sockerr = SOCKERRNO; if(sockerr && detail == SSL_ERROR_SYSCALL) Curl_strerror(sockerr, extramsg, sizeof(extramsg)); failf(data, OSSL_PACKAGE " SSL_connect: %s in connection to %s:%ld ", extramsg[0] ? extramsg : SSL_ERROR_to_str(detail), hostname, port); return result; } /* Could be a CERT problem */ failf(data, "%s", error_buffer); return result; } } else { /* we have been connected fine, we're not waiting for anything else. */ connssl->connecting_state = ssl_connect_3; /* Informational message */ infof(data, "SSL connection using %s / %s\n", SSL_get_version(backend->handle), SSL_get_cipher(backend->handle)); #ifdef HAS_ALPN /* Sets data and len to negotiated protocol, len is 0 if no protocol was * negotiated */ if(conn->bits.tls_enable_alpn) { const unsigned char *neg_protocol; unsigned int len; SSL_get0_alpn_selected(backend->handle, &neg_protocol, &len); if(len) { infof(data, "ALPN, server accepted to use %.*s\n", len, neg_protocol); #ifdef USE_HTTP2 if(len == ALPN_H2_LENGTH && !memcmp(ALPN_H2, neg_protocol, len)) { conn->negnpn = CURL_HTTP_VERSION_2; } else #endif if(len == ALPN_HTTP_1_1_LENGTH && !memcmp(ALPN_HTTP_1_1, neg_protocol, ALPN_HTTP_1_1_LENGTH)) { conn->negnpn = CURL_HTTP_VERSION_1_1; } } else infof(data, "ALPN, server did not agree to a protocol\n"); Curl_multiuse_state(data, conn->negnpn == CURL_HTTP_VERSION_2 ? 
BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE); } #endif return CURLE_OK; } } static int asn1_object_dump(ASN1_OBJECT *a, char *buf, size_t len) { int i, ilen; ilen = (int)len; if(ilen < 0) return 1; /* buffer too big */ i = i2t_ASN1_OBJECT(buf, ilen, a); if(i >= ilen) return 1; /* buffer too small */ return 0; } #define push_certinfo(_label, _num) \ do { \ long info_len = BIO_get_mem_data(mem, &ptr); \ Curl_ssl_push_certinfo_len(data, _num, _label, ptr, info_len); \ if(1 != BIO_reset(mem)) \ break; \ } while(0) static void pubkey_show(struct Curl_easy *data, BIO *mem, int num, const char *type, const char *name, #ifdef HAVE_OPAQUE_RSA_DSA_DH const #endif BIGNUM *bn) { char *ptr; char namebuf[32]; msnprintf(namebuf, sizeof(namebuf), "%s(%s)", type, name); if(bn) BN_print(mem, bn); push_certinfo(namebuf, num); } #ifdef HAVE_OPAQUE_RSA_DSA_DH #define print_pubkey_BN(_type, _name, _num) \ pubkey_show(data, mem, _num, #_type, #_name, _name) #else #define print_pubkey_BN(_type, _name, _num) \ do { \ if(_type->_name) { \ pubkey_show(data, mem, _num, #_type, #_name, _type->_name); \ } \ } while(0) #endif static void X509V3_ext(struct Curl_easy *data, int certnum, CONST_EXTS STACK_OF(X509_EXTENSION) *exts) { int i; if((int)sk_X509_EXTENSION_num(exts) <= 0) /* no extensions, bail out */ return; for(i = 0; i < (int)sk_X509_EXTENSION_num(exts); i++) { ASN1_OBJECT *obj; X509_EXTENSION *ext = sk_X509_EXTENSION_value(exts, i); BUF_MEM *biomem; char namebuf[128]; BIO *bio_out = BIO_new(BIO_s_mem()); if(!bio_out) return; obj = X509_EXTENSION_get_object(ext); asn1_object_dump(obj, namebuf, sizeof(namebuf)); if(!X509V3_EXT_print(bio_out, ext, 0, 0)) ASN1_STRING_print(bio_out, (ASN1_STRING *)X509_EXTENSION_get_data(ext)); BIO_get_mem_ptr(bio_out, &biomem); Curl_ssl_push_certinfo_len(data, certnum, namebuf, biomem->data, biomem->length); BIO_free(bio_out); } } #ifdef OPENSSL_IS_BORINGSSL typedef size_t numcert_t; #else typedef int numcert_t; #endif #if defined(OPENSSL_VERSION_MAJOR) && (OPENSSL_VERSION_MAJOR >= 3) #define OSSL3_CONST const #else #define OSSL3_CONST #endif static CURLcode get_cert_chain(struct Curl_easy *data, struct ssl_connect_data *connssl) { CURLcode result; STACK_OF(X509) *sk; int i; numcert_t numcerts; BIO *mem; struct ssl_backend_data *backend = connssl->backend; sk = SSL_get_peer_cert_chain(backend->handle); if(!sk) { return CURLE_OUT_OF_MEMORY; } numcerts = sk_X509_num(sk); result = Curl_ssl_init_certinfo(data, (int)numcerts); if(result) { return result; } mem = BIO_new(BIO_s_mem()); for(i = 0; i < (int)numcerts; i++) { ASN1_INTEGER *num; X509 *x = sk_X509_value(sk, i); EVP_PKEY *pubkey = NULL; int j; char *ptr; const ASN1_BIT_STRING *psig = NULL; X509_NAME_print_ex(mem, X509_get_subject_name(x), 0, XN_FLAG_ONELINE); push_certinfo("Subject", i); X509_NAME_print_ex(mem, X509_get_issuer_name(x), 0, XN_FLAG_ONELINE); push_certinfo("Issuer", i); BIO_printf(mem, "%lx", X509_get_version(x)); push_certinfo("Version", i); num = X509_get_serialNumber(x); if(num->type == V_ASN1_NEG_INTEGER) BIO_puts(mem, "-"); for(j = 0; j < num->length; j++) BIO_printf(mem, "%02x", num->data[j]); push_certinfo("Serial Number", i); #if defined(HAVE_X509_GET0_SIGNATURE) && defined(HAVE_X509_GET0_EXTENSIONS) { const X509_ALGOR *sigalg = NULL; X509_PUBKEY *xpubkey = NULL; ASN1_OBJECT *pubkeyoid = NULL; X509_get0_signature(&psig, &sigalg, x); if(sigalg) { i2a_ASN1_OBJECT(mem, sigalg->algorithm); push_certinfo("Signature Algorithm", i); } xpubkey = X509_get_X509_PUBKEY(x); if(xpubkey) { X509_PUBKEY_get0_param(&pubkeyoid, 
NULL, NULL, NULL, xpubkey); if(pubkeyoid) { i2a_ASN1_OBJECT(mem, pubkeyoid); push_certinfo("Public Key Algorithm", i); } } X509V3_ext(data, i, X509_get0_extensions(x)); } #else { /* before OpenSSL 1.0.2 */ X509_CINF *cinf = x->cert_info; i2a_ASN1_OBJECT(mem, cinf->signature->algorithm); push_certinfo("Signature Algorithm", i); i2a_ASN1_OBJECT(mem, cinf->key->algor->algorithm); push_certinfo("Public Key Algorithm", i); X509V3_ext(data, i, cinf->extensions); psig = x->signature; } #endif ASN1_TIME_print(mem, X509_get0_notBefore(x)); push_certinfo("Start date", i); ASN1_TIME_print(mem, X509_get0_notAfter(x)); push_certinfo("Expire date", i); pubkey = X509_get_pubkey(x); if(!pubkey) infof(data, " Unable to load public key\n"); else { int pktype; #ifdef HAVE_OPAQUE_EVP_PKEY pktype = EVP_PKEY_id(pubkey); #else pktype = pubkey->type; #endif switch(pktype) { case EVP_PKEY_RSA: { OSSL3_CONST RSA *rsa; #ifdef HAVE_OPAQUE_EVP_PKEY rsa = EVP_PKEY_get0_RSA(pubkey); #else rsa = pubkey->pkey.rsa; #endif #ifdef HAVE_OPAQUE_RSA_DSA_DH { const BIGNUM *n; const BIGNUM *e; RSA_get0_key(rsa, &n, &e, NULL); BIO_printf(mem, "%d", BN_num_bits(n)); push_certinfo("RSA Public Key", i); print_pubkey_BN(rsa, n, i); print_pubkey_BN(rsa, e, i); } #else BIO_printf(mem, "%d", BN_num_bits(rsa->n)); push_certinfo("RSA Public Key", i); print_pubkey_BN(rsa, n, i); print_pubkey_BN(rsa, e, i); #endif break; } case EVP_PKEY_DSA: { #ifndef OPENSSL_NO_DSA OSSL3_CONST DSA *dsa; #ifdef HAVE_OPAQUE_EVP_PKEY dsa = EVP_PKEY_get0_DSA(pubkey); #else dsa = pubkey->pkey.dsa; #endif #ifdef HAVE_OPAQUE_RSA_DSA_DH { const BIGNUM *p; const BIGNUM *q; const BIGNUM *g; const BIGNUM *pub_key; DSA_get0_pqg(dsa, &p, &q, &g); DSA_get0_key(dsa, &pub_key, NULL); print_pubkey_BN(dsa, p, i); print_pubkey_BN(dsa, q, i); print_pubkey_BN(dsa, g, i); print_pubkey_BN(dsa, pub_key, i); } #else print_pubkey_BN(dsa, p, i); print_pubkey_BN(dsa, q, i); print_pubkey_BN(dsa, g, i); print_pubkey_BN(dsa, pub_key, i); #endif #endif /* !OPENSSL_NO_DSA */ break; } case EVP_PKEY_DH: { OSSL3_CONST DH *dh; #ifdef HAVE_OPAQUE_EVP_PKEY dh = EVP_PKEY_get0_DH(pubkey); #else dh = pubkey->pkey.dh; #endif #ifdef HAVE_OPAQUE_RSA_DSA_DH { const BIGNUM *p; const BIGNUM *q; const BIGNUM *g; const BIGNUM *pub_key; DH_get0_pqg(dh, &p, &q, &g); DH_get0_key(dh, &pub_key, NULL); print_pubkey_BN(dh, p, i); print_pubkey_BN(dh, q, i); print_pubkey_BN(dh, g, i); print_pubkey_BN(dh, pub_key, i); } #else print_pubkey_BN(dh, p, i); print_pubkey_BN(dh, g, i); print_pubkey_BN(dh, pub_key, i); #endif break; } } EVP_PKEY_free(pubkey); } if(psig) { for(j = 0; j < psig->length; j++) BIO_printf(mem, "%02x:", psig->data[j]); push_certinfo("Signature", i); } PEM_write_bio_X509(mem, x); push_certinfo("Cert", i); } BIO_free(mem); return CURLE_OK; } /* * Heavily modified from: * https://www.owasp.org/index.php/Certificate_and_Public_Key_Pinning#OpenSSL */ static CURLcode pkp_pin_peer_pubkey(struct Curl_easy *data, X509* cert, const char *pinnedpubkey) { /* Scratch */ int len1 = 0, len2 = 0; unsigned char *buff1 = NULL, *temp = NULL; /* Result is returned to caller */ CURLcode result = CURLE_SSL_PINNEDPUBKEYNOTMATCH; /* if a path wasn't specified, don't pin */ if(!pinnedpubkey) return CURLE_OK; if(!cert) return result; do { /* Begin Gyrations to get the subjectPublicKeyInfo */ /* Thanks to Viktor Dukhovni on the OpenSSL mailing list */ /* https://groups.google.com/group/mailing.openssl.users/browse_thread /thread/d61858dae102c6c7 */ len1 = i2d_X509_PUBKEY(X509_get_X509_PUBKEY(cert), NULL); if(len1 < 1) 
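/* a non-positive length means the SubjectPublicKeyInfo could not be
   DER-encoded */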
break; /* failed */ buff1 = temp = malloc(len1); if(!buff1) break; /* failed */ /* https://www.openssl.org/docs/crypto/d2i_X509.html */ len2 = i2d_X509_PUBKEY(X509_get_X509_PUBKEY(cert), &temp); /* * These checks are verifying we got back the same values as when we * sized the buffer. It's pretty weak since they should always be the * same. But it gives us something to test. */ if((len1 != len2) || !temp || ((temp - buff1) != len1)) break; /* failed */ /* End Gyrations */ /* The one good exit point */ result = Curl_pin_peer_pubkey(data, pinnedpubkey, buff1, len1); } while(0); if(buff1) free(buff1); return result; } /* * Get the server cert, verify it and show it etc, only call failf() if the * 'strict' argument is TRUE as otherwise all this is for informational * purposes only! * * We check certificates to authenticate the server; otherwise we risk * man-in-the-middle attack. */ static CURLcode servercert(struct Curl_easy *data, struct connectdata *conn, struct ssl_connect_data *connssl, bool strict) { CURLcode result = CURLE_OK; int rc; long lerr; X509 *issuer; BIO *fp = NULL; char error_buffer[256]=""; char buffer[2048]; const char *ptr; BIO *mem = BIO_new(BIO_s_mem()); struct ssl_backend_data *backend = connssl->backend; if(data->set.ssl.certinfo) /* we've been asked to gather certificate info! */ (void)get_cert_chain(data, connssl); backend->server_cert = SSL_get_peer_certificate(backend->handle); if(!backend->server_cert) { BIO_free(mem); if(!strict) return CURLE_OK; failf(data, "SSL: couldn't get peer certificate!"); return CURLE_PEER_FAILED_VERIFICATION; } infof(data, "%s certificate:\n", SSL_IS_PROXY() ? "Proxy" : "Server"); rc = x509_name_oneline(X509_get_subject_name(backend->server_cert), buffer, sizeof(buffer)); infof(data, " subject: %s\n", rc?"[NONE]":buffer); #ifndef CURL_DISABLE_VERBOSE_STRINGS { long len; ASN1_TIME_print(mem, X509_get0_notBefore(backend->server_cert)); len = BIO_get_mem_data(mem, (char **) &ptr); infof(data, " start date: %.*s\n", len, ptr); (void)BIO_reset(mem); ASN1_TIME_print(mem, X509_get0_notAfter(backend->server_cert)); len = BIO_get_mem_data(mem, (char **) &ptr); infof(data, " expire date: %.*s\n", len, ptr); (void)BIO_reset(mem); } #endif BIO_free(mem); if(SSL_CONN_CONFIG(verifyhost)) { result = verifyhost(data, conn, backend->server_cert); if(result) { X509_free(backend->server_cert); backend->server_cert = NULL; return result; } } rc = x509_name_oneline(X509_get_issuer_name(backend->server_cert), buffer, sizeof(buffer)); if(rc) { if(strict) failf(data, "SSL: couldn't get X509-issuer name!"); result = CURLE_PEER_FAILED_VERIFICATION; } else { infof(data, " issuer: %s\n", buffer); /* We could do all sorts of certificate verification stuff here before deallocating the certificate. */ /* e.g. 
match issuer name with provided issuer certificate */ if(SSL_SET_OPTION(issuercert) || SSL_SET_OPTION(issuercert_blob)) { if(SSL_SET_OPTION(issuercert_blob)) fp = BIO_new_mem_buf(SSL_SET_OPTION(issuercert_blob)->data, (int)SSL_SET_OPTION(issuercert_blob)->len); else { fp = BIO_new(BIO_s_file()); if(!fp) { failf(data, "BIO_new return NULL, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); X509_free(backend->server_cert); backend->server_cert = NULL; return CURLE_OUT_OF_MEMORY; } if(BIO_read_filename(fp, SSL_SET_OPTION(issuercert)) <= 0) { if(strict) failf(data, "SSL: Unable to open issuer cert (%s)", SSL_SET_OPTION(issuercert)); BIO_free(fp); X509_free(backend->server_cert); backend->server_cert = NULL; return CURLE_SSL_ISSUER_ERROR; } } issuer = PEM_read_bio_X509(fp, NULL, ZERO_NULL, NULL); if(!issuer) { if(strict) failf(data, "SSL: Unable to read issuer cert (%s)", SSL_SET_OPTION(issuercert)); BIO_free(fp); X509_free(issuer); X509_free(backend->server_cert); backend->server_cert = NULL; return CURLE_SSL_ISSUER_ERROR; } if(X509_check_issued(issuer, backend->server_cert) != X509_V_OK) { if(strict) failf(data, "SSL: Certificate issuer check failed (%s)", SSL_SET_OPTION(issuercert)); BIO_free(fp); X509_free(issuer); X509_free(backend->server_cert); backend->server_cert = NULL; return CURLE_SSL_ISSUER_ERROR; } infof(data, " SSL certificate issuer check ok (%s)\n", SSL_SET_OPTION(issuercert)); BIO_free(fp); X509_free(issuer); } lerr = SSL_get_verify_result(backend->handle); SSL_SET_OPTION_LVALUE(certverifyresult) = lerr; if(lerr != X509_V_OK) { if(SSL_CONN_CONFIG(verifypeer)) { /* We probably never reach this, because SSL_connect() will fail and we return earlier if verifypeer is set? */ if(strict) failf(data, "SSL certificate verify result: %s (%ld)", X509_verify_cert_error_string(lerr), lerr); result = CURLE_PEER_FAILED_VERIFICATION; } else infof(data, " SSL certificate verify result: %s (%ld)," " continuing anyway.\n", X509_verify_cert_error_string(lerr), lerr); } else infof(data, " SSL certificate verify ok.\n"); } #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) if(SSL_CONN_CONFIG(verifystatus)) { result = verifystatus(data, connssl); if(result) { X509_free(backend->server_cert); backend->server_cert = NULL; return result; } } #endif if(!strict) /* when not strict, we don't bother about the verify cert problems */ result = CURLE_OK; ptr = SSL_PINNED_PUB_KEY(); if(!result && ptr) { result = pkp_pin_peer_pubkey(data, backend->server_cert, ptr); if(result) failf(data, "SSL: public key does not match pinned public key!"); } X509_free(backend->server_cert); backend->server_cert = NULL; connssl->connecting_state = ssl_connect_done; return result; } static CURLcode ossl_connect_step3(struct Curl_easy *data, struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; DEBUGASSERT(ssl_connect_3 == connssl->connecting_state); /* * We check certificates to authenticate the server; otherwise we risk * man-in-the-middle attack; NEVERTHELESS, if we're told explicitly not to * verify the peer ignore faults and failures from the server cert * operations. 
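* The 'strict' argument below is TRUE whenever verifypeer or verifyhost is enabled, making verification problems fatal; otherwise they are only logged.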
*/ result = servercert(data, conn, connssl, (SSL_CONN_CONFIG(verifypeer) || SSL_CONN_CONFIG(verifyhost))); if(!result) connssl->connecting_state = ssl_connect_done; return result; } static Curl_recv ossl_recv; static Curl_send ossl_send; static CURLcode ossl_connect_common(struct Curl_easy *data, struct connectdata *conn, int sockindex, bool nonblocking, bool *done) { CURLcode result; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; curl_socket_t sockfd = conn->sock[sockindex]; int what; /* check if the connection has already been established */ if(ssl_connection_complete == connssl->state) { *done = TRUE; return CURLE_OK; } if(ssl_connect_1 == connssl->connecting_state) { /* Find out how much more time we're allowed */ const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE); if(timeout_ms < 0) { /* no need to continue if time already is up */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } result = ossl_connect_step1(data, conn, sockindex); if(result) return result; } while(ssl_connect_2 == connssl->connecting_state || ssl_connect_2_reading == connssl->connecting_state || ssl_connect_2_writing == connssl->connecting_state) { /* check allowed time left */ const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE); if(timeout_ms < 0) { /* no need to continue if time already is up */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } /* if ssl is expecting something, check if it's available. */ if(connssl->connecting_state == ssl_connect_2_reading || connssl->connecting_state == ssl_connect_2_writing) { curl_socket_t writefd = ssl_connect_2_writing == connssl->connecting_state?sockfd:CURL_SOCKET_BAD; curl_socket_t readfd = ssl_connect_2_reading == connssl->connecting_state?sockfd:CURL_SOCKET_BAD; what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd, nonblocking?0:timeout_ms); if(what < 0) { /* fatal error */ failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO); return CURLE_SSL_CONNECT_ERROR; } if(0 == what) { if(nonblocking) { *done = FALSE; return CURLE_OK; } /* timeout */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } /* socket is readable or writable */ } /* Run transaction, and return to the caller if it failed or if this * connection is done nonblocking and this loop would execute again. This * permits the owner of a multi handle to abort a connection attempt * before step2 has completed while ensuring that a client using select() * or epoll() will always have a valid fdset to wait on. */ result = ossl_connect_step2(data, conn, sockindex); if(result || (nonblocking && (ssl_connect_2 == connssl->connecting_state || ssl_connect_2_reading == connssl->connecting_state || ssl_connect_2_writing == connssl->connecting_state))) return result; } /* repeat step2 until all transactions are done. 
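Once the handshake has completed, step3 performs the server certificate checks and the connection is marked complete.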
*/ if(ssl_connect_3 == connssl->connecting_state) { result = ossl_connect_step3(data, conn, sockindex); if(result) return result; } if(ssl_connect_done == connssl->connecting_state) { connssl->state = ssl_connection_complete; conn->recv[sockindex] = ossl_recv; conn->send[sockindex] = ossl_send; *done = TRUE; } else *done = FALSE; /* Reset our connect state machine */ connssl->connecting_state = ssl_connect_1; return CURLE_OK; } static CURLcode ossl_connect_nonblocking(struct Curl_easy *data, struct connectdata *conn, int sockindex, bool *done) { return ossl_connect_common(data, conn, sockindex, TRUE, done); } static CURLcode ossl_connect(struct Curl_easy *data, struct connectdata *conn, int sockindex) { CURLcode result; bool done = FALSE; result = ossl_connect_common(data, conn, sockindex, FALSE, &done); if(result) return result; DEBUGASSERT(done); return CURLE_OK; } static bool ossl_data_pending(const struct connectdata *conn, int connindex) { const struct ssl_connect_data *connssl = &conn->ssl[connindex]; if(connssl->backend->handle && SSL_pending(connssl->backend->handle)) return TRUE; #ifndef CURL_DISABLE_PROXY { const struct ssl_connect_data *proxyssl = &conn->proxy_ssl[connindex]; if(proxyssl->backend->handle && SSL_pending(proxyssl->backend->handle)) return TRUE; } #endif return FALSE; } static size_t ossl_version(char *buffer, size_t size); static ssize_t ossl_send(struct Curl_easy *data, int sockindex, const void *mem, size_t len, CURLcode *curlcode) { /* SSL_write() is said to return 'int' while write() and send() returns 'size_t' */ int err; char error_buffer[256]; unsigned long sslerror; int memlen; int rc; struct connectdata *conn = data->conn; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; struct ssl_backend_data *backend = connssl->backend; ERR_clear_error(); memlen = (len > (size_t)INT_MAX) ? INT_MAX : (int)len; set_logger(conn, data); rc = SSL_write(backend->handle, mem, memlen); if(rc <= 0) { err = SSL_get_error(backend->handle, rc); switch(err) { case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: /* The operation did not complete; the same TLS/SSL I/O function should be called again later. This is basically an EWOULDBLOCK equivalent. */ *curlcode = CURLE_AGAIN; return -1; case SSL_ERROR_SYSCALL: { int sockerr = SOCKERRNO; sslerror = ERR_get_error(); if(sslerror) ossl_strerror(sslerror, error_buffer, sizeof(error_buffer)); else if(sockerr) Curl_strerror(sockerr, error_buffer, sizeof(error_buffer)); else { strncpy(error_buffer, SSL_ERROR_to_str(err), sizeof(error_buffer)); error_buffer[sizeof(error_buffer) - 1] = '\0'; } failf(data, OSSL_PACKAGE " SSL_write: %s, errno %d", error_buffer, sockerr); *curlcode = CURLE_SEND_ERROR; return -1; } case SSL_ERROR_SSL: /* A failure in the SSL library occurred, usually a protocol error. The OpenSSL error queue contains more information on the error. 
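The SSL_R_BIO_NOT_SET check below detects an attempt to tunnel SSL inside an already established SSL connection ("double SSL"), which this backend does not support.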
*/ sslerror = ERR_get_error(); if(ERR_GET_LIB(sslerror) == ERR_LIB_SSL && ERR_GET_REASON(sslerror) == SSL_R_BIO_NOT_SET && conn->ssl[sockindex].state == ssl_connection_complete #ifndef CURL_DISABLE_PROXY && conn->proxy_ssl[sockindex].state == ssl_connection_complete #endif ) { char ver[120]; ossl_version(ver, 120); failf(data, "Error: %s does not support double SSL tunneling.", ver); } else failf(data, "SSL_write() error: %s", ossl_strerror(sslerror, error_buffer, sizeof(error_buffer))); *curlcode = CURLE_SEND_ERROR; return -1; } /* a true error */ failf(data, OSSL_PACKAGE " SSL_write: %s, errno %d", SSL_ERROR_to_str(err), SOCKERRNO); *curlcode = CURLE_SEND_ERROR; return -1; } *curlcode = CURLE_OK; return (ssize_t)rc; /* number of bytes */ } static ssize_t ossl_recv(struct Curl_easy *data, /* transfer */ int num, /* socketindex */ char *buf, /* store read data here */ size_t buffersize, /* max amount to read */ CURLcode *curlcode) { char error_buffer[256]; unsigned long sslerror; ssize_t nread; int buffsize; struct connectdata *conn = data->conn; struct ssl_connect_data *connssl = &conn->ssl[num]; struct ssl_backend_data *backend = connssl->backend; ERR_clear_error(); buffsize = (buffersize > (size_t)INT_MAX) ? INT_MAX : (int)buffersize; set_logger(conn, data); nread = (ssize_t)SSL_read(backend->handle, buf, buffsize); if(nread <= 0) { /* failed SSL_read */ int err = SSL_get_error(backend->handle, (int)nread); switch(err) { case SSL_ERROR_NONE: /* this is not an error */ break; case SSL_ERROR_ZERO_RETURN: /* no more data */ /* close_notify alert */ if(num == FIRSTSOCKET) /* mark the connection for close if it is indeed the control connection */ connclose(conn, "TLS close_notify"); break; case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: /* there's data pending, re-invoke SSL_read() */ *curlcode = CURLE_AGAIN; return -1; default: /* openssl/ssl.h for SSL_ERROR_SYSCALL says "look at error stack/return value/errno" */ /* https://www.openssl.org/docs/crypto/ERR_get_error.html */ sslerror = ERR_get_error(); if((nread < 0) || sslerror) { /* If the return code was negative or there actually is an error in the queue */ int sockerr = SOCKERRNO; if(sslerror) ossl_strerror(sslerror, error_buffer, sizeof(error_buffer)); else if(sockerr && err == SSL_ERROR_SYSCALL) Curl_strerror(sockerr, error_buffer, sizeof(error_buffer)); else { strncpy(error_buffer, SSL_ERROR_to_str(err), sizeof(error_buffer)); error_buffer[sizeof(error_buffer) - 1] = '\0'; } failf(data, OSSL_PACKAGE " SSL_read: %s, errno %d", error_buffer, sockerr); *curlcode = CURLE_RECV_ERROR; return -1; } /* For debug builds be a little stricter and error on any SSL_ERROR_SYSCALL. For example a server may have closed the connection abruptly without a close_notify alert. For compatibility with older peers we don't do this by default. 
#4624 We can use this to gauge how many users may be affected, and if it goes ok eventually transition to allow in dev and release with the newest OpenSSL: #if (OPENSSL_VERSION_NUMBER >= 0x10101000L) */ #ifdef DEBUGBUILD if(err == SSL_ERROR_SYSCALL) { int sockerr = SOCKERRNO; if(sockerr) Curl_strerror(sockerr, error_buffer, sizeof(error_buffer)); else { msnprintf(error_buffer, sizeof(error_buffer), "Connection closed abruptly"); } failf(data, OSSL_PACKAGE " SSL_read: %s, errno %d" " (Fatal because this is a curl debug build)", error_buffer, sockerr); *curlcode = CURLE_RECV_ERROR; return -1; } #endif } } return nread; } static size_t ossl_version(char *buffer, size_t size) { #ifdef LIBRESSL_VERSION_NUMBER #if LIBRESSL_VERSION_NUMBER < 0x2070100fL return msnprintf(buffer, size, "%s/%lx.%lx.%lx", OSSL_PACKAGE, (LIBRESSL_VERSION_NUMBER>>28)&0xf, (LIBRESSL_VERSION_NUMBER>>20)&0xff, (LIBRESSL_VERSION_NUMBER>>12)&0xff); #else /* OpenSSL_version() first appeared in LibreSSL 2.7.1 */ char *p; int count; const char *ver = OpenSSL_version(OPENSSL_VERSION); const char expected[] = OSSL_PACKAGE " "; /* ie "LibreSSL " */ if(Curl_strncasecompare(ver, expected, sizeof(expected) - 1)) { ver += sizeof(expected) - 1; } count = msnprintf(buffer, size, "%s/%s", OSSL_PACKAGE, ver); for(p = buffer; *p; ++p) { if(ISSPACE(*p)) *p = '_'; } return count; #endif #elif defined(OPENSSL_IS_BORINGSSL) return msnprintf(buffer, size, OSSL_PACKAGE); #elif defined(HAVE_OPENSSL_VERSION) && defined(OPENSSL_VERSION_STRING) return msnprintf(buffer, size, "%s/%s", OSSL_PACKAGE, OpenSSL_version(OPENSSL_VERSION_STRING)); #else /* not LibreSSL, BoringSSL and not using OpenSSL_version */ char sub[3]; unsigned long ssleay_value; sub[2]='\0'; sub[1]='\0'; ssleay_value = OpenSSL_version_num(); if(ssleay_value < 0x906000) { ssleay_value = SSLEAY_VERSION_NUMBER; sub[0]='\0'; } else { if(ssleay_value&0xff0) { int minor_ver = (ssleay_value >> 4) & 0xff; if(minor_ver > 26) { /* handle extended version introduced for 0.9.8za */ sub[1] = (char) ((minor_ver - 1) % 26 + 'a' + 1); sub[0] = 'z'; } else { sub[0] = (char) (minor_ver + 'a' - 1); } } else sub[0]='\0'; } return msnprintf(buffer, size, "%s/%lx.%lx.%lx%s" #ifdef OPENSSL_FIPS "-fips" #endif , OSSL_PACKAGE, (ssleay_value>>28)&0xf, (ssleay_value>>20)&0xff, (ssleay_value>>12)&0xff, sub); #endif /* OPENSSL_IS_BORINGSSL */ } /* can be called with data == NULL */ static CURLcode ossl_random(struct Curl_easy *data, unsigned char *entropy, size_t length) { int rc; if(data) { if(ossl_seed(data)) /* Initiate the seed if not already done */ return CURLE_FAILED_INIT; /* couldn't seed for some reason */ } else { if(!rand_enough()) return CURLE_FAILED_INIT; } /* RAND_bytes() returns 1 on success, 0 otherwise. */ rc = RAND_bytes(entropy, curlx_uztosi(length)); return (rc == 1 ? 
CURLE_OK : CURLE_FAILED_INIT); } #if (OPENSSL_VERSION_NUMBER >= 0x0090800fL) && !defined(OPENSSL_NO_SHA256) static CURLcode ossl_sha256sum(const unsigned char *tmp, /* input */ size_t tmplen, unsigned char *sha256sum /* output */, size_t unused) { EVP_MD_CTX *mdctx; unsigned int len = 0; (void) unused; mdctx = EVP_MD_CTX_create(); if(!mdctx) return CURLE_OUT_OF_MEMORY; EVP_DigestInit(mdctx, EVP_sha256()); EVP_DigestUpdate(mdctx, tmp, tmplen); EVP_DigestFinal_ex(mdctx, sha256sum, &len); EVP_MD_CTX_destroy(mdctx); return CURLE_OK; } #endif static bool ossl_cert_status_request(void) { #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) return TRUE; #else return FALSE; #endif } static void *ossl_get_internals(struct ssl_connect_data *connssl, CURLINFO info) { /* Legacy: CURLINFO_TLS_SESSION must return an SSL_CTX pointer. */ struct ssl_backend_data *backend = connssl->backend; return info == CURLINFO_TLS_SESSION ? (void *)backend->ctx : (void *)backend->handle; } const struct Curl_ssl Curl_ssl_openssl = { { CURLSSLBACKEND_OPENSSL, "openssl" }, /* info */ SSLSUPP_CA_PATH | SSLSUPP_CAINFO_BLOB | SSLSUPP_CERTINFO | SSLSUPP_PINNEDPUBKEY | SSLSUPP_SSL_CTX | #ifdef HAVE_SSL_CTX_SET_CIPHERSUITES SSLSUPP_TLS13_CIPHERSUITES | #endif SSLSUPP_HTTPS_PROXY, sizeof(struct ssl_backend_data), ossl_init, /* init */ ossl_cleanup, /* cleanup */ ossl_version, /* version */ ossl_check_cxn, /* check_cxn */ ossl_shutdown, /* shutdown */ ossl_data_pending, /* data_pending */ ossl_random, /* random */ ossl_cert_status_request, /* cert_status_request */ ossl_connect, /* connect */ ossl_connect_nonblocking, /* connect_nonblocking */ Curl_ssl_getsock, /* getsock */ ossl_get_internals, /* get_internals */ ossl_close, /* close_one */ ossl_close_all, /* close_all */ ossl_session_free, /* session_free */ ossl_set_engine, /* set_engine */ ossl_set_engine_default, /* set_engine_default */ ossl_engines_list, /* engines_list */ Curl_none_false_start, /* false_start */ #if (OPENSSL_VERSION_NUMBER >= 0x0090800fL) && !defined(OPENSSL_NO_SHA256) ossl_sha256sum /* sha256sum */ #else NULL /* sha256sum */ #endif }; #endif /* USE_OPENSSL */
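/* vtls.c dispatches all TLS operations through the Curl_ssl_openssl vtable
   defined above. */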
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2021, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* * Source file for all OpenSSL-specific code for the TLS/SSL layer. No code * but vtls.c should ever call or use these functions. */ #include "curl_setup.h" #ifdef USE_OPENSSL #include <limits.h> /* Wincrypt must be included before anything that could include OpenSSL. */ #if defined(USE_WIN32_CRYPTO) #include <wincrypt.h> /* Undefine wincrypt conflicting symbols for BoringSSL. */ #undef X509_NAME #undef X509_EXTENSIONS #undef PKCS7_ISSUER_AND_SERIAL #undef PKCS7_SIGNER_INFO #undef OCSP_REQUEST #undef OCSP_RESPONSE #endif #include "urldata.h" #include "sendf.h" #include "formdata.h" /* for the boundary function */ #include "url.h" /* for the ssl config check function */ #include "inet_pton.h" #include "openssl.h" #include "connect.h" #include "slist.h" #include "select.h" #include "vtls.h" #include "keylog.h" #include "strcase.h" #include "hostcheck.h" #include "multiif.h" #include "strerror.h" #include "curl_printf.h" #include <openssl/ssl.h> #include <openssl/rand.h> #include <openssl/x509v3.h> #ifndef OPENSSL_NO_DSA #include <openssl/dsa.h> #endif #include <openssl/dh.h> #include <openssl/err.h> #include <openssl/md5.h> #include <openssl/conf.h> #include <openssl/bn.h> #include <openssl/rsa.h> #include <openssl/bio.h> #include <openssl/buffer.h> #include <openssl/pkcs12.h> #ifdef USE_AMISSL #include "amigaos.h" #endif #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_OCSP) #include <openssl/ocsp.h> #endif #if (OPENSSL_VERSION_NUMBER >= 0x0090700fL) && /* 0.9.7 or later */ \ !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_UI_CONSOLE) #define USE_OPENSSL_ENGINE #include <openssl/engine.h> #endif #include "warnless.h" #include "non-ascii.h" /* for Curl_convert_from_utf8 prototype */ /* The last #include files should be: */ #include "curl_memory.h" #include "memdebug.h" /* Uncomment the ALLOW_RENEG line to a real #define if you want to allow TLS renegotiations when built with BoringSSL. Renegotiating is non-compliant with HTTP/2 and "an extremely dangerous protocol feature". Beware. 
#define ALLOW_RENEG 1 */ #ifndef OPENSSL_VERSION_NUMBER #error "OPENSSL_VERSION_NUMBER not defined" #endif #ifdef USE_OPENSSL_ENGINE #include <openssl/ui.h> #endif #if OPENSSL_VERSION_NUMBER >= 0x00909000L #define SSL_METHOD_QUAL const #else #define SSL_METHOD_QUAL #endif #if (OPENSSL_VERSION_NUMBER >= 0x10000000L) #define HAVE_ERR_REMOVE_THREAD_STATE 1 #endif #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && /* OpenSSL 1.1.0+ */ \ !(defined(LIBRESSL_VERSION_NUMBER) && \ LIBRESSL_VERSION_NUMBER < 0x20700000L) #define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER #define HAVE_X509_GET0_EXTENSIONS 1 /* added in 1.1.0 -pre1 */ #define HAVE_OPAQUE_EVP_PKEY 1 /* since 1.1.0 -pre3 */ #define HAVE_OPAQUE_RSA_DSA_DH 1 /* since 1.1.0 -pre5 */ #define CONST_EXTS const #define HAVE_ERR_REMOVE_THREAD_STATE_DEPRECATED 1 /* funny typecast define due to difference in API */ #ifdef LIBRESSL_VERSION_NUMBER #define ARG2_X509_signature_print (X509_ALGOR *) #else #define ARG2_X509_signature_print #endif #else /* For OpenSSL before 1.1.0 */ #define ASN1_STRING_get0_data(x) ASN1_STRING_data(x) #define X509_get0_notBefore(x) X509_get_notBefore(x) #define X509_get0_notAfter(x) X509_get_notAfter(x) #define CONST_EXTS /* nope */ #ifndef LIBRESSL_VERSION_NUMBER #define OpenSSL_version_num() SSLeay() #endif #endif #if (OPENSSL_VERSION_NUMBER >= 0x1000200fL) && /* 1.0.2 or later */ \ !(defined(LIBRESSL_VERSION_NUMBER) && \ LIBRESSL_VERSION_NUMBER < 0x20700000L) #define HAVE_X509_GET0_SIGNATURE 1 #endif #if (OPENSSL_VERSION_NUMBER >= 0x1000200fL) /* 1.0.2 or later */ #define HAVE_SSL_GET_SHUTDOWN 1 #endif #if OPENSSL_VERSION_NUMBER >= 0x10002003L && \ OPENSSL_VERSION_NUMBER <= 0x10002FFFL && \ !defined(OPENSSL_NO_COMP) #define HAVE_SSL_COMP_FREE_COMPRESSION_METHODS 1 #endif #if (OPENSSL_VERSION_NUMBER < 0x0090808fL) /* not present in older OpenSSL */ #define OPENSSL_load_builtin_modules(x) #endif /* * Whether SSL_CTX_set_keylog_callback is available. * OpenSSL: supported since 1.1.1 https://github.com/openssl/openssl/pull/2287 * BoringSSL: supported since d28f59c27bac (committed 2015-11-19) * LibreSSL: unsupported in at least 2.7.2 (explicitly check for it since it * lies and pretends to be OpenSSL 2.0.0). */ #if (OPENSSL_VERSION_NUMBER >= 0x10101000L && \ !defined(LIBRESSL_VERSION_NUMBER)) || \ defined(OPENSSL_IS_BORINGSSL) #define HAVE_KEYLOG_CALLBACK #endif /* Whether SSL_CTX_set_ciphersuites is available. * OpenSSL: supported since 1.1.1 (commit a53b5be6a05) * BoringSSL: no * LibreSSL: no */ #if ((OPENSSL_VERSION_NUMBER >= 0x10101000L) && \ !defined(LIBRESSL_VERSION_NUMBER) && \ !defined(OPENSSL_IS_BORINGSSL)) #define HAVE_SSL_CTX_SET_CIPHERSUITES #define HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH /* SET_EC_CURVES available under the same preconditions: see * https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set1_groups.html */ #define HAVE_SSL_CTX_SET_EC_CURVES #endif #if defined(LIBRESSL_VERSION_NUMBER) #define OSSL_PACKAGE "LibreSSL" #elif defined(OPENSSL_IS_BORINGSSL) #define OSSL_PACKAGE "BoringSSL" #else #define OSSL_PACKAGE "OpenSSL" #endif #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* up2date versions of OpenSSL maintain the default reasonably secure without * breaking compatibility, so it is better not to override the default by curl */ #define DEFAULT_CIPHER_SELECTION NULL #else /* ... 
but it is not the case with old versions of OpenSSL */ #define DEFAULT_CIPHER_SELECTION \ "ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH" #endif #ifdef HAVE_OPENSSL_SRP /* the function exists */ #ifdef USE_TLS_SRP /* the functionality is not disabled */ #define USE_OPENSSL_SRP #endif #endif struct ssl_backend_data { struct Curl_easy *logger; /* transfer handle to pass trace logs to, only using sockindex 0 */ /* these ones requires specific SSL-types */ SSL_CTX* ctx; SSL* handle; X509* server_cert; #ifndef HAVE_KEYLOG_CALLBACK /* Set to true once a valid keylog entry has been created to avoid dupes. */ bool keylog_done; #endif }; static void ossl_associate_connection(struct Curl_easy *data, struct connectdata *conn, int sockindex); /* * Number of bytes to read from the random number seed file. This must be * a finite value (because some entropy "files" like /dev/urandom have * an infinite length), but must be large enough to provide enough * entropy to properly seed OpenSSL's PRNG. */ #define RAND_LOAD_LENGTH 1024 #ifdef HAVE_KEYLOG_CALLBACK static void ossl_keylog_callback(const SSL *ssl, const char *line) { (void)ssl; Curl_tls_keylog_write_line(line); } #else /* * ossl_log_tls12_secret is called by libcurl to make the CLIENT_RANDOMs if the * OpenSSL being used doesn't have native support for doing that. */ static void ossl_log_tls12_secret(const SSL *ssl, bool *keylog_done) { const SSL_SESSION *session = SSL_get_session(ssl); unsigned char client_random[SSL3_RANDOM_SIZE]; unsigned char master_key[SSL_MAX_MASTER_KEY_LENGTH]; int master_key_length = 0; if(!session || *keylog_done) return; #if OPENSSL_VERSION_NUMBER >= 0x10100000L && \ !(defined(LIBRESSL_VERSION_NUMBER) && \ LIBRESSL_VERSION_NUMBER < 0x20700000L) /* ssl->s3 is not checked in openssl 1.1.0-pre6, but let's assume that * we have a valid SSL context if we have a non-NULL session. */ SSL_get_client_random(ssl, client_random, SSL3_RANDOM_SIZE); master_key_length = (int) SSL_SESSION_get_master_key(session, master_key, SSL_MAX_MASTER_KEY_LENGTH); #else if(ssl->s3 && session->master_key_length > 0) { master_key_length = session->master_key_length; memcpy(master_key, session->master_key, session->master_key_length); memcpy(client_random, ssl->s3->client_random, SSL3_RANDOM_SIZE); } #endif /* The handshake has not progressed sufficiently yet, or this is a TLS 1.3 * session (when curl was built with older OpenSSL headers and running with * newer OpenSSL runtime libraries). 
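Where SSL_CTX_set_keylog_callback exists (the situation the HAVE_KEYLOG_CALLBACK check above detects), the whole key-logging job reduces to registering one callback on the context; the manual TLS 1.2 extraction below it is only a fallback for older libraries. A minimal sketch of the native route, assuming an SSLKEYLOGFILE-style environment variable (the names demo_keylog_cb and the env lookup are illustrative, not curl's wiring):

#if OPENSSL_VERSION_NUMBER >= 0x10101000L && !defined(LIBRESSL_VERSION_NUMBER)
#include <stdio.h>
#include <stdlib.h>
#include <openssl/ssl.h>

/* Append each NSS-format key log line to the file named in $SSLKEYLOGFILE. */
static void demo_keylog_cb(const SSL *ssl, const char *line)
{
  const char *path = getenv("SSLKEYLOGFILE");
  FILE *fp;
  (void)ssl;
  if(!path)
    return;
  fp = fopen(path, "a");
  if(fp) {
    fprintf(fp, "%s\n", line);
    fclose(fp);
  }
}

/* after SSL_CTX_new(): SSL_CTX_set_keylog_callback(ctx, demo_keylog_cb); */
#endif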
*/ if(master_key_length <= 0) return; *keylog_done = true; Curl_tls_keylog_write("CLIENT_RANDOM", client_random, master_key, master_key_length); } #endif /* !HAVE_KEYLOG_CALLBACK */ static const char *SSL_ERROR_to_str(int err) { switch(err) { case SSL_ERROR_NONE: return "SSL_ERROR_NONE"; case SSL_ERROR_SSL: return "SSL_ERROR_SSL"; case SSL_ERROR_WANT_READ: return "SSL_ERROR_WANT_READ"; case SSL_ERROR_WANT_WRITE: return "SSL_ERROR_WANT_WRITE"; case SSL_ERROR_WANT_X509_LOOKUP: return "SSL_ERROR_WANT_X509_LOOKUP"; case SSL_ERROR_SYSCALL: return "SSL_ERROR_SYSCALL"; case SSL_ERROR_ZERO_RETURN: return "SSL_ERROR_ZERO_RETURN"; case SSL_ERROR_WANT_CONNECT: return "SSL_ERROR_WANT_CONNECT"; case SSL_ERROR_WANT_ACCEPT: return "SSL_ERROR_WANT_ACCEPT"; #if defined(SSL_ERROR_WANT_ASYNC) case SSL_ERROR_WANT_ASYNC: return "SSL_ERROR_WANT_ASYNC"; #endif #if defined(SSL_ERROR_WANT_ASYNC_JOB) case SSL_ERROR_WANT_ASYNC_JOB: return "SSL_ERROR_WANT_ASYNC_JOB"; #endif #if defined(SSL_ERROR_WANT_EARLY) case SSL_ERROR_WANT_EARLY: return "SSL_ERROR_WANT_EARLY"; #endif default: return "SSL_ERROR unknown"; } } /* Return error string for last OpenSSL error */ static char *ossl_strerror(unsigned long error, char *buf, size_t size) { if(size) *buf = '\0'; #ifdef OPENSSL_IS_BORINGSSL ERR_error_string_n((uint32_t)error, buf, size); #else ERR_error_string_n(error, buf, size); #endif if(size > 1 && !*buf) { strncpy(buf, (error ? "Unknown error" : "No error"), size); buf[size - 1] = '\0'; } return buf; } /* Return an extra data index for the transfer data. * This index can be used with SSL_get_ex_data() and SSL_set_ex_data(). */ static int ossl_get_ssl_data_index(void) { static int ssl_ex_data_data_index = -1; if(ssl_ex_data_data_index < 0) { ssl_ex_data_data_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); } return ssl_ex_data_data_index; } /* Return an extra data index for the connection data. * This index can be used with SSL_get_ex_data() and SSL_set_ex_data(). */ static int ossl_get_ssl_conn_index(void) { static int ssl_ex_data_conn_index = -1; if(ssl_ex_data_conn_index < 0) { ssl_ex_data_conn_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); } return ssl_ex_data_conn_index; } /* Return an extra data index for the sockindex. * This index can be used with SSL_get_ex_data() and SSL_set_ex_data(). */ static int ossl_get_ssl_sockindex_index(void) { static int sockindex_index = -1; if(sockindex_index < 0) { sockindex_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); } return sockindex_index; } /* Return an extra data index for proxy boolean. * This index can be used with SSL_get_ex_data() and SSL_set_ex_data(). */ static int ossl_get_proxy_index(void) { static int proxy_index = -1; if(proxy_index < 0) { proxy_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); } return proxy_index; } static int passwd_callback(char *buf, int num, int encrypting, void *global_passwd) { DEBUGASSERT(0 == encrypting); if(!encrypting) { int klen = curlx_uztosi(strlen((char *)global_passwd)); if(num > klen) { memcpy(buf, global_passwd, klen + 1); return klen; } } return 0; } /* * rand_enough() returns TRUE if we have seeded the random engine properly. */ static bool rand_enough(void) { return (0 != RAND_status()) ? 
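The four *_index() helpers above all follow the same OpenSSL ex-data pattern: allocate an application-specific slot once, then hang a pointer off each SSL handle and read it back later (for example inside a callback). A generic sketch of that pattern, with illustrative names:

#include <openssl/ssl.h>

/* Allocate one application slot; call once, e.g. at init time. */
static int demo_exdata_index(void)
{
  static int idx = -1;
  if(idx < 0)
    idx = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL);
  return idx;
}

/* Attach an arbitrary pointer to an SSL handle... */
static void demo_attach(SSL *ssl, void *userptr)
{
  SSL_set_ex_data(ssl, demo_exdata_index(), userptr);
}

/* ...and fetch it back, typically from inside an OpenSSL callback. */
static void *demo_lookup(SSL *ssl)
{
  return SSL_get_ex_data(ssl, demo_exdata_index());
}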
TRUE : FALSE; } static CURLcode ossl_seed(struct Curl_easy *data) { /* we have the "SSL is seeded" boolean static to prevent multiple time-consuming seedings in vain */ static bool ssl_seeded = FALSE; char fname[256]; if(ssl_seeded) return CURLE_OK; if(rand_enough()) { /* OpenSSL 1.1.0+ will return here */ ssl_seeded = TRUE; return CURLE_OK; } #ifndef RANDOM_FILE /* if RANDOM_FILE isn't defined, we only perform this if an option tells us to! */ if(data->set.str[STRING_SSL_RANDOM_FILE]) #define RANDOM_FILE "" /* doesn't matter won't be used */ #endif { /* let the option override the define */ RAND_load_file((data->set.str[STRING_SSL_RANDOM_FILE]? data->set.str[STRING_SSL_RANDOM_FILE]: RANDOM_FILE), RAND_LOAD_LENGTH); if(rand_enough()) return CURLE_OK; } #if defined(HAVE_RAND_EGD) /* only available in OpenSSL 0.9.5 and later */ /* EGD_SOCKET is set at configure time or not at all */ #ifndef EGD_SOCKET /* If we don't have the define set, we only do this if the egd-option is set */ if(data->set.str[STRING_SSL_EGDSOCKET]) #define EGD_SOCKET "" /* doesn't matter won't be used */ #endif { /* If there's an option and a define, the option overrides the define */ int ret = RAND_egd(data->set.str[STRING_SSL_EGDSOCKET]? data->set.str[STRING_SSL_EGDSOCKET]:EGD_SOCKET); if(-1 != ret) { if(rand_enough()) return CURLE_OK; } } #endif /* fallback to a custom seeding of the PRNG using a hash based on a current time */ do { unsigned char randb[64]; size_t len = sizeof(randb); size_t i, i_max; for(i = 0, i_max = len / sizeof(struct curltime); i < i_max; ++i) { struct curltime tv = Curl_now(); Curl_wait_ms(1); tv.tv_sec *= i + 1; tv.tv_usec *= (unsigned int)i + 2; tv.tv_sec ^= ((Curl_now().tv_sec + Curl_now().tv_usec) * (i + 3)) << 8; tv.tv_usec ^= (unsigned int) ((Curl_now().tv_sec + Curl_now().tv_usec) * (i + 4)) << 16; memcpy(&randb[i * sizeof(struct curltime)], &tv, sizeof(struct curltime)); } RAND_add(randb, (int)len, (double)len/2); } while(!rand_enough()); /* generates a default path for the random seed file */ fname[0] = 0; /* blank it first */ RAND_file_name(fname, sizeof(fname)); if(fname[0]) { /* we got a file name to try */ RAND_load_file(fname, RAND_LOAD_LENGTH); if(rand_enough()) return CURLE_OK; } infof(data, "libcurl is now using a weak random seed!\n"); return (rand_enough() ? CURLE_OK : CURLE_SSL_CONNECT_ERROR /* confusing error code */); } #ifndef SSL_FILETYPE_ENGINE #define SSL_FILETYPE_ENGINE 42 #endif #ifndef SSL_FILETYPE_PKCS12 #define SSL_FILETYPE_PKCS12 43 #endif static int do_file_type(const char *type) { if(!type || !type[0]) return SSL_FILETYPE_PEM; if(strcasecompare(type, "PEM")) return SSL_FILETYPE_PEM; if(strcasecompare(type, "DER")) return SSL_FILETYPE_ASN1; if(strcasecompare(type, "ENG")) return SSL_FILETYPE_ENGINE; if(strcasecompare(type, "P12")) return SSL_FILETYPE_PKCS12; return -1; } #ifdef USE_OPENSSL_ENGINE /* * Supply default password to the engine user interface conversation. * The password is passed by OpenSSL engine from ENGINE_load_private_key() * last argument to the ui and can be obtained by UI_get0_user_data(ui) here. */ static int ssl_ui_reader(UI *ui, UI_STRING *uis) { const char *password; switch(UI_get_string_type(uis)) { case UIT_PROMPT: case UIT_VERIFY: password = (const char *)UI_get0_user_data(ui); if(password && (UI_get_input_flags(uis) & UI_INPUT_FLAG_DEFAULT_PWD)) { UI_set_result(ui, uis, password); return 1; } default: break; } return (UI_method_get_reader(UI_OpenSSL()))(ui, uis); } /* * Suppress interactive request for a default password if available. 
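Reduced to its essentials, ossl_seed() trusts RAND_status() when the PRNG already reports itself seeded (the normal case on OpenSSL 1.1.0+) and only then starts feeding extra entropy and re-checking. A stripped-down sketch of that control flow; the /dev/urandom path is an illustrative assumption, not what the code above consults:

#include <openssl/rand.h>

static int demo_seed_prng(void)
{
  if(RAND_status())                /* non-zero: PRNG is already seeded */
    return 0;

  /* pull up to 1024 bytes from a seed file and check again */
  RAND_load_file("/dev/urandom", 1024);

  return RAND_status() ? 0 : -1;   /* still not seeded: give up */
}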
*/ static int ssl_ui_writer(UI *ui, UI_STRING *uis) { switch(UI_get_string_type(uis)) { case UIT_PROMPT: case UIT_VERIFY: if(UI_get0_user_data(ui) && (UI_get_input_flags(uis) & UI_INPUT_FLAG_DEFAULT_PWD)) { return 1; } default: break; } return (UI_method_get_writer(UI_OpenSSL()))(ui, uis); } /* * Check if a given string is a PKCS#11 URI */ static bool is_pkcs11_uri(const char *string) { return (string && strncasecompare(string, "pkcs11:", 7)); } #endif static CURLcode ossl_set_engine(struct Curl_easy *data, const char *engine); static int SSL_CTX_use_certificate_blob(SSL_CTX *ctx, const struct curl_blob *blob, int type, const char *key_passwd) { int ret = 0; X509 *x = NULL; /* the typecast of blob->len is fine since it is guaranteed to never be larger than CURL_MAX_INPUT_LENGTH */ BIO *in = BIO_new_mem_buf(blob->data, (int)(blob->len)); if(!in) return CURLE_OUT_OF_MEMORY; if(type == SSL_FILETYPE_ASN1) { /* j = ERR_R_ASN1_LIB; */ x = d2i_X509_bio(in, NULL); } else if(type == SSL_FILETYPE_PEM) { /* ERR_R_PEM_LIB; */ x = PEM_read_bio_X509(in, NULL, passwd_callback, (void *)key_passwd); } else { ret = 0; goto end; } if(!x) { ret = 0; goto end; } ret = SSL_CTX_use_certificate(ctx, x); end: X509_free(x); BIO_free(in); return ret; } static int SSL_CTX_use_PrivateKey_blob(SSL_CTX *ctx, const struct curl_blob *blob, int type, const char *key_passwd) { int ret = 0; EVP_PKEY *pkey = NULL; BIO *in = BIO_new_mem_buf(blob->data, (int)(blob->len)); if(!in) return CURLE_OUT_OF_MEMORY; if(type == SSL_FILETYPE_PEM) pkey = PEM_read_bio_PrivateKey(in, NULL, passwd_callback, (void *)key_passwd); else if(type == SSL_FILETYPE_ASN1) pkey = d2i_PrivateKey_bio(in, NULL); else { ret = 0; goto end; } if(!pkey) { ret = 0; goto end; } ret = SSL_CTX_use_PrivateKey(ctx, pkey); EVP_PKEY_free(pkey); end: BIO_free(in); return ret; } static int SSL_CTX_use_certificate_chain_blob(SSL_CTX *ctx, const struct curl_blob *blob, const char *key_passwd) { /* SSL_CTX_add1_chain_cert introduced in OpenSSL 1.0.2 */ #if (OPENSSL_VERSION_NUMBER >= 0x1000200fL) && /* OpenSSL 1.0.2 or later */ \ !(defined(LIBRESSL_VERSION_NUMBER) && \ (LIBRESSL_VERSION_NUMBER < 0x2090100fL)) /* LibreSSL 2.9.1 or later */ int ret = 0; X509 *x = NULL; void *passwd_callback_userdata = (void *)key_passwd; BIO *in = BIO_new_mem_buf(blob->data, (int)(blob->len)); if(!in) return CURLE_OUT_OF_MEMORY; ERR_clear_error(); x = PEM_read_bio_X509_AUX(in, NULL, passwd_callback, (void *)key_passwd); if(!x) { ret = 0; goto end; } ret = SSL_CTX_use_certificate(ctx, x); if(ERR_peek_error() != 0) ret = 0; if(ret) { X509 *ca; unsigned long err; if(!SSL_CTX_clear_chain_certs(ctx)) { ret = 0; goto end; } while((ca = PEM_read_bio_X509(in, NULL, passwd_callback, passwd_callback_userdata)) != NULL) { if(!SSL_CTX_add0_chain_cert(ctx, ca)) { X509_free(ca); ret = 0; goto end; } } err = ERR_peek_last_error(); if((ERR_GET_LIB(err) == ERR_LIB_PEM) && (ERR_GET_REASON(err) == PEM_R_NO_START_LINE)) ERR_clear_error(); else ret = 0; } end: X509_free(x); BIO_free(in); return ret; #else (void)ctx; /* unused */ (void)blob; /* unused */ (void)key_passwd; /* unused */ return 0; #endif } static int cert_stuff(struct Curl_easy *data, SSL_CTX* ctx, char *cert_file, const struct curl_blob *cert_blob, const char *cert_type, char *key_file, const struct curl_blob *key_blob, const char *key_type, char *key_passwd) { char error_buffer[256]; bool check_privkey = TRUE; int file_type = do_file_type(cert_type); if(cert_file || cert_blob || (file_type == SSL_FILETYPE_ENGINE)) { SSL *ssl; X509 *x509; int 
cert_done = 0; int cert_use_result; if(key_passwd) { /* set the password in the callback userdata */ SSL_CTX_set_default_passwd_cb_userdata(ctx, key_passwd); /* Set passwd callback: */ SSL_CTX_set_default_passwd_cb(ctx, passwd_callback); } switch(file_type) { case SSL_FILETYPE_PEM: /* SSL_CTX_use_certificate_chain_file() only works on PEM files */ cert_use_result = cert_blob ? SSL_CTX_use_certificate_chain_blob(ctx, cert_blob, key_passwd) : SSL_CTX_use_certificate_chain_file(ctx, cert_file); if(cert_use_result != 1) { failf(data, "could not load PEM client certificate, " OSSL_PACKAGE " error %s, " "(no key found, wrong pass phrase, or wrong file format?)", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); return 0; } break; case SSL_FILETYPE_ASN1: /* SSL_CTX_use_certificate_file() works with either PEM or ASN1, but we use the case above for PEM so this can only be performed with ASN1 files. */ cert_use_result = cert_blob ? SSL_CTX_use_certificate_blob(ctx, cert_blob, file_type, key_passwd) : SSL_CTX_use_certificate_file(ctx, cert_file, file_type); if(cert_use_result != 1) { failf(data, "could not load ASN1 client certificate, " OSSL_PACKAGE " error %s, " "(no key found, wrong pass phrase, or wrong file format?)", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); return 0; } break; case SSL_FILETYPE_ENGINE: #if defined(USE_OPENSSL_ENGINE) && defined(ENGINE_CTRL_GET_CMD_FROM_NAME) { /* Implicitly use pkcs11 engine if none was provided and the * cert_file is a PKCS#11 URI */ if(!data->state.engine) { if(is_pkcs11_uri(cert_file)) { if(ossl_set_engine(data, "pkcs11") != CURLE_OK) { return 0; } } } if(data->state.engine) { const char *cmd_name = "LOAD_CERT_CTRL"; struct { const char *cert_id; X509 *cert; } params; params.cert_id = cert_file; params.cert = NULL; /* Does the engine supports LOAD_CERT_CTRL ? */ if(!ENGINE_ctrl(data->state.engine, ENGINE_CTRL_GET_CMD_FROM_NAME, 0, (void *)cmd_name, NULL)) { failf(data, "ssl engine does not support loading certificates"); return 0; } /* Load the certificate from the engine */ if(!ENGINE_ctrl_cmd(data->state.engine, cmd_name, 0, &params, NULL, 1)) { failf(data, "ssl engine cannot load client cert with id" " '%s' [%s]", cert_file, ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return 0; } if(!params.cert) { failf(data, "ssl engine didn't initialized the certificate " "properly."); return 0; } if(SSL_CTX_use_certificate(ctx, params.cert) != 1) { failf(data, "unable to set client certificate"); X509_free(params.cert); return 0; } X509_free(params.cert); /* we don't need the handle any more... 
*/ } else { failf(data, "crypto engine not set, can't load certificate"); return 0; } } break; #else failf(data, "file type ENG for certificate not implemented"); return 0; #endif case SSL_FILETYPE_PKCS12: { BIO *cert_bio = NULL; PKCS12 *p12 = NULL; EVP_PKEY *pri; STACK_OF(X509) *ca = NULL; if(cert_blob) { cert_bio = BIO_new_mem_buf(cert_blob->data, (int)(cert_blob->len)); if(!cert_bio) { failf(data, "BIO_new_mem_buf NULL, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); return 0; } } else { cert_bio = BIO_new(BIO_s_file()); if(!cert_bio) { failf(data, "BIO_new return NULL, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); return 0; } if(BIO_read_filename(cert_bio, cert_file) <= 0) { failf(data, "could not open PKCS12 file '%s'", cert_file); BIO_free(cert_bio); return 0; } } p12 = d2i_PKCS12_bio(cert_bio, NULL); BIO_free(cert_bio); if(!p12) { failf(data, "error reading PKCS12 file '%s'", cert_blob ? "(memory blob)" : cert_file); return 0; } PKCS12_PBE_add(); if(!PKCS12_parse(p12, key_passwd, &pri, &x509, &ca)) { failf(data, "could not parse PKCS12 file, check password, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); PKCS12_free(p12); return 0; } PKCS12_free(p12); if(SSL_CTX_use_certificate(ctx, x509) != 1) { failf(data, "could not load PKCS12 client certificate, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); goto fail; } if(SSL_CTX_use_PrivateKey(ctx, pri) != 1) { failf(data, "unable to use private key from PKCS12 file '%s'", cert_file); goto fail; } if(!SSL_CTX_check_private_key (ctx)) { failf(data, "private key from PKCS12 file '%s' " "does not match certificate in same file", cert_file); goto fail; } /* Set Certificate Verification chain */ if(ca) { while(sk_X509_num(ca)) { /* * Note that sk_X509_pop() is used below to make sure the cert is * removed from the stack properly before getting passed to * SSL_CTX_add_extra_chain_cert(), which takes ownership. Previously * we used sk_X509_value() instead, but then we'd clean it in the * subsequent sk_X509_pop_free() call. */ X509 *x = sk_X509_pop(ca); if(!SSL_CTX_add_client_CA(ctx, x)) { X509_free(x); failf(data, "cannot add certificate to client CA list"); goto fail; } if(!SSL_CTX_add_extra_chain_cert(ctx, x)) { X509_free(x); failf(data, "cannot add certificate to certificate chain"); goto fail; } } } cert_done = 1; fail: EVP_PKEY_free(pri); X509_free(x509); #ifdef USE_AMISSL sk_X509_pop_free(ca, Curl_amiga_X509_free); #else sk_X509_pop_free(ca, X509_free); #endif if(!cert_done) return 0; /* failure! */ break; } default: failf(data, "not supported file type '%s' for certificate", cert_type); return 0; } if((!key_file) && (!key_blob)) { key_file = cert_file; key_blob = cert_blob; } else file_type = do_file_type(key_type); switch(file_type) { case SSL_FILETYPE_PEM: if(cert_done) break; /* FALLTHROUGH */ case SSL_FILETYPE_ASN1: cert_use_result = key_blob ? 
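The P12 branch above follows the stock OpenSSL recipe: DER-decode the container into a PKCS12 object, let PKCS12_parse() split it into private key, leaf certificate and extra CA certificates, then install each piece into the SSL_CTX and verify that key and certificate belong together. A compressed sketch of that recipe with error handling reduced to early returns (illustrative, without curl's diagnostics):

#include <openssl/pkcs12.h>
#include <openssl/ssl.h>

static int demo_load_p12(SSL_CTX *ctx, const char *file, const char *pw)
{
  BIO *in = BIO_new_file(file, "rb");
  PKCS12 *p12;
  EVP_PKEY *key = NULL;
  X509 *cert = NULL;
  STACK_OF(X509) *ca = NULL;
  int ok = 0;

  if(!in)
    return 0;
  p12 = d2i_PKCS12_bio(in, NULL);               /* DER-decode the container */
  BIO_free(in);
  if(!p12)
    return 0;
  if(PKCS12_parse(p12, pw, &key, &cert, &ca)) { /* split into key/cert/chain */
    ok = SSL_CTX_use_certificate(ctx, cert) == 1 &&
         SSL_CTX_use_PrivateKey(ctx, key) == 1 &&
         SSL_CTX_check_private_key(ctx) == 1;
    while(ok && ca && sk_X509_num(ca)) {        /* hand over any chain certs */
      X509 *extra = sk_X509_pop(ca);
      if(!SSL_CTX_add_extra_chain_cert(ctx, extra)) { /* takes ownership */
        X509_free(extra);
        ok = 0;
      }
    }
  }
  PKCS12_free(p12);
  EVP_PKEY_free(key);
  X509_free(cert);                              /* ctx holds its own ref */
  sk_X509_pop_free(ca, X509_free);
  return ok;
}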
SSL_CTX_use_PrivateKey_blob(ctx, key_blob, file_type, key_passwd) : SSL_CTX_use_PrivateKey_file(ctx, key_file, file_type); if(cert_use_result != 1) { failf(data, "unable to set private key file: '%s' type %s", key_file?key_file:"(memory blob)", key_type?key_type:"PEM"); return 0; } break; case SSL_FILETYPE_ENGINE: #ifdef USE_OPENSSL_ENGINE { /* XXXX still needs some work */ EVP_PKEY *priv_key = NULL; /* Implicitly use pkcs11 engine if none was provided and the * key_file is a PKCS#11 URI */ if(!data->state.engine) { if(is_pkcs11_uri(key_file)) { if(ossl_set_engine(data, "pkcs11") != CURLE_OK) { return 0; } } } if(data->state.engine) { UI_METHOD *ui_method = UI_create_method((char *)"curl user interface"); if(!ui_method) { failf(data, "unable do create " OSSL_PACKAGE " user-interface method"); return 0; } UI_method_set_opener(ui_method, UI_method_get_opener(UI_OpenSSL())); UI_method_set_closer(ui_method, UI_method_get_closer(UI_OpenSSL())); UI_method_set_reader(ui_method, ssl_ui_reader); UI_method_set_writer(ui_method, ssl_ui_writer); /* the typecast below was added to please mingw32 */ priv_key = (EVP_PKEY *) ENGINE_load_private_key(data->state.engine, key_file, ui_method, key_passwd); UI_destroy_method(ui_method); if(!priv_key) { failf(data, "failed to load private key from crypto engine"); return 0; } if(SSL_CTX_use_PrivateKey(ctx, priv_key) != 1) { failf(data, "unable to set private key"); EVP_PKEY_free(priv_key); return 0; } EVP_PKEY_free(priv_key); /* we don't need the handle any more... */ } else { failf(data, "crypto engine not set, can't load private key"); return 0; } } break; #else failf(data, "file type ENG for private key not supported"); return 0; #endif case SSL_FILETYPE_PKCS12: if(!cert_done) { failf(data, "file type P12 for private key not supported"); return 0; } break; default: failf(data, "not supported file type for private key"); return 0; } ssl = SSL_new(ctx); if(!ssl) { failf(data, "unable to create an SSL structure"); return 0; } x509 = SSL_get_certificate(ssl); /* This version was provided by Evan Jordan and is supposed to not leak memory as the previous version: */ if(x509) { EVP_PKEY *pktmp = X509_get_pubkey(x509); EVP_PKEY_copy_parameters(pktmp, SSL_get_privatekey(ssl)); EVP_PKEY_free(pktmp); } #if !defined(OPENSSL_NO_RSA) && !defined(OPENSSL_IS_BORINGSSL) { /* If RSA is used, don't check the private key if its flags indicate * it doesn't support it. */ EVP_PKEY *priv_key = SSL_get_privatekey(ssl); int pktype; #ifdef HAVE_OPAQUE_EVP_PKEY pktype = EVP_PKEY_id(priv_key); #else pktype = priv_key->type; #endif if(pktype == EVP_PKEY_RSA) { RSA *rsa = EVP_PKEY_get1_RSA(priv_key); if(RSA_flags(rsa) & RSA_METHOD_FLAG_NO_CHECK) check_privkey = FALSE; RSA_free(rsa); /* Decrement reference count */ } } #endif SSL_free(ssl); /* If we are using DSA, we can copy the parameters from * the private key */ if(check_privkey == TRUE) { /* Now we know that a key and cert have been set against * the SSL context */ if(!SSL_CTX_check_private_key(ctx)) { failf(data, "Private key does not match the certificate public key"); return 0; } } } return 1; } /* returns non-zero on failure */ static int x509_name_oneline(X509_NAME *a, char *buf, size_t size) { BIO *bio_out = BIO_new(BIO_s_mem()); BUF_MEM *biomem; int rc; if(!bio_out) return 1; /* alloc failed! 
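Stripped of the engine, blob and PKCS#12 paths, the core of cert_stuff() for the common PEM case is three SSL_CTX calls plus the final key/certificate consistency check. A minimal sketch, assuming both files are PEM and the key may be protected by 'passwd' (the helper name is illustrative):

#include <openssl/ssl.h>

static int demo_load_pem_client_cert(SSL_CTX *ctx, const char *certfile,
                                     const char *keyfile, char *passwd)
{
  if(passwd) {
    /* the default PEM callback treats this userdata as the pass phrase */
    SSL_CTX_set_default_passwd_cb_userdata(ctx, passwd);
  }
  /* leaf certificate plus any intermediates from one PEM file */
  if(SSL_CTX_use_certificate_chain_file(ctx, certfile) != 1)
    return 0;
  if(SSL_CTX_use_PrivateKey_file(ctx, keyfile, SSL_FILETYPE_PEM) != 1)
    return 0;
  /* make sure the key actually matches the certificate */
  return SSL_CTX_check_private_key(ctx) == 1;
}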
*/ rc = X509_NAME_print_ex(bio_out, a, 0, XN_FLAG_SEP_SPLUS_SPC); BIO_get_mem_ptr(bio_out, &biomem); if((size_t)biomem->length < size) size = biomem->length; else size--; /* don't overwrite the buffer end */ memcpy(buf, biomem->data, size); buf[size] = 0; BIO_free(bio_out); return !rc; } /** * Global SSL init * * @retval 0 error initializing SSL * @retval 1 SSL initialized successfully */ static int ossl_init(void) { #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && \ !defined(LIBRESSL_VERSION_NUMBER) const uint64_t flags = #ifdef OPENSSL_INIT_ENGINE_ALL_BUILTIN /* not present in BoringSSL */ OPENSSL_INIT_ENGINE_ALL_BUILTIN | #endif #ifdef CURL_DISABLE_OPENSSL_AUTO_LOAD_CONFIG OPENSSL_INIT_NO_LOAD_CONFIG | #else OPENSSL_INIT_LOAD_CONFIG | #endif 0; OPENSSL_init_ssl(flags, NULL); #else OPENSSL_load_builtin_modules(); #ifdef USE_OPENSSL_ENGINE ENGINE_load_builtin_engines(); #endif /* CONF_MFLAGS_DEFAULT_SECTION was introduced some time between 0.9.8b and 0.9.8e */ #ifndef CONF_MFLAGS_DEFAULT_SECTION #define CONF_MFLAGS_DEFAULT_SECTION 0x0 #endif #ifndef CURL_DISABLE_OPENSSL_AUTO_LOAD_CONFIG CONF_modules_load_file(NULL, NULL, CONF_MFLAGS_DEFAULT_SECTION| CONF_MFLAGS_IGNORE_MISSING_FILE); #endif /* Lets get nice error messages */ SSL_load_error_strings(); /* Init the global ciphers and digests */ if(!SSLeay_add_ssl_algorithms()) return 0; OpenSSL_add_all_algorithms(); #endif Curl_tls_keylog_open(); /* Initialize the extra data indexes */ if(ossl_get_ssl_data_index() < 0 || ossl_get_ssl_conn_index() < 0 || ossl_get_ssl_sockindex_index() < 0 || ossl_get_proxy_index() < 0) return 0; return 1; } /* Global cleanup */ static void ossl_cleanup(void) { #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && \ !defined(LIBRESSL_VERSION_NUMBER) /* OpenSSL 1.1 deprecates all these cleanup functions and turns them into no-ops in OpenSSL 1.0 compatibility mode */ #else /* Free ciphers and digests lists */ EVP_cleanup(); #ifdef USE_OPENSSL_ENGINE /* Free engine list */ ENGINE_cleanup(); #endif /* Free OpenSSL error strings */ ERR_free_strings(); /* Free thread local error state, destroying hash upon zero refcount */ #ifdef HAVE_ERR_REMOVE_THREAD_STATE ERR_remove_thread_state(NULL); #else ERR_remove_state(0); #endif /* Free all memory allocated by all configuration modules */ CONF_modules_free(); #ifdef HAVE_SSL_COMP_FREE_COMPRESSION_METHODS SSL_COMP_free_compression_methods(); #endif #endif Curl_tls_keylog_close(); } /* * This function is used to determine connection status. * * Return codes: * 1 means the connection is still in place * 0 means the connection has been closed * -1 means the connection status is unknown */ static int ossl_check_cxn(struct connectdata *conn) { /* SSL_peek takes data out of the raw recv buffer without peeking so we use recv MSG_PEEK instead. 
Bug #795 */ #ifdef MSG_PEEK char buf; ssize_t nread; nread = recv((RECV_TYPE_ARG1)conn->sock[FIRSTSOCKET], (RECV_TYPE_ARG2)&buf, (RECV_TYPE_ARG3)1, (RECV_TYPE_ARG4)MSG_PEEK); if(nread == 0) return 0; /* connection has been closed */ if(nread == 1) return 1; /* connection still in place */ else if(nread == -1) { int err = SOCKERRNO; if(err == EINPROGRESS || #if defined(EAGAIN) && (EAGAIN != EWOULDBLOCK) err == EAGAIN || #endif err == EWOULDBLOCK) return 1; /* connection still in place */ if(err == ECONNRESET || #ifdef ECONNABORTED err == ECONNABORTED || #endif #ifdef ENETDOWN err == ENETDOWN || #endif #ifdef ENETRESET err == ENETRESET || #endif #ifdef ESHUTDOWN err == ESHUTDOWN || #endif #ifdef ETIMEDOUT err == ETIMEDOUT || #endif err == ENOTCONN) return 0; /* connection has been closed */ } #endif return -1; /* connection status unknown */ } /* Selects an OpenSSL crypto engine */ static CURLcode ossl_set_engine(struct Curl_easy *data, const char *engine) { #ifdef USE_OPENSSL_ENGINE ENGINE *e; #if OPENSSL_VERSION_NUMBER >= 0x00909000L e = ENGINE_by_id(engine); #else /* avoid memory leak */ for(e = ENGINE_get_first(); e; e = ENGINE_get_next(e)) { const char *e_id = ENGINE_get_id(e); if(!strcmp(engine, e_id)) break; } #endif if(!e) { failf(data, "SSL Engine '%s' not found", engine); return CURLE_SSL_ENGINE_NOTFOUND; } if(data->state.engine) { ENGINE_finish(data->state.engine); ENGINE_free(data->state.engine); data->state.engine = NULL; } if(!ENGINE_init(e)) { char buf[256]; ENGINE_free(e); failf(data, "Failed to initialise SSL Engine '%s': %s", engine, ossl_strerror(ERR_get_error(), buf, sizeof(buf))); return CURLE_SSL_ENGINE_INITFAILED; } data->state.engine = e; return CURLE_OK; #else (void)engine; failf(data, "SSL Engine not supported"); return CURLE_SSL_ENGINE_NOTFOUND; #endif } /* Sets engine as default for all SSL operations */ static CURLcode ossl_set_engine_default(struct Curl_easy *data) { #ifdef USE_OPENSSL_ENGINE if(data->state.engine) { if(ENGINE_set_default(data->state.engine, ENGINE_METHOD_ALL) > 0) { infof(data, "set default crypto engine '%s'\n", ENGINE_get_id(data->state.engine)); } else { failf(data, "set default crypto engine '%s' failed", ENGINE_get_id(data->state.engine)); return CURLE_SSL_ENGINE_SETFAILED; } } #else (void) data; #endif return CURLE_OK; } /* Return list of OpenSSL crypto engine names. */ static struct curl_slist *ossl_engines_list(struct Curl_easy *data) { struct curl_slist *list = NULL; #ifdef USE_OPENSSL_ENGINE struct curl_slist *beg; ENGINE *e; for(e = ENGINE_get_first(); e; e = ENGINE_get_next(e)) { beg = curl_slist_append(list, ENGINE_get_id(e)); if(!beg) { curl_slist_free_all(list); return NULL; } list = beg; } #endif (void) data; return list; } #define set_logger(conn, data) \ conn->ssl[0].backend->logger = data static void ossl_closeone(struct Curl_easy *data, struct connectdata *conn, struct ssl_connect_data *connssl) { struct ssl_backend_data *backend = connssl->backend; if(backend->handle) { set_logger(conn, data); (void)SSL_shutdown(backend->handle); SSL_set_connect_state(backend->handle); SSL_free(backend->handle); backend->handle = NULL; } if(backend->ctx) { SSL_CTX_free(backend->ctx); backend->ctx = NULL; } } /* * This function is called when an SSL connection is closed. 
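The trick in ossl_check_cxn() is not OpenSSL-specific: a one-byte recv() with MSG_PEEK distinguishes "peer closed" (0), "data pending, still alive" (1) and "nothing to read right now" without consuming anything from the TLS record stream. The same idea outside curl's types, assuming a POSIX socket:

#include <sys/socket.h>
#include <errno.h>

/* 1 = alive, 0 = peer closed the connection, -1 = state unknown */
static int demo_connection_alive(int fd)
{
  char byte;
  ssize_t n = recv(fd, &byte, 1, MSG_PEEK);   /* peek, do not consume */
  if(n == 0)
    return 0;                                 /* orderly shutdown by peer */
  if(n == 1)
    return 1;                                 /* data waiting: still alive */
  if(errno == EAGAIN || errno == EWOULDBLOCK)
    return 1;                                 /* nothing to read right now */
  return -1;                                  /* real error: unknown state */
}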
*/ static void ossl_close(struct Curl_easy *data, struct connectdata *conn, int sockindex) { ossl_closeone(data, conn, &conn->ssl[sockindex]); #ifndef CURL_DISABLE_PROXY ossl_closeone(data, conn, &conn->proxy_ssl[sockindex]); #endif } /* * This function is called to shut down the SSL layer but keep the * socket open (CCC - Clear Command Channel) */ static int ossl_shutdown(struct Curl_easy *data, struct connectdata *conn, int sockindex) { int retval = 0; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; char buf[256]; /* We will use this for the OpenSSL error buffer, so it has to be at least 256 bytes long. */ unsigned long sslerror; ssize_t nread; int buffsize; int err; bool done = FALSE; struct ssl_backend_data *backend = connssl->backend; #ifndef CURL_DISABLE_FTP /* This has only been tested on the proftpd server, and the mod_tls code sends a close notify alert without waiting for a close notify alert in response. Thus we wait for a close notify alert from the server, but we do not send one. Let's hope other servers do the same... */ if(data->set.ftp_ccc == CURLFTPSSL_CCC_ACTIVE) (void)SSL_shutdown(backend->handle); #endif if(backend->handle) { buffsize = (int)sizeof(buf); while(!done) { int what = SOCKET_READABLE(conn->sock[sockindex], SSL_SHUTDOWN_TIMEOUT); if(what > 0) { ERR_clear_error(); /* Something to read, let's do it and hope that it is the close notify alert from the server */ nread = (ssize_t)SSL_read(backend->handle, buf, buffsize); err = SSL_get_error(backend->handle, (int)nread); switch(err) { case SSL_ERROR_NONE: /* this is not an error */ case SSL_ERROR_ZERO_RETURN: /* no more data */ /* This is the expected response. There was no data but only the close notify alert */ done = TRUE; break; case SSL_ERROR_WANT_READ: /* there's data pending, re-invoke SSL_read() */ infof(data, "SSL_ERROR_WANT_READ\n"); break; case SSL_ERROR_WANT_WRITE: /* SSL wants a write. Really odd. Let's bail out. */ infof(data, "SSL_ERROR_WANT_WRITE\n"); done = TRUE; break; default: /* openssl/ssl.h says "look at error stack/return value/errno" */ sslerror = ERR_get_error(); failf(data, OSSL_PACKAGE " SSL_read on shutdown: %s, errno %d", (sslerror ? ossl_strerror(sslerror, buf, sizeof(buf)) : SSL_ERROR_to_str(err)), SOCKERRNO); done = TRUE; break; } } else if(0 == what) { /* timeout */ failf(data, "SSL shutdown timeout"); done = TRUE; } else { /* anything that gets here is fatally bad */ failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO); retval = -1; done = TRUE; } } /* while()-loop for the select() */ if(data->set.verbose) { #ifdef HAVE_SSL_GET_SHUTDOWN switch(SSL_get_shutdown(backend->handle)) { case SSL_SENT_SHUTDOWN: infof(data, "SSL_get_shutdown() returned SSL_SENT_SHUTDOWN\n"); break; case SSL_RECEIVED_SHUTDOWN: infof(data, "SSL_get_shutdown() returned SSL_RECEIVED_SHUTDOWN\n"); break; case SSL_SENT_SHUTDOWN|SSL_RECEIVED_SHUTDOWN: infof(data, "SSL_get_shutdown() returned SSL_SENT_SHUTDOWN|" "SSL_RECEIVED__SHUTDOWN\n"); break; } #endif } SSL_free(backend->handle); backend->handle = NULL; } return retval; } static void ossl_session_free(void *ptr) { /* free the ID */ SSL_SESSION_free(ptr); } /* * This function is called when the 'data' struct is going away. Close * down everything and free all resources! 
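The shutdown loop above implements "send nothing, wait for the peer's close_notify": keep calling SSL_read() until SSL_get_error() reports SSL_ERROR_ZERO_RETURN (the alert arrived) or a fatal error. Reduced to its skeleton, assuming a blocking socket and leaving out curl's timeout and logging:

#include <openssl/ssl.h>

static void demo_drain_close_notify(SSL *ssl)
{
  char buf[256];
  for(;;) {
    int nread = SSL_read(ssl, buf, (int)sizeof(buf));
    int err = SSL_get_error(ssl, nread);
    if(err == SSL_ERROR_NONE)
      continue;               /* application data still pending, keep going */
    if(err == SSL_ERROR_WANT_READ)
      continue;               /* retry; will not normally happen when blocking */
    break;                    /* ZERO_RETURN (close_notify) or a real error */
  }
}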
*/ static void ossl_close_all(struct Curl_easy *data) { #ifdef USE_OPENSSL_ENGINE if(data->state.engine) { ENGINE_finish(data->state.engine); ENGINE_free(data->state.engine); data->state.engine = NULL; } #else (void)data; #endif #if !defined(HAVE_ERR_REMOVE_THREAD_STATE_DEPRECATED) && \ defined(HAVE_ERR_REMOVE_THREAD_STATE) /* OpenSSL 1.0.1 and 1.0.2 build an error queue that is stored per-thread so we need to clean it here in case the thread will be killed. All OpenSSL code should extract the error in association with the error so clearing this queue here should be harmless at worst. */ ERR_remove_thread_state(NULL); #endif } /* ====================================================== */ /* * Match subjectAltName against the host name. This requires a conversion * in CURL_DOES_CONVERSIONS builds. */ static bool subj_alt_hostcheck(struct Curl_easy *data, const char *match_pattern, const char *hostname, const char *dispname) #ifdef CURL_DOES_CONVERSIONS { bool res = FALSE; /* Curl_cert_hostcheck uses host encoding, but we get ASCII from OpenSSl. */ char *match_pattern2 = strdup(match_pattern); if(match_pattern2) { if(Curl_convert_from_network(data, match_pattern2, strlen(match_pattern2)) == CURLE_OK) { if(Curl_cert_hostcheck(match_pattern2, hostname)) { res = TRUE; infof(data, " subjectAltName: host \"%s\" matched cert's \"%s\"\n", dispname, match_pattern2); } } free(match_pattern2); } else { failf(data, "SSL: out of memory when allocating temporary for subjectAltName"); } return res; } #else { #ifdef CURL_DISABLE_VERBOSE_STRINGS (void)dispname; (void)data; #endif if(Curl_cert_hostcheck(match_pattern, hostname)) { infof(data, " subjectAltName: host \"%s\" matched cert's \"%s\"\n", dispname, match_pattern); return TRUE; } return FALSE; } #endif /* Quote from RFC2818 section 3.1 "Server Identity" If a subjectAltName extension of type dNSName is present, that MUST be used as the identity. Otherwise, the (most specific) Common Name field in the Subject field of the certificate MUST be used. Although the use of the Common Name is existing practice, it is deprecated and Certification Authorities are encouraged to use the dNSName instead. Matching is performed using the matching rules specified by [RFC2459]. If more than one identity of a given type is present in the certificate (e.g., more than one dNSName name, a match in any one of the set is considered acceptable.) Names may contain the wildcard character * which is considered to match any single domain name component or component fragment. E.g., *.a.com matches foo.a.com but not bar.foo.a.com. f*.com matches foo.com but not bar.com. In some cases, the URI is specified as an IP address rather than a hostname. In this case, the iPAddress subjectAltName must be present in the certificate and must exactly match the IP in the URI. 
*/ static CURLcode verifyhost(struct Curl_easy *data, struct connectdata *conn, X509 *server_cert) { bool matched = FALSE; int target = GEN_DNS; /* target type, GEN_DNS or GEN_IPADD */ size_t addrlen = 0; STACK_OF(GENERAL_NAME) *altnames; #ifdef ENABLE_IPV6 struct in6_addr addr; #else struct in_addr addr; #endif CURLcode result = CURLE_OK; bool dNSName = FALSE; /* if a dNSName field exists in the cert */ bool iPAddress = FALSE; /* if a iPAddress field exists in the cert */ const char * const hostname = SSL_HOST_NAME(); const char * const dispname = SSL_HOST_DISPNAME(); #ifdef ENABLE_IPV6 if(conn->bits.ipv6_ip && Curl_inet_pton(AF_INET6, hostname, &addr)) { target = GEN_IPADD; addrlen = sizeof(struct in6_addr); } else #endif if(Curl_inet_pton(AF_INET, hostname, &addr)) { target = GEN_IPADD; addrlen = sizeof(struct in_addr); } /* get a "list" of alternative names */ altnames = X509_get_ext_d2i(server_cert, NID_subject_alt_name, NULL, NULL); if(altnames) { #ifdef OPENSSL_IS_BORINGSSL size_t numalts; size_t i; #else int numalts; int i; #endif bool dnsmatched = FALSE; bool ipmatched = FALSE; /* get amount of alternatives, RFC2459 claims there MUST be at least one, but we don't depend on it... */ numalts = sk_GENERAL_NAME_num(altnames); /* loop through all alternatives - until a dnsmatch */ for(i = 0; (i < numalts) && !dnsmatched; i++) { /* get a handle to alternative name number i */ const GENERAL_NAME *check = sk_GENERAL_NAME_value(altnames, i); if(check->type == GEN_DNS) dNSName = TRUE; else if(check->type == GEN_IPADD) iPAddress = TRUE; /* only check alternatives of the same type the target is */ if(check->type == target) { /* get data and length */ const char *altptr = (char *)ASN1_STRING_get0_data(check->d.ia5); size_t altlen = (size_t) ASN1_STRING_length(check->d.ia5); switch(target) { case GEN_DNS: /* name/pattern comparison */ /* The OpenSSL man page explicitly says: "In general it cannot be assumed that the data returned by ASN1_STRING_data() is null terminated or does not contain embedded nulls." But also that "The actual format of the data will depend on the actual string type itself: for example for an IA5String the data will be ASCII" It has been however verified that in 0.9.6 and 0.9.7, IA5String is always null-terminated. */ if((altlen == strlen(altptr)) && /* if this isn't true, there was an embedded zero in the name string and we cannot match it. */ subj_alt_hostcheck(data, altptr, hostname, dispname)) { dnsmatched = TRUE; } break; case GEN_IPADD: /* IP address comparison */ /* compare alternative IP address if the data chunk is the same size our server IP address is */ if((altlen == addrlen) && !memcmp(altptr, &addr, altlen)) { ipmatched = TRUE; infof(data, " subjectAltName: host \"%s\" matched cert's IP address!\n", dispname); } break; } } } GENERAL_NAMES_free(altnames); if(dnsmatched || ipmatched) matched = TRUE; } if(matched) /* an alternative name matched */ ; else if(dNSName || iPAddress) { infof(data, " subjectAltName does not match %s\n", dispname); failf(data, "SSL: no alternative certificate subject name matches " "target host name '%s'", dispname); result = CURLE_PEER_FAILED_VERIFICATION; } else { /* we have to look to the last occurrence of a commonName in the distinguished one to get the most significant one. 
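verifyhost() predates X509_check_host(); on OpenSSL 1.0.2 and later the same dNSName/iPAddress matching, including the embedded-NUL defence discussed above, can be delegated to the library. A sketch of that shorter route, explicitly not what curl does here since it keeps its own matcher for older libraries and CURL_DOES_CONVERSIONS builds:

#include <openssl/x509v3.h>
#include <string.h>

/* 1 = hostname matches the certificate, 0 = no match or error */
static int demo_match_hostname(X509 *cert, const char *hostname)
{
#if OPENSSL_VERSION_NUMBER >= 0x1000200fL
  /* flags 0 selects the default RFC 6125-style checking */
  return X509_check_host(cert, hostname, strlen(hostname), 0, NULL) == 1;
#else
  (void)cert;
  (void)hostname;
  return 0;   /* older OpenSSL: fall back to a hand-written matcher */
#endif
}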
*/ int j, i = -1; /* The following is done because of a bug in 0.9.6b */ unsigned char *nulstr = (unsigned char *)""; unsigned char *peer_CN = nulstr; X509_NAME *name = X509_get_subject_name(server_cert); if(name) while((j = X509_NAME_get_index_by_NID(name, NID_commonName, i)) >= 0) i = j; /* we have the name entry and we will now convert this to a string that we can use for comparison. Doing this we support BMPstring, UTF8 etc. */ if(i >= 0) { ASN1_STRING *tmp = X509_NAME_ENTRY_get_data(X509_NAME_get_entry(name, i)); /* In OpenSSL 0.9.7d and earlier, ASN1_STRING_to_UTF8 fails if the input is already UTF-8 encoded. We check for this case and copy the raw string manually to avoid the problem. This code can be made conditional in the future when OpenSSL has been fixed. */ if(tmp) { if(ASN1_STRING_type(tmp) == V_ASN1_UTF8STRING) { j = ASN1_STRING_length(tmp); if(j >= 0) { peer_CN = OPENSSL_malloc(j + 1); if(peer_CN) { memcpy(peer_CN, ASN1_STRING_get0_data(tmp), j); peer_CN[j] = '\0'; } } } else /* not a UTF8 name */ j = ASN1_STRING_to_UTF8(&peer_CN, tmp); if(peer_CN && (curlx_uztosi(strlen((char *)peer_CN)) != j)) { /* there was a terminating zero before the end of string, this cannot match and we return failure! */ failf(data, "SSL: illegal cert name field"); result = CURLE_PEER_FAILED_VERIFICATION; } } } if(peer_CN == nulstr) peer_CN = NULL; else { /* convert peer_CN from UTF8 */ CURLcode rc = Curl_convert_from_utf8(data, (char *)peer_CN, strlen((char *)peer_CN)); /* Curl_convert_from_utf8 calls failf if unsuccessful */ if(rc) { OPENSSL_free(peer_CN); return rc; } } if(result) /* error already detected, pass through */ ; else if(!peer_CN) { failf(data, "SSL: unable to obtain common name from peer certificate"); result = CURLE_PEER_FAILED_VERIFICATION; } else if(!Curl_cert_hostcheck((const char *)peer_CN, hostname)) { failf(data, "SSL: certificate subject name '%s' does not match " "target host name '%s'", peer_CN, dispname); result = CURLE_PEER_FAILED_VERIFICATION; } else { infof(data, " common name: %s (matched)\n", peer_CN); } if(peer_CN) OPENSSL_free(peer_CN); } return result; } #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) static CURLcode verifystatus(struct Curl_easy *data, struct ssl_connect_data *connssl) { int i, ocsp_status; unsigned char *status; const unsigned char *p; CURLcode result = CURLE_OK; OCSP_RESPONSE *rsp = NULL; OCSP_BASICRESP *br = NULL; X509_STORE *st = NULL; STACK_OF(X509) *ch = NULL; struct ssl_backend_data *backend = connssl->backend; X509 *cert; OCSP_CERTID *id = NULL; int cert_status, crl_reason; ASN1_GENERALIZEDTIME *rev, *thisupd, *nextupd; int ret; long len = SSL_get_tlsext_status_ocsp_resp(backend->handle, &status); if(!status) { failf(data, "No OCSP response received"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } p = status; rsp = d2i_OCSP_RESPONSE(NULL, &p, len); if(!rsp) { failf(data, "Invalid OCSP response"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } ocsp_status = OCSP_response_status(rsp); if(ocsp_status != OCSP_RESPONSE_STATUS_SUCCESSFUL) { failf(data, "Invalid OCSP response status: %s (%d)", OCSP_response_status_str(ocsp_status), ocsp_status); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } br = OCSP_response_get1_basic(rsp); if(!br) { failf(data, "Invalid OCSP response"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } ch = SSL_get_peer_cert_chain(backend->handle); st = SSL_CTX_get_cert_store(backend->ctx); #if ((OPENSSL_VERSION_NUMBER <= 0x1000201fL) /* Fixed after 1.0.2a 
*/ || \ (defined(LIBRESSL_VERSION_NUMBER) && \ LIBRESSL_VERSION_NUMBER <= 0x2040200fL)) /* The authorized responder cert in the OCSP response MUST be signed by the peer cert's issuer (see RFC6960 section 4.2.2.2). If that's a root cert, no problem, but if it's an intermediate cert OpenSSL has a bug where it expects this issuer to be present in the chain embedded in the OCSP response. So we add it if necessary. */ /* First make sure the peer cert chain includes both a peer and an issuer, and the OCSP response contains a responder cert. */ if(sk_X509_num(ch) >= 2 && sk_X509_num(br->certs) >= 1) { X509 *responder = sk_X509_value(br->certs, sk_X509_num(br->certs) - 1); /* Find issuer of responder cert and add it to the OCSP response chain */ for(i = 0; i < sk_X509_num(ch); i++) { X509 *issuer = sk_X509_value(ch, i); if(X509_check_issued(issuer, responder) == X509_V_OK) { if(!OCSP_basic_add1_cert(br, issuer)) { failf(data, "Could not add issuer cert to OCSP response"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } } } } #endif if(OCSP_basic_verify(br, ch, st, 0) <= 0) { failf(data, "OCSP response verification failed"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } /* Compute the certificate's ID */ cert = SSL_get_peer_certificate(backend->handle); if(!cert) { failf(data, "Error getting peer certificate"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } for(i = 0; i < sk_X509_num(ch); i++) { X509 *issuer = sk_X509_value(ch, i); if(X509_check_issued(issuer, cert) == X509_V_OK) { id = OCSP_cert_to_id(EVP_sha1(), cert, issuer); break; } } X509_free(cert); if(!id) { failf(data, "Error computing OCSP ID"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } /* Find the single OCSP response corresponding to the certificate ID */ ret = OCSP_resp_find_status(br, id, &cert_status, &crl_reason, &rev, &thisupd, &nextupd); OCSP_CERTID_free(id); if(ret != 1) { failf(data, "Could not find certificate ID in OCSP response"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } /* Validate the corresponding single OCSP response */ if(!OCSP_check_validity(thisupd, nextupd, 300L, -1L)) { failf(data, "OCSP response has expired"); result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } infof(data, "SSL certificate status: %s (%d)\n", OCSP_cert_status_str(cert_status), cert_status); switch(cert_status) { case V_OCSP_CERTSTATUS_GOOD: break; case V_OCSP_CERTSTATUS_REVOKED: result = CURLE_SSL_INVALIDCERTSTATUS; failf(data, "SSL certificate revocation reason: %s (%d)", OCSP_crl_reason_str(crl_reason), crl_reason); goto end; case V_OCSP_CERTSTATUS_UNKNOWN: default: result = CURLE_SSL_INVALIDCERTSTATUS; goto end; } end: if(br) OCSP_BASICRESP_free(br); OCSP_RESPONSE_free(rsp); return result; } #endif #endif /* USE_OPENSSL */ /* The SSL_CTRL_SET_MSG_CALLBACK doesn't exist in ancient OpenSSL versions and thus this cannot be done there. 
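The parsing in verifystatus() assumes the peer actually stapled an OCSP response, which a client has to request through the TLS status_request extension before the handshake; afterwards the raw DER response can be fetched from the handle. A minimal request-side sketch, independent of curl's own wiring elsewhere in this file:

#if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \
    !defined(OPENSSL_NO_OCSP)
#include <openssl/ssl.h>
#include <openssl/ocsp.h>

/* Before the handshake: ask the server to staple an OCSP response. */
static void demo_request_ocsp(SSL *ssl)
{
  SSL_set_tlsext_status_type(ssl, TLSEXT_STATUSTYPE_ocsp);
}

/* After the handshake: fetch the raw DER response, -1 if none was stapled. */
static long demo_fetch_ocsp(SSL *ssl, const unsigned char **der)
{
  unsigned char *p = NULL;
  long len = SSL_get_tlsext_status_ocsp_resp(ssl, &p);
  *der = p;
  return len;
}
#endif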
*/ #ifdef SSL_CTRL_SET_MSG_CALLBACK static const char *ssl_msg_type(int ssl_ver, int msg) { #ifdef SSL2_VERSION_MAJOR if(ssl_ver == SSL2_VERSION_MAJOR) { switch(msg) { case SSL2_MT_ERROR: return "Error"; case SSL2_MT_CLIENT_HELLO: return "Client hello"; case SSL2_MT_CLIENT_MASTER_KEY: return "Client key"; case SSL2_MT_CLIENT_FINISHED: return "Client finished"; case SSL2_MT_SERVER_HELLO: return "Server hello"; case SSL2_MT_SERVER_VERIFY: return "Server verify"; case SSL2_MT_SERVER_FINISHED: return "Server finished"; case SSL2_MT_REQUEST_CERTIFICATE: return "Request CERT"; case SSL2_MT_CLIENT_CERTIFICATE: return "Client CERT"; } } else #endif if(ssl_ver == SSL3_VERSION_MAJOR) { switch(msg) { case SSL3_MT_HELLO_REQUEST: return "Hello request"; case SSL3_MT_CLIENT_HELLO: return "Client hello"; case SSL3_MT_SERVER_HELLO: return "Server hello"; #ifdef SSL3_MT_NEWSESSION_TICKET case SSL3_MT_NEWSESSION_TICKET: return "Newsession Ticket"; #endif case SSL3_MT_CERTIFICATE: return "Certificate"; case SSL3_MT_SERVER_KEY_EXCHANGE: return "Server key exchange"; case SSL3_MT_CLIENT_KEY_EXCHANGE: return "Client key exchange"; case SSL3_MT_CERTIFICATE_REQUEST: return "Request CERT"; case SSL3_MT_SERVER_DONE: return "Server finished"; case SSL3_MT_CERTIFICATE_VERIFY: return "CERT verify"; case SSL3_MT_FINISHED: return "Finished"; #ifdef SSL3_MT_CERTIFICATE_STATUS case SSL3_MT_CERTIFICATE_STATUS: return "Certificate Status"; #endif #ifdef SSL3_MT_ENCRYPTED_EXTENSIONS case SSL3_MT_ENCRYPTED_EXTENSIONS: return "Encrypted Extensions"; #endif #ifdef SSL3_MT_END_OF_EARLY_DATA case SSL3_MT_END_OF_EARLY_DATA: return "End of early data"; #endif #ifdef SSL3_MT_KEY_UPDATE case SSL3_MT_KEY_UPDATE: return "Key update"; #endif #ifdef SSL3_MT_NEXT_PROTO case SSL3_MT_NEXT_PROTO: return "Next protocol"; #endif #ifdef SSL3_MT_MESSAGE_HASH case SSL3_MT_MESSAGE_HASH: return "Message hash"; #endif } } return "Unknown"; } static const char *tls_rt_type(int type) { switch(type) { #ifdef SSL3_RT_HEADER case SSL3_RT_HEADER: return "TLS header"; #endif case SSL3_RT_CHANGE_CIPHER_SPEC: return "TLS change cipher"; case SSL3_RT_ALERT: return "TLS alert"; case SSL3_RT_HANDSHAKE: return "TLS handshake"; case SSL3_RT_APPLICATION_DATA: return "TLS app data"; default: return "TLS Unknown"; } } /* * Our callback from the SSL/TLS layers. */ static void ossl_trace(int direction, int ssl_ver, int content_type, const void *buf, size_t len, SSL *ssl, void *userp) { char unknown[32]; const char *verstr = NULL; struct connectdata *conn = userp; struct ssl_connect_data *connssl = &conn->ssl[0]; struct ssl_backend_data *backend = connssl->backend; struct Curl_easy *data = backend->logger; if(!conn || !data || !data->set.fdebug || (direction != 0 && direction != 1)) return; switch(ssl_ver) { #ifdef SSL2_VERSION /* removed in recent versions */ case SSL2_VERSION: verstr = "SSLv2"; break; #endif #ifdef SSL3_VERSION case SSL3_VERSION: verstr = "SSLv3"; break; #endif case TLS1_VERSION: verstr = "TLSv1.0"; break; #ifdef TLS1_1_VERSION case TLS1_1_VERSION: verstr = "TLSv1.1"; break; #endif #ifdef TLS1_2_VERSION case TLS1_2_VERSION: verstr = "TLSv1.2"; break; #endif #ifdef TLS1_3_VERSION case TLS1_3_VERSION: verstr = "TLSv1.3"; break; #endif case 0: break; default: msnprintf(unknown, sizeof(unknown), "(%x)", ssl_ver); verstr = unknown; break; } /* Log progress for interesting records only (like Handshake or Alert), skip * all raw record headers (content_type == SSL3_RT_HEADER or ssl_ver == 0). 
* For TLS 1.3, skip notification of the decrypted inner Content Type. */ if(ssl_ver #ifdef SSL3_RT_INNER_CONTENT_TYPE && content_type != SSL3_RT_INNER_CONTENT_TYPE #endif ) { const char *msg_name, *tls_rt_name; char ssl_buf[1024]; int msg_type, txt_len; /* the info given when the version is zero is not that useful for us */ ssl_ver >>= 8; /* check the upper 8 bits only below */ /* SSLv2 doesn't seem to have TLS record-type headers, so OpenSSL * always pass-up content-type as 0. But the interesting message-type * is at 'buf[0]'. */ if(ssl_ver == SSL3_VERSION_MAJOR && content_type) tls_rt_name = tls_rt_type(content_type); else tls_rt_name = ""; if(content_type == SSL3_RT_CHANGE_CIPHER_SPEC) { msg_type = *(char *)buf; msg_name = "Change cipher spec"; } else if(content_type == SSL3_RT_ALERT) { msg_type = (((char *)buf)[0] << 8) + ((char *)buf)[1]; msg_name = SSL_alert_desc_string_long(msg_type); } else { msg_type = *(char *)buf; msg_name = ssl_msg_type(ssl_ver, msg_type); } txt_len = msnprintf(ssl_buf, sizeof(ssl_buf), "%s (%s), %s, %s (%d):\n", verstr, direction?"OUT":"IN", tls_rt_name, msg_name, msg_type); if(0 <= txt_len && (unsigned)txt_len < sizeof(ssl_buf)) { Curl_debug(data, CURLINFO_TEXT, ssl_buf, (size_t)txt_len); } } Curl_debug(data, (direction == 1) ? CURLINFO_SSL_DATA_OUT : CURLINFO_SSL_DATA_IN, (char *)buf, len); (void) ssl; } #endif #ifdef USE_OPENSSL /* ====================================================== */ #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME # define use_sni(x) sni = (x) #else # define use_sni(x) Curl_nop_stmt #endif /* Check for OpenSSL 1.0.2 which has ALPN support. */ #undef HAS_ALPN #if OPENSSL_VERSION_NUMBER >= 0x10002000L \ && !defined(OPENSSL_NO_TLSEXT) # define HAS_ALPN 1 #endif /* Check for OpenSSL 1.0.1 which has NPN support. */ #undef HAS_NPN #if OPENSSL_VERSION_NUMBER >= 0x10001000L \ && !defined(OPENSSL_NO_TLSEXT) \ && !defined(OPENSSL_NO_NEXTPROTONEG) # define HAS_NPN 1 #endif #ifdef HAS_NPN /* * in is a list of length prefixed strings. this function has to select * the protocol we want to use from the list and write its string into out. */ static int select_next_protocol(unsigned char **out, unsigned char *outlen, const unsigned char *in, unsigned int inlen, const char *key, unsigned int keylen) { unsigned int i; for(i = 0; i + keylen <= inlen; i += in[i] + 1) { if(memcmp(&in[i + 1], key, keylen) == 0) { *out = (unsigned char *) &in[i + 1]; *outlen = in[i]; return 0; } } return -1; } static int select_next_proto_cb(SSL *ssl, unsigned char **out, unsigned char *outlen, const unsigned char *in, unsigned int inlen, void *arg) { struct Curl_easy *data = (struct Curl_easy *)arg; struct connectdata *conn = data->conn; (void)ssl; #ifdef USE_HTTP2 if(data->state.httpwant >= CURL_HTTP_VERSION_2 && !select_next_protocol(out, outlen, in, inlen, ALPN_H2, ALPN_H2_LENGTH)) { infof(data, "NPN, negotiated HTTP2 (%s)\n", ALPN_H2); conn->negnpn = CURL_HTTP_VERSION_2; return SSL_TLSEXT_ERR_OK; } #endif if(!select_next_protocol(out, outlen, in, inlen, ALPN_HTTP_1_1, ALPN_HTTP_1_1_LENGTH)) { infof(data, "NPN, negotiated HTTP1.1\n"); conn->negnpn = CURL_HTTP_VERSION_1_1; return SSL_TLSEXT_ERR_OK; } infof(data, "NPN, no overlap, use HTTP1.1\n"); *out = (unsigned char *)ALPN_HTTP_1_1; *outlen = ALPN_HTTP_1_1_LENGTH; conn->negnpn = CURL_HTTP_VERSION_1_1; return SSL_TLSEXT_ERR_OK; } #endif /* HAS_NPN */ #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* 1.1.0 */ static CURLcode set_ssl_version_min_max(SSL_CTX *ctx, struct connectdata *conn) { /* first, TLS min version... 
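Both NPN and ALPN use the wire format that select_next_protocol() walks above: a sequence of one-byte length followed by the protocol name. On the client side, ALPN only needs that list handed to the context before the handshake. A minimal sketch offering h2 and http/1.1 (illustrative, not the list curl builds):

#if OPENSSL_VERSION_NUMBER >= 0x10002000L && !defined(OPENSSL_NO_TLSEXT)
#include <openssl/ssl.h>

static int demo_set_alpn(SSL_CTX *ctx)
{
  /* length-prefixed protocol names, the same format parsed above */
  static const unsigned char protos[] =
    "\x02" "h2"
    "\x08" "http/1.1";
  /* note: returns 0 on success, unlike most SSL_CTX_* calls */
  return SSL_CTX_set_alpn_protos(ctx, protos,
                                 (unsigned int)(sizeof(protos) - 1)) == 0;
}
#endif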
*/ long curl_ssl_version_min = SSL_CONN_CONFIG(version); long curl_ssl_version_max; /* convert cURL min SSL version option to OpenSSL constant */ #if defined(OPENSSL_IS_BORINGSSL) || defined(LIBRESSL_VERSION_NUMBER) uint16_t ossl_ssl_version_min = 0; uint16_t ossl_ssl_version_max = 0; #else long ossl_ssl_version_min = 0; long ossl_ssl_version_max = 0; #endif switch(curl_ssl_version_min) { case CURL_SSLVERSION_TLSv1: /* TLS 1.x */ case CURL_SSLVERSION_TLSv1_0: ossl_ssl_version_min = TLS1_VERSION; break; case CURL_SSLVERSION_TLSv1_1: ossl_ssl_version_min = TLS1_1_VERSION; break; case CURL_SSLVERSION_TLSv1_2: ossl_ssl_version_min = TLS1_2_VERSION; break; #ifdef TLS1_3_VERSION case CURL_SSLVERSION_TLSv1_3: ossl_ssl_version_min = TLS1_3_VERSION; break; #endif } /* CURL_SSLVERSION_DEFAULT means that no option was selected. We don't want to pass 0 to SSL_CTX_set_min_proto_version as it would enable all versions down to the lowest supported by the library. So we skip this, and stay with the OS default */ if(curl_ssl_version_min != CURL_SSLVERSION_DEFAULT) { if(!SSL_CTX_set_min_proto_version(ctx, ossl_ssl_version_min)) { return CURLE_SSL_CONNECT_ERROR; } } /* ... then, TLS max version */ curl_ssl_version_max = SSL_CONN_CONFIG(version_max); /* convert cURL max SSL version option to OpenSSL constant */ switch(curl_ssl_version_max) { case CURL_SSLVERSION_MAX_TLSv1_0: ossl_ssl_version_max = TLS1_VERSION; break; case CURL_SSLVERSION_MAX_TLSv1_1: ossl_ssl_version_max = TLS1_1_VERSION; break; case CURL_SSLVERSION_MAX_TLSv1_2: ossl_ssl_version_max = TLS1_2_VERSION; break; #ifdef TLS1_3_VERSION case CURL_SSLVERSION_MAX_TLSv1_3: ossl_ssl_version_max = TLS1_3_VERSION; break; #endif case CURL_SSLVERSION_MAX_NONE: /* none selected */ case CURL_SSLVERSION_MAX_DEFAULT: /* max selected */ default: /* SSL_CTX_set_max_proto_version states that: setting the maximum to 0 will enable protocol versions up to the highest version supported by the library */ ossl_ssl_version_max = 0; break; } if(!SSL_CTX_set_max_proto_version(ctx, ossl_ssl_version_max)) { return CURLE_SSL_CONNECT_ERROR; } return CURLE_OK; } #endif #ifdef OPENSSL_IS_BORINGSSL typedef uint32_t ctx_option_t; #else typedef long ctx_option_t; #endif #if (OPENSSL_VERSION_NUMBER < 0x10100000L) /* 1.1.0 */ static CURLcode set_ssl_version_min_max_legacy(ctx_option_t *ctx_options, struct Curl_easy *data, struct connectdata *conn, int sockindex) { long ssl_version = SSL_CONN_CONFIG(version); long ssl_version_max = SSL_CONN_CONFIG(version_max); (void) data; /* In case it's unused. 
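On OpenSSL 1.1.0+ the option-flag juggling in the legacy path collapses into the two proto-version calls that set_ssl_version_min_max() uses above. A sketch pinning a context to TLS 1.2 through TLS 1.3, where a max of 0 would mean "highest version the library supports":

#if OPENSSL_VERSION_NUMBER >= 0x10100000L
#include <openssl/ssl.h>

static int demo_pin_tls12_to_tls13(SSL_CTX *ctx)
{
  if(!SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION))
    return 0;
#ifdef TLS1_3_VERSION
  if(!SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION))
    return 0;
#else
  if(!SSL_CTX_set_max_proto_version(ctx, 0))   /* highest available */
    return 0;
#endif
  return 1;
}
#endif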
*/ switch(ssl_version) { case CURL_SSLVERSION_TLSv1_3: #ifdef TLS1_3_VERSION { struct ssl_connect_data *connssl = &conn->ssl[sockindex]; SSL_CTX_set_max_proto_version(backend->ctx, TLS1_3_VERSION); *ctx_options |= SSL_OP_NO_TLSv1_2; } #else (void)sockindex; (void)ctx_options; failf(data, OSSL_PACKAGE " was built without TLS 1.3 support"); return CURLE_NOT_BUILT_IN; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_TLSv1_2: #if OPENSSL_VERSION_NUMBER >= 0x1000100FL *ctx_options |= SSL_OP_NO_TLSv1_1; #else failf(data, OSSL_PACKAGE " was built without TLS 1.2 support"); return CURLE_NOT_BUILT_IN; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_TLSv1_1: #if OPENSSL_VERSION_NUMBER >= 0x1000100FL *ctx_options |= SSL_OP_NO_TLSv1; #else failf(data, OSSL_PACKAGE " was built without TLS 1.1 support"); return CURLE_NOT_BUILT_IN; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_TLSv1_0: case CURL_SSLVERSION_TLSv1: break; } switch(ssl_version_max) { case CURL_SSLVERSION_MAX_TLSv1_0: #if OPENSSL_VERSION_NUMBER >= 0x1000100FL *ctx_options |= SSL_OP_NO_TLSv1_1; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_MAX_TLSv1_1: #if OPENSSL_VERSION_NUMBER >= 0x1000100FL *ctx_options |= SSL_OP_NO_TLSv1_2; #endif /* FALLTHROUGH */ case CURL_SSLVERSION_MAX_TLSv1_2: #ifdef TLS1_3_VERSION *ctx_options |= SSL_OP_NO_TLSv1_3; #endif break; case CURL_SSLVERSION_MAX_TLSv1_3: #ifdef TLS1_3_VERSION break; #else failf(data, OSSL_PACKAGE " was built without TLS 1.3 support"); return CURLE_NOT_BUILT_IN; #endif } return CURLE_OK; } #endif /* The "new session" callback must return zero if the session can be removed * or non-zero if the session has been put into the session cache. */ static int ossl_new_session_cb(SSL *ssl, SSL_SESSION *ssl_sessionid) { int res = 0; struct connectdata *conn; struct Curl_easy *data; int sockindex; curl_socket_t *sockindex_ptr; int data_idx = ossl_get_ssl_data_index(); int connectdata_idx = ossl_get_ssl_conn_index(); int sockindex_idx = ossl_get_ssl_sockindex_index(); int proxy_idx = ossl_get_proxy_index(); bool isproxy; if(data_idx < 0 || connectdata_idx < 0 || sockindex_idx < 0 || proxy_idx < 0) return 0; conn = (struct connectdata*) SSL_get_ex_data(ssl, connectdata_idx); if(!conn) return 0; data = (struct Curl_easy *) SSL_get_ex_data(ssl, data_idx); /* The sockindex has been stored as a pointer to an array element */ sockindex_ptr = (curl_socket_t*) SSL_get_ex_data(ssl, sockindex_idx); sockindex = (int)(sockindex_ptr - conn->sock); isproxy = SSL_get_ex_data(ssl, proxy_idx) ? 
TRUE : FALSE; if(SSL_SET_OPTION(primary.sessionid)) { bool incache; void *old_ssl_sessionid = NULL; Curl_ssl_sessionid_lock(data); if(isproxy) incache = FALSE; else incache = !(Curl_ssl_getsessionid(data, conn, isproxy, &old_ssl_sessionid, NULL, sockindex)); if(incache) { if(old_ssl_sessionid != ssl_sessionid) { infof(data, "old SSL session ID is stale, removing\n"); Curl_ssl_delsessionid(data, old_ssl_sessionid); incache = FALSE; } } if(!incache) { if(!Curl_ssl_addsessionid(data, conn, isproxy, ssl_sessionid, 0 /* unknown size */, sockindex)) { /* the session has been put into the session cache */ res = 1; } else failf(data, "failed to store ssl session"); } Curl_ssl_sessionid_unlock(data); } return res; } static CURLcode load_cacert_from_memory(SSL_CTX *ctx, const struct curl_blob *ca_info_blob) { /* these need freed at the end */ BIO *cbio = NULL; STACK_OF(X509_INFO) *inf = NULL; /* everything else is just a reference */ int i, count = 0; X509_STORE *cts = NULL; X509_INFO *itmp = NULL; if(ca_info_blob->len > (size_t)INT_MAX) return CURLE_SSL_CACERT_BADFILE; cts = SSL_CTX_get_cert_store(ctx); if(!cts) return CURLE_OUT_OF_MEMORY; cbio = BIO_new_mem_buf(ca_info_blob->data, (int)ca_info_blob->len); if(!cbio) return CURLE_OUT_OF_MEMORY; inf = PEM_X509_INFO_read_bio(cbio, NULL, NULL, NULL); if(!inf) { BIO_free(cbio); return CURLE_SSL_CACERT_BADFILE; } /* add each entry from PEM file to x509_store */ for(i = 0; i < (int)sk_X509_INFO_num(inf); ++i) { itmp = sk_X509_INFO_value(inf, i); if(itmp->x509) { if(X509_STORE_add_cert(cts, itmp->x509)) { ++count; } else { /* set count to 0 to return an error */ count = 0; break; } } if(itmp->crl) { if(X509_STORE_add_crl(cts, itmp->crl)) { ++count; } else { /* set count to 0 to return an error */ count = 0; break; } } } sk_X509_INFO_pop_free(inf, X509_INFO_free); BIO_free(cbio); /* if we didn't end up importing anything, treat that as an error */ return (count > 0 ? CURLE_OK : CURLE_SSL_CACERT_BADFILE); } static CURLcode ossl_connect_step1(struct Curl_easy *data, struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; char *ciphers; SSL_METHOD_QUAL SSL_METHOD *req_method = NULL; X509_LOOKUP *lookup = NULL; curl_socket_t sockfd = conn->sock[sockindex]; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; ctx_option_t ctx_options = 0; void *ssl_sessionid = NULL; #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME bool sni; const char * const hostname = SSL_HOST_NAME(); #ifdef ENABLE_IPV6 struct in6_addr addr; #else struct in_addr addr; #endif #endif const long int ssl_version = SSL_CONN_CONFIG(version); #ifdef USE_OPENSSL_SRP const enum CURL_TLSAUTH ssl_authtype = SSL_SET_OPTION(authtype); #endif char * const ssl_cert = SSL_SET_OPTION(primary.clientcert); const struct curl_blob *ssl_cert_blob = SSL_SET_OPTION(primary.cert_blob); const struct curl_blob *ca_info_blob = SSL_CONN_CONFIG(ca_info_blob); const char * const ssl_cert_type = SSL_SET_OPTION(cert_type); const char * const ssl_cafile = /* CURLOPT_CAINFO_BLOB overrides CURLOPT_CAINFO */ (ca_info_blob ? 
NULL : SSL_CONN_CONFIG(CAfile)); const char * const ssl_capath = SSL_CONN_CONFIG(CApath); const bool verifypeer = SSL_CONN_CONFIG(verifypeer); const char * const ssl_crlfile = SSL_SET_OPTION(CRLfile); char error_buffer[256]; struct ssl_backend_data *backend = connssl->backend; bool imported_native_ca = false; DEBUGASSERT(ssl_connect_1 == connssl->connecting_state); /* Make funny stuff to get random input */ result = ossl_seed(data); if(result) return result; SSL_SET_OPTION_LVALUE(certverifyresult) = !X509_V_OK; /* check to see if we've been told to use an explicit SSL/TLS version */ switch(ssl_version) { case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: case CURL_SSLVERSION_TLSv1_0: case CURL_SSLVERSION_TLSv1_1: case CURL_SSLVERSION_TLSv1_2: case CURL_SSLVERSION_TLSv1_3: /* it will be handled later with the context options */ #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) req_method = TLS_client_method(); #else req_method = SSLv23_client_method(); #endif use_sni(TRUE); break; case CURL_SSLVERSION_SSLv2: failf(data, "No SSLv2 support"); return CURLE_NOT_BUILT_IN; case CURL_SSLVERSION_SSLv3: failf(data, "No SSLv3 support"); return CURLE_NOT_BUILT_IN; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } if(backend->ctx) SSL_CTX_free(backend->ctx); backend->ctx = SSL_CTX_new(req_method); if(!backend->ctx) { failf(data, "SSL: couldn't create a context: %s", ossl_strerror(ERR_peek_error(), error_buffer, sizeof(error_buffer))); return CURLE_OUT_OF_MEMORY; } #ifdef SSL_MODE_RELEASE_BUFFERS SSL_CTX_set_mode(backend->ctx, SSL_MODE_RELEASE_BUFFERS); #endif #ifdef SSL_CTRL_SET_MSG_CALLBACK if(data->set.fdebug && data->set.verbose) { /* the SSL trace callback is only used for verbose logging */ SSL_CTX_set_msg_callback(backend->ctx, ossl_trace); SSL_CTX_set_msg_callback_arg(backend->ctx, conn); set_logger(conn, data); } #endif /* OpenSSL contains code to work-around lots of bugs and flaws in various SSL-implementations. SSL_CTX_set_options() is used to enabled those work-arounds. The man page for this option states that SSL_OP_ALL enables all the work-arounds and that "It is usually safe to use SSL_OP_ALL to enable the bug workaround options if compatibility with somewhat broken implementations is desired." The "-no_ticket" option was introduced in Openssl0.9.8j. It's a flag to disable "rfc4507bis session ticket support". rfc4507bis was later turned into the proper RFC5077 it seems: https://tools.ietf.org/html/rfc5077 The enabled extension concerns the session management. I wonder how often libcurl stops a connection and then resumes a TLS session. also, sending the session data is some overhead. .I suggest that you just use your proposed patch (which explicitly disables TICKET). If someone writes an application with libcurl and openssl who wants to enable the feature, one can do this in the SSL callback. SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG option enabling allowed proper interoperability with web server Netscape Enterprise Server 2.0.1 which was released back in 1996. Due to CVE-2010-4180, option SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG has become ineffective as of OpenSSL 0.9.8q and 1.0.0c. In order to mitigate CVE-2010-4180 when using previous OpenSSL versions we no longer enable this option regardless of OpenSSL version and SSL_OP_ALL definition. OpenSSL added a work-around for a SSL 3.0/TLS 1.0 CBC vulnerability (https://www.openssl.org/~bodo/tls-cbc.txt). 
In 0.9.6e they added a bit to SSL_OP_ALL that _disables_ that work-around despite the fact that SSL_OP_ALL is documented to do "rather harmless" workarounds. In order to keep the secure work-around, the SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS bit must not be set. */ ctx_options = SSL_OP_ALL; #ifdef SSL_OP_NO_TICKET ctx_options |= SSL_OP_NO_TICKET; #endif #ifdef SSL_OP_NO_COMPRESSION ctx_options |= SSL_OP_NO_COMPRESSION; #endif #ifdef SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG /* mitigate CVE-2010-4180 */ ctx_options &= ~SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; #endif #ifdef SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS /* unless the user explicitly ask to allow the protocol vulnerability we use the work-around */ if(!SSL_SET_OPTION(enable_beast)) ctx_options &= ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS; #endif switch(ssl_version) { case CURL_SSLVERSION_SSLv2: case CURL_SSLVERSION_SSLv3: return CURLE_NOT_BUILT_IN; /* "--tlsv<x.y>" options mean TLS >= version <x.y> */ case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: /* TLS >= version 1.0 */ case CURL_SSLVERSION_TLSv1_0: /* TLS >= version 1.0 */ case CURL_SSLVERSION_TLSv1_1: /* TLS >= version 1.1 */ case CURL_SSLVERSION_TLSv1_2: /* TLS >= version 1.2 */ case CURL_SSLVERSION_TLSv1_3: /* TLS >= version 1.3 */ /* asking for any TLS version as the minimum, means no SSL versions allowed */ ctx_options |= SSL_OP_NO_SSLv2; ctx_options |= SSL_OP_NO_SSLv3; #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* 1.1.0 */ result = set_ssl_version_min_max(backend->ctx, conn); #else result = set_ssl_version_min_max_legacy(&ctx_options, data, conn, sockindex); #endif if(result != CURLE_OK) return result; break; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } SSL_CTX_set_options(backend->ctx, ctx_options); #ifdef HAS_NPN if(conn->bits.tls_enable_npn) SSL_CTX_set_next_proto_select_cb(backend->ctx, select_next_proto_cb, data); #endif #ifdef HAS_ALPN if(conn->bits.tls_enable_alpn) { int cur = 0; unsigned char protocols[128]; #ifdef USE_HTTP2 if(data->state.httpwant >= CURL_HTTP_VERSION_2 #ifndef CURL_DISABLE_PROXY && (!SSL_IS_PROXY() || !conn->bits.tunnel_proxy) #endif ) { protocols[cur++] = ALPN_H2_LENGTH; memcpy(&protocols[cur], ALPN_H2, ALPN_H2_LENGTH); cur += ALPN_H2_LENGTH; infof(data, "ALPN, offering %s\n", ALPN_H2); } #endif protocols[cur++] = ALPN_HTTP_1_1_LENGTH; memcpy(&protocols[cur], ALPN_HTTP_1_1, ALPN_HTTP_1_1_LENGTH); cur += ALPN_HTTP_1_1_LENGTH; infof(data, "ALPN, offering %s\n", ALPN_HTTP_1_1); /* expects length prefixed preference ordered list of protocols in wire * format */ if(SSL_CTX_set_alpn_protos(backend->ctx, protocols, cur)) { failf(data, "Error setting ALPN"); return CURLE_SSL_CONNECT_ERROR; } } #endif if(ssl_cert || ssl_cert_blob || ssl_cert_type) { if(!result && !cert_stuff(data, backend->ctx, ssl_cert, ssl_cert_blob, ssl_cert_type, SSL_SET_OPTION(key), SSL_SET_OPTION(key_blob), SSL_SET_OPTION(key_type), SSL_SET_OPTION(key_passwd))) result = CURLE_SSL_CERTPROBLEM; if(result) /* failf() is already done in cert_stuff() */ return result; } ciphers = SSL_CONN_CONFIG(cipher_list); if(!ciphers) ciphers = (char *)DEFAULT_CIPHER_SELECTION; if(ciphers) { if(!SSL_CTX_set_cipher_list(backend->ctx, ciphers)) { failf(data, "failed setting cipher list: %s", ciphers); return CURLE_SSL_CIPHER; } infof(data, "Cipher selection: %s\n", ciphers); } #ifdef HAVE_SSL_CTX_SET_CIPHERSUITES { char *ciphers13 = SSL_CONN_CONFIG(cipher_list13); if(ciphers13) { if(!SSL_CTX_set_ciphersuites(backend->ctx, ciphers13)) { 
failf(data, "failed setting TLS 1.3 cipher suite: %s", ciphers13); return CURLE_SSL_CIPHER; } infof(data, "TLS 1.3 cipher selection: %s\n", ciphers13); } } #endif #ifdef HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH /* OpenSSL 1.1.1 requires clients to opt-in for PHA */ SSL_CTX_set_post_handshake_auth(backend->ctx, 1); #endif #ifdef HAVE_SSL_CTX_SET_EC_CURVES { char *curves = SSL_CONN_CONFIG(curves); if(curves) { if(!SSL_CTX_set1_curves_list(backend->ctx, curves)) { failf(data, "failed setting curves list: '%s'", curves); return CURLE_SSL_CIPHER; } } } #endif #ifdef USE_OPENSSL_SRP if(ssl_authtype == CURL_TLSAUTH_SRP) { char * const ssl_username = SSL_SET_OPTION(username); infof(data, "Using TLS-SRP username: %s\n", ssl_username); if(!SSL_CTX_set_srp_username(backend->ctx, ssl_username)) { failf(data, "Unable to set SRP user name"); return CURLE_BAD_FUNCTION_ARGUMENT; } if(!SSL_CTX_set_srp_password(backend->ctx, SSL_SET_OPTION(password))) { failf(data, "failed setting SRP password"); return CURLE_BAD_FUNCTION_ARGUMENT; } if(!SSL_CONN_CONFIG(cipher_list)) { infof(data, "Setting cipher list SRP\n"); if(!SSL_CTX_set_cipher_list(backend->ctx, "SRP")) { failf(data, "failed setting SRP cipher list"); return CURLE_SSL_CIPHER; } } } #endif #if defined(USE_WIN32_CRYPTO) /* Import certificates from the Windows root certificate store if requested. https://stackoverflow.com/questions/9507184/ https://github.com/d3x0r/SACK/blob/master/src/netlib/ssl_layer.c#L1037 https://tools.ietf.org/html/rfc5280 */ if((SSL_CONN_CONFIG(verifypeer) || SSL_CONN_CONFIG(verifyhost)) && (SSL_SET_OPTION(native_ca_store))) { X509_STORE *store = SSL_CTX_get_cert_store(backend->ctx); HCERTSTORE hStore = CertOpenSystemStore(0, TEXT("ROOT")); if(hStore) { PCCERT_CONTEXT pContext = NULL; /* The array of enhanced key usage OIDs will vary per certificate and is declared outside of the loop so that rather than malloc/free each iteration we can grow it with realloc, when necessary. */ CERT_ENHKEY_USAGE *enhkey_usage = NULL; DWORD enhkey_usage_size = 0; /* This loop makes a best effort to import all valid certificates from the MS root store. If a certificate cannot be imported it is skipped. 'result' is used to store only hard-fail conditions (such as out of memory) that cause an early break. */ result = CURLE_OK; for(;;) { X509 *x509; FILETIME now; BYTE key_usage[2]; DWORD req_size; const unsigned char *encoded_cert; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) char cert_name[256]; #endif pContext = CertEnumCertificatesInStore(hStore, pContext); if(!pContext) break; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) if(!CertGetNameStringA(pContext, CERT_NAME_SIMPLE_DISPLAY_TYPE, 0, NULL, cert_name, sizeof(cert_name))) { strcpy(cert_name, "Unknown"); } infof(data, "SSL: Checking cert \"%s\"\n", cert_name); #endif encoded_cert = (const unsigned char *)pContext->pbCertEncoded; if(!encoded_cert) continue; GetSystemTimeAsFileTime(&now); if(CompareFileTime(&pContext->pCertInfo->NotBefore, &now) > 0 || CompareFileTime(&now, &pContext->pCertInfo->NotAfter) > 0) continue; /* If key usage exists check for signing attribute */ if(CertGetIntendedKeyUsage(pContext->dwCertEncodingType, pContext->pCertInfo, key_usage, sizeof(key_usage))) { if(!(key_usage[0] & CERT_KEY_CERT_SIGN_KEY_USAGE)) continue; } else if(GetLastError()) continue; /* If enhanced key usage exists check for server auth attribute. 
* * Note "In a Microsoft environment, a certificate might also have EKU * extended properties that specify valid uses for the certificate." * The call below checks both, and behavior varies depending on what is * found. For more details see CertGetEnhancedKeyUsage doc. */ if(CertGetEnhancedKeyUsage(pContext, 0, NULL, &req_size)) { if(req_size && req_size > enhkey_usage_size) { void *tmp = realloc(enhkey_usage, req_size); if(!tmp) { failf(data, "SSL: Out of memory allocating for OID list"); result = CURLE_OUT_OF_MEMORY; break; } enhkey_usage = (CERT_ENHKEY_USAGE *)tmp; enhkey_usage_size = req_size; } if(CertGetEnhancedKeyUsage(pContext, 0, enhkey_usage, &req_size)) { if(!enhkey_usage->cUsageIdentifier) { /* "If GetLastError returns CRYPT_E_NOT_FOUND, the certificate is good for all uses. If it returns zero, the certificate has no valid uses." */ if((HRESULT)GetLastError() != CRYPT_E_NOT_FOUND) continue; } else { DWORD i; bool found = false; for(i = 0; i < enhkey_usage->cUsageIdentifier; ++i) { if(!strcmp("1.3.6.1.5.5.7.3.1" /* OID server auth */, enhkey_usage->rgpszUsageIdentifier[i])) { found = true; break; } } if(!found) continue; } } else continue; } else continue; x509 = d2i_X509(NULL, &encoded_cert, pContext->cbCertEncoded); if(!x509) continue; /* Try to import the certificate. This may fail for legitimate reasons such as duplicate certificate, which is allowed by MS but not OpenSSL. */ if(X509_STORE_add_cert(store, x509) == 1) { #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) infof(data, "SSL: Imported cert \"%s\"\n", cert_name); #endif imported_native_ca = true; } X509_free(x509); } free(enhkey_usage); CertFreeCertificateContext(pContext); CertCloseStore(hStore, 0); if(result) return result; } if(imported_native_ca) infof(data, "successfully imported windows ca store\n"); else infof(data, "error importing windows ca store, continuing anyway\n"); } #endif if(ca_info_blob) { result = load_cacert_from_memory(backend->ctx, ca_info_blob); if(result) { if(result == CURLE_OUT_OF_MEMORY || (verifypeer && !imported_native_ca)) { failf(data, "error importing CA certificate blob"); return result; } /* Only warning if no certificate verification is required. */ infof(data, "error importing CA certificate blob, continuing anyway\n"); } } #if defined(OPENSSL_VERSION_MAJOR) && (OPENSSL_VERSION_MAJOR >= 3) /* OpenSSL 3.0.0 has deprecated SSL_CTX_load_verify_locations */ { if(ssl_cafile) { if(!SSL_CTX_load_verify_file(backend->ctx, ssl_cafile)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate file: %s", ssl_cafile); return CURLE_SSL_CACERT_BADFILE; } /* Continue with a warning if no certificate verif is required. */ infof(data, "error setting certificate file, continuing anyway\n"); } infof(data, " CAfile: %s\n", ssl_cafile); } if(ssl_capath) { if(!SSL_CTX_load_verify_dir(backend->ctx, ssl_capath)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate path: %s", ssl_capath); return CURLE_SSL_CACERT_BADFILE; } /* Continue with a warning if no certificate verif is required. */ infof(data, "error setting certificate path, continuing anyway\n"); } infof(data, " CApath: %s\n", ssl_capath); } } #else if(ssl_cafile || ssl_capath) { /* tell SSL where to find CA certificates that are used to verify the servers certificate. 
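   For reference, a minimal standalone sketch (not libcurl code) of the
   pre-3.0 call used right below; the file and directory names are invented
   for illustration:

     // load trusted roots from a PEM bundle and/or a c_rehash-style directory
     if(!SSL_CTX_load_verify_locations(ctx, "/etc/ssl/certs/ca-bundle.pem",
                                       "/etc/ssl/certs"))
       ERR_print_errors_fp(stderr);  // dump the OpenSSL error queue

   Either argument may be NULL, but not both at once, which is what the
   "ssl_cafile || ssl_capath" guard above ensures.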
*/ if(!SSL_CTX_load_verify_locations(backend->ctx, ssl_cafile, ssl_capath)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate verify locations:" " CAfile: %s CApath: %s", ssl_cafile ? ssl_cafile : "none", ssl_capath ? ssl_capath : "none"); return CURLE_SSL_CACERT_BADFILE; } /* Just continue with a warning if no strict certificate verification is required. */ infof(data, "error setting certificate verify locations," " continuing anyway:\n"); } else { /* Everything is fine. */ infof(data, "successfully set certificate verify locations:\n"); } infof(data, " CAfile: %s\n", ssl_cafile ? ssl_cafile : "none"); infof(data, " CApath: %s\n", ssl_capath ? ssl_capath : "none"); } #endif #ifdef CURL_CA_FALLBACK if(verifypeer && !ca_info_blob && !ssl_cafile && !ssl_capath && !imported_native_ca) { /* verifying the peer without any CA certificates won't work so use openssl's built in default as fallback */ SSL_CTX_set_default_verify_paths(backend->ctx); } #endif if(ssl_crlfile) { /* tell SSL where to find CRL file that is used to check certificate * revocation */ lookup = X509_STORE_add_lookup(SSL_CTX_get_cert_store(backend->ctx), X509_LOOKUP_file()); if(!lookup || (!X509_load_crl_file(lookup, ssl_crlfile, X509_FILETYPE_PEM)) ) { failf(data, "error loading CRL file: %s", ssl_crlfile); return CURLE_SSL_CRL_BADFILE; } /* Everything is fine. */ infof(data, "successfully load CRL file:\n"); X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL); infof(data, " CRLfile: %s\n", ssl_crlfile); } if(verifypeer) { /* Try building a chain using issuers in the trusted store first to avoid problems with server-sent legacy intermediates. Newer versions of OpenSSL do alternate chain checking by default but we do not know how to determine that in a reliable manner. https://rt.openssl.org/Ticket/Display.html?id=3621&user=guest&pass=guest */ #if defined(X509_V_FLAG_TRUSTED_FIRST) X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_TRUSTED_FIRST); #endif #ifdef X509_V_FLAG_PARTIAL_CHAIN if(!SSL_SET_OPTION(no_partialchain) && !ssl_crlfile) { /* Have intermediate certificates in the trust store be treated as trust-anchors, in the same way as self-signed root CA certificates are. This allows users to verify servers using the intermediate cert only, instead of needing the whole chain. Due to OpenSSL bug https://github.com/openssl/openssl/issues/5081 we cannot do partial chains with CRL check. */ X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_PARTIAL_CHAIN); } #endif } /* SSL always tries to verify the peer, this only says whether it should * fail to connect if the verification fails, or if it should continue * anyway. In the latter case the result of the verification is checked with * SSL_get_verify_result() below. */ SSL_CTX_set_verify(backend->ctx, verifypeer ? SSL_VERIFY_PEER : SSL_VERIFY_NONE, NULL); /* Enable logging of secrets to the file specified in env SSLKEYLOGFILE. */ #ifdef HAVE_KEYLOG_CALLBACK if(Curl_tls_keylog_enabled()) { SSL_CTX_set_keylog_callback(backend->ctx, ossl_keylog_callback); } #endif /* Enable the session cache because it's a prerequisite for the "new session" * callback. Use the "external storage" mode to avoid that OpenSSL creates * an internal session cache. 
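   Reduced to its bare form, that pattern looks like this (a sketch with a
   hypothetical callback and cache helper, not the libcurl implementation):

     static int my_new_session_cb(SSL *ssl, SSL_SESSION *sess)
     {
       // return 1 to keep a reference to 'sess' (we stored it ourselves),
       // return 0 to let OpenSSL drop it again
       return store_in_my_cache(ssl, sess) ? 1 : 0;  // hypothetical helper
     }
     ...
     SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_CLIENT |
                                         SSL_SESS_CACHE_NO_INTERNAL);
     SSL_CTX_sess_set_new_cb(ctx, my_new_session_cb);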
*/ SSL_CTX_set_session_cache_mode(backend->ctx, SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_NO_INTERNAL); SSL_CTX_sess_set_new_cb(backend->ctx, ossl_new_session_cb); /* give application a chance to interfere with SSL set up. */ if(data->set.ssl.fsslctx) { Curl_set_in_callback(data, true); result = (*data->set.ssl.fsslctx)(data, backend->ctx, data->set.ssl.fsslctxp); Curl_set_in_callback(data, false); if(result) { failf(data, "error signaled by ssl ctx callback"); return result; } } /* Lets make an SSL structure */ if(backend->handle) SSL_free(backend->handle); backend->handle = SSL_new(backend->ctx); if(!backend->handle) { failf(data, "SSL: couldn't create a context (handle)!"); return CURLE_OUT_OF_MEMORY; } #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) if(SSL_CONN_CONFIG(verifystatus)) SSL_set_tlsext_status_type(backend->handle, TLSEXT_STATUSTYPE_ocsp); #endif #if defined(OPENSSL_IS_BORINGSSL) && defined(ALLOW_RENEG) SSL_set_renegotiate_mode(backend->handle, ssl_renegotiate_freely); #endif SSL_set_connect_state(backend->handle); backend->server_cert = 0x0; #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME if((0 == Curl_inet_pton(AF_INET, hostname, &addr)) && #ifdef ENABLE_IPV6 (0 == Curl_inet_pton(AF_INET6, hostname, &addr)) && #endif sni) { size_t nlen = strlen(hostname); if((long)nlen >= data->set.buffer_size) /* this is seriously messed up */ return CURLE_SSL_CONNECT_ERROR; /* RFC 6066 section 3 says the SNI field is case insensitive, but browsers send the data lowercase and subsequently there are now numerous servers out there that don't work unless the name is lowercased */ Curl_strntolower(data->state.buffer, hostname, nlen); data->state.buffer[nlen] = 0; if(!SSL_set_tlsext_host_name(backend->handle, data->state.buffer)) infof(data, "WARNING: failed to configure server name indication (SNI) " "TLS extension\n"); } #endif ossl_associate_connection(data, conn, sockindex); Curl_ssl_sessionid_lock(data); if(!Curl_ssl_getsessionid(data, conn, SSL_IS_PROXY() ? TRUE : FALSE, &ssl_sessionid, NULL, sockindex)) { /* we got a session id, use it! 
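   In plain OpenSSL terms this step is simply (a sketch; 'cached' stands for a
   SSL_SESSION pointer saved from an earlier connection):

     SSL *ssl = SSL_new(ctx);
     if(cached)
       SSL_set_session(ssl, cached);  // offer the old session for resumption
     // ... after SSL_connect(), SSL_session_reused(ssl) reports whether the
     // server actually accepted the resumption

   SSL_set_session() takes its own reference on the session, so the cached
   copy remains valid afterwards.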
*/ if(!SSL_set_session(backend->handle, ssl_sessionid)) { Curl_ssl_sessionid_unlock(data); failf(data, "SSL: SSL_set_session failed: %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return CURLE_SSL_CONNECT_ERROR; } /* Informational message */ infof(data, "SSL re-using session ID\n"); } Curl_ssl_sessionid_unlock(data); #ifndef CURL_DISABLE_PROXY if(conn->proxy_ssl[sockindex].use) { BIO *const bio = BIO_new(BIO_f_ssl()); SSL *handle = conn->proxy_ssl[sockindex].backend->handle; DEBUGASSERT(ssl_connection_complete == conn->proxy_ssl[sockindex].state); DEBUGASSERT(handle != NULL); DEBUGASSERT(bio != NULL); BIO_set_ssl(bio, handle, FALSE); SSL_set_bio(backend->handle, bio, bio); } else #endif if(!SSL_set_fd(backend->handle, (int)sockfd)) { /* pass the raw socket into the SSL layers */ failf(data, "SSL: SSL_set_fd failed: %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return CURLE_SSL_CONNECT_ERROR; } connssl->connecting_state = ssl_connect_2; return CURLE_OK; } static CURLcode ossl_connect_step2(struct Curl_easy *data, struct connectdata *conn, int sockindex) { int err; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; struct ssl_backend_data *backend = connssl->backend; DEBUGASSERT(ssl_connect_2 == connssl->connecting_state || ssl_connect_2_reading == connssl->connecting_state || ssl_connect_2_writing == connssl->connecting_state); ERR_clear_error(); err = SSL_connect(backend->handle); #ifndef HAVE_KEYLOG_CALLBACK if(Curl_tls_keylog_enabled()) { /* If key logging is enabled, wait for the handshake to complete and then * proceed with logging secrets (for TLS 1.2 or older). */ ossl_log_tls12_secret(backend->handle, &backend->keylog_done); } #endif /* 1 is fine 0 is "not successful but was shut down controlled" <0 is "handshake was not successful, because a fatal error occurred" */ if(1 != err) { int detail = SSL_get_error(backend->handle, err); if(SSL_ERROR_WANT_READ == detail) { connssl->connecting_state = ssl_connect_2_reading; return CURLE_OK; } if(SSL_ERROR_WANT_WRITE == detail) { connssl->connecting_state = ssl_connect_2_writing; return CURLE_OK; } #ifdef SSL_ERROR_WANT_ASYNC if(SSL_ERROR_WANT_ASYNC == detail) { connssl->connecting_state = ssl_connect_2; return CURLE_OK; } #endif else { /* untreated error */ unsigned long errdetail; char error_buffer[256]=""; CURLcode result; long lerr; int lib; int reason; /* the connection failed, we're not waiting for anything else. */ connssl->connecting_state = ssl_connect_2; /* Get the earliest error code from the thread's error queue and removes the entry. 
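   The general error-queue pattern, independent of libcurl (a sketch; the
   fprintf target is arbitrary):

     unsigned long e;
     while((e = ERR_get_error()) != 0) {
       char buf[256];
       ERR_error_string_n(e, buf, sizeof(buf));  // human-readable form
       fprintf(stderr, "%s (lib %d, reason %d)\n", buf,
               ERR_GET_LIB(e), ERR_GET_REASON(e));
     }

   Below only the first entry is fetched and decoded.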
*/ errdetail = ERR_get_error(); /* Extract which lib and reason */ lib = ERR_GET_LIB(errdetail); reason = ERR_GET_REASON(errdetail); if((lib == ERR_LIB_SSL) && ((reason == SSL_R_CERTIFICATE_VERIFY_FAILED) || (reason == SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED))) { result = CURLE_PEER_FAILED_VERIFICATION; lerr = SSL_get_verify_result(backend->handle); if(lerr != X509_V_OK) { SSL_SET_OPTION_LVALUE(certverifyresult) = lerr; msnprintf(error_buffer, sizeof(error_buffer), "SSL certificate problem: %s", X509_verify_cert_error_string(lerr)); } else /* strcpy() is fine here as long as the string fits within error_buffer */ strcpy(error_buffer, "SSL certificate verification failed"); } #if (OPENSSL_VERSION_NUMBER >= 0x10101000L && \ !defined(LIBRESSL_VERSION_NUMBER) && \ !defined(OPENSSL_IS_BORINGSSL)) /* SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED is only available on OpenSSL version above v1.1.1, not Libre SSL nor BoringSSL */ else if((lib == ERR_LIB_SSL) && (reason == SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED)) { /* If client certificate is required, communicate the error to client */ result = CURLE_SSL_CLIENTCERT; ossl_strerror(errdetail, error_buffer, sizeof(error_buffer)); } #endif else { result = CURLE_SSL_CONNECT_ERROR; ossl_strerror(errdetail, error_buffer, sizeof(error_buffer)); } /* detail is already set to the SSL error above */ /* If we e.g. use SSLv2 request-method and the server doesn't like us * (RST connection etc.), OpenSSL gives no explanation whatsoever and * the SO_ERROR is also lost. */ if(CURLE_SSL_CONNECT_ERROR == result && errdetail == 0) { const char * const hostname = SSL_HOST_NAME(); const long int port = SSL_HOST_PORT(); char extramsg[80]=""; int sockerr = SOCKERRNO; if(sockerr && detail == SSL_ERROR_SYSCALL) Curl_strerror(sockerr, extramsg, sizeof(extramsg)); failf(data, OSSL_PACKAGE " SSL_connect: %s in connection to %s:%ld ", extramsg[0] ? extramsg : SSL_ERROR_to_str(detail), hostname, port); return result; } /* Could be a CERT problem */ failf(data, "%s", error_buffer); return result; } } else { /* we have been connected fine, we're not waiting for anything else. */ connssl->connecting_state = ssl_connect_3; /* Informational message */ infof(data, "SSL connection using %s / %s\n", SSL_get_version(backend->handle), SSL_get_cipher(backend->handle)); #ifdef HAS_ALPN /* Sets data and len to negotiated protocol, len is 0 if no protocol was * negotiated */ if(conn->bits.tls_enable_alpn) { const unsigned char *neg_protocol; unsigned int len; SSL_get0_alpn_selected(backend->handle, &neg_protocol, &len); if(len) { infof(data, "ALPN, server accepted to use %.*s\n", len, neg_protocol); #ifdef USE_HTTP2 if(len == ALPN_H2_LENGTH && !memcmp(ALPN_H2, neg_protocol, len)) { conn->negnpn = CURL_HTTP_VERSION_2; } else #endif if(len == ALPN_HTTP_1_1_LENGTH && !memcmp(ALPN_HTTP_1_1, neg_protocol, ALPN_HTTP_1_1_LENGTH)) { conn->negnpn = CURL_HTTP_VERSION_1_1; } } else infof(data, "ALPN, server did not agree to a protocol\n"); Curl_multiuse_state(data, conn->negnpn == CURL_HTTP_VERSION_2 ? 
BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE); } #endif return CURLE_OK; } } static int asn1_object_dump(ASN1_OBJECT *a, char *buf, size_t len) { int i, ilen; ilen = (int)len; if(ilen < 0) return 1; /* buffer too big */ i = i2t_ASN1_OBJECT(buf, ilen, a); if(i >= ilen) return 1; /* buffer too small */ return 0; } #define push_certinfo(_label, _num) \ do { \ long info_len = BIO_get_mem_data(mem, &ptr); \ Curl_ssl_push_certinfo_len(data, _num, _label, ptr, info_len); \ if(1 != BIO_reset(mem)) \ break; \ } while(0) static void pubkey_show(struct Curl_easy *data, BIO *mem, int num, const char *type, const char *name, #ifdef HAVE_OPAQUE_RSA_DSA_DH const #endif BIGNUM *bn) { char *ptr; char namebuf[32]; msnprintf(namebuf, sizeof(namebuf), "%s(%s)", type, name); if(bn) BN_print(mem, bn); push_certinfo(namebuf, num); } #ifdef HAVE_OPAQUE_RSA_DSA_DH #define print_pubkey_BN(_type, _name, _num) \ pubkey_show(data, mem, _num, #_type, #_name, _name) #else #define print_pubkey_BN(_type, _name, _num) \ do { \ if(_type->_name) { \ pubkey_show(data, mem, _num, #_type, #_name, _type->_name); \ } \ } while(0) #endif static void X509V3_ext(struct Curl_easy *data, int certnum, CONST_EXTS STACK_OF(X509_EXTENSION) *exts) { int i; if((int)sk_X509_EXTENSION_num(exts) <= 0) /* no extensions, bail out */ return; for(i = 0; i < (int)sk_X509_EXTENSION_num(exts); i++) { ASN1_OBJECT *obj; X509_EXTENSION *ext = sk_X509_EXTENSION_value(exts, i); BUF_MEM *biomem; char namebuf[128]; BIO *bio_out = BIO_new(BIO_s_mem()); if(!bio_out) return; obj = X509_EXTENSION_get_object(ext); asn1_object_dump(obj, namebuf, sizeof(namebuf)); if(!X509V3_EXT_print(bio_out, ext, 0, 0)) ASN1_STRING_print(bio_out, (ASN1_STRING *)X509_EXTENSION_get_data(ext)); BIO_get_mem_ptr(bio_out, &biomem); Curl_ssl_push_certinfo_len(data, certnum, namebuf, biomem->data, biomem->length); BIO_free(bio_out); } } #ifdef OPENSSL_IS_BORINGSSL typedef size_t numcert_t; #else typedef int numcert_t; #endif #if defined(OPENSSL_VERSION_MAJOR) && (OPENSSL_VERSION_MAJOR >= 3) #define OSSL3_CONST const #else #define OSSL3_CONST #endif static CURLcode get_cert_chain(struct Curl_easy *data, struct ssl_connect_data *connssl) { CURLcode result; STACK_OF(X509) *sk; int i; numcert_t numcerts; BIO *mem; struct ssl_backend_data *backend = connssl->backend; sk = SSL_get_peer_cert_chain(backend->handle); if(!sk) { return CURLE_OUT_OF_MEMORY; } numcerts = sk_X509_num(sk); result = Curl_ssl_init_certinfo(data, (int)numcerts); if(result) { return result; } mem = BIO_new(BIO_s_mem()); for(i = 0; i < (int)numcerts; i++) { ASN1_INTEGER *num; X509 *x = sk_X509_value(sk, i); EVP_PKEY *pubkey = NULL; int j; char *ptr; const ASN1_BIT_STRING *psig = NULL; X509_NAME_print_ex(mem, X509_get_subject_name(x), 0, XN_FLAG_ONELINE); push_certinfo("Subject", i); X509_NAME_print_ex(mem, X509_get_issuer_name(x), 0, XN_FLAG_ONELINE); push_certinfo("Issuer", i); BIO_printf(mem, "%lx", X509_get_version(x)); push_certinfo("Version", i); num = X509_get_serialNumber(x); if(num->type == V_ASN1_NEG_INTEGER) BIO_puts(mem, "-"); for(j = 0; j < num->length; j++) BIO_printf(mem, "%02x", num->data[j]); push_certinfo("Serial Number", i); #if defined(HAVE_X509_GET0_SIGNATURE) && defined(HAVE_X509_GET0_EXTENSIONS) { const X509_ALGOR *sigalg = NULL; X509_PUBKEY *xpubkey = NULL; ASN1_OBJECT *pubkeyoid = NULL; X509_get0_signature(&psig, &sigalg, x); if(sigalg) { i2a_ASN1_OBJECT(mem, sigalg->algorithm); push_certinfo("Signature Algorithm", i); } xpubkey = X509_get_X509_PUBKEY(x); if(xpubkey) { X509_PUBKEY_get0_param(&pubkeyoid, 
NULL, NULL, NULL, xpubkey); if(pubkeyoid) { i2a_ASN1_OBJECT(mem, pubkeyoid); push_certinfo("Public Key Algorithm", i); } } X509V3_ext(data, i, X509_get0_extensions(x)); } #else { /* before OpenSSL 1.0.2 */ X509_CINF *cinf = x->cert_info; i2a_ASN1_OBJECT(mem, cinf->signature->algorithm); push_certinfo("Signature Algorithm", i); i2a_ASN1_OBJECT(mem, cinf->key->algor->algorithm); push_certinfo("Public Key Algorithm", i); X509V3_ext(data, i, cinf->extensions); psig = x->signature; } #endif ASN1_TIME_print(mem, X509_get0_notBefore(x)); push_certinfo("Start date", i); ASN1_TIME_print(mem, X509_get0_notAfter(x)); push_certinfo("Expire date", i); pubkey = X509_get_pubkey(x); if(!pubkey) infof(data, " Unable to load public key\n"); else { int pktype; #ifdef HAVE_OPAQUE_EVP_PKEY pktype = EVP_PKEY_id(pubkey); #else pktype = pubkey->type; #endif switch(pktype) { case EVP_PKEY_RSA: { OSSL3_CONST RSA *rsa; #ifdef HAVE_OPAQUE_EVP_PKEY rsa = EVP_PKEY_get0_RSA(pubkey); #else rsa = pubkey->pkey.rsa; #endif #ifdef HAVE_OPAQUE_RSA_DSA_DH { const BIGNUM *n; const BIGNUM *e; RSA_get0_key(rsa, &n, &e, NULL); BIO_printf(mem, "%d", BN_num_bits(n)); push_certinfo("RSA Public Key", i); print_pubkey_BN(rsa, n, i); print_pubkey_BN(rsa, e, i); } #else BIO_printf(mem, "%d", BN_num_bits(rsa->n)); push_certinfo("RSA Public Key", i); print_pubkey_BN(rsa, n, i); print_pubkey_BN(rsa, e, i); #endif break; } case EVP_PKEY_DSA: { #ifndef OPENSSL_NO_DSA OSSL3_CONST DSA *dsa; #ifdef HAVE_OPAQUE_EVP_PKEY dsa = EVP_PKEY_get0_DSA(pubkey); #else dsa = pubkey->pkey.dsa; #endif #ifdef HAVE_OPAQUE_RSA_DSA_DH { const BIGNUM *p; const BIGNUM *q; const BIGNUM *g; const BIGNUM *pub_key; DSA_get0_pqg(dsa, &p, &q, &g); DSA_get0_key(dsa, &pub_key, NULL); print_pubkey_BN(dsa, p, i); print_pubkey_BN(dsa, q, i); print_pubkey_BN(dsa, g, i); print_pubkey_BN(dsa, pub_key, i); } #else print_pubkey_BN(dsa, p, i); print_pubkey_BN(dsa, q, i); print_pubkey_BN(dsa, g, i); print_pubkey_BN(dsa, pub_key, i); #endif #endif /* !OPENSSL_NO_DSA */ break; } case EVP_PKEY_DH: { OSSL3_CONST DH *dh; #ifdef HAVE_OPAQUE_EVP_PKEY dh = EVP_PKEY_get0_DH(pubkey); #else dh = pubkey->pkey.dh; #endif #ifdef HAVE_OPAQUE_RSA_DSA_DH { const BIGNUM *p; const BIGNUM *q; const BIGNUM *g; const BIGNUM *pub_key; DH_get0_pqg(dh, &p, &q, &g); DH_get0_key(dh, &pub_key, NULL); print_pubkey_BN(dh, p, i); print_pubkey_BN(dh, q, i); print_pubkey_BN(dh, g, i); print_pubkey_BN(dh, pub_key, i); } #else print_pubkey_BN(dh, p, i); print_pubkey_BN(dh, g, i); print_pubkey_BN(dh, pub_key, i); #endif break; } } EVP_PKEY_free(pubkey); } if(psig) { for(j = 0; j < psig->length; j++) BIO_printf(mem, "%02x:", psig->data[j]); push_certinfo("Signature", i); } PEM_write_bio_X509(mem, x); push_certinfo("Cert", i); } BIO_free(mem); return CURLE_OK; } /* * Heavily modified from: * https://www.owasp.org/index.php/Certificate_and_Public_Key_Pinning#OpenSSL */ static CURLcode pkp_pin_peer_pubkey(struct Curl_easy *data, X509* cert, const char *pinnedpubkey) { /* Scratch */ int len1 = 0, len2 = 0; unsigned char *buff1 = NULL, *temp = NULL; /* Result is returned to caller */ CURLcode result = CURLE_SSL_PINNEDPUBKEYNOTMATCH; /* if a path wasn't specified, don't pin */ if(!pinnedpubkey) return CURLE_OK; if(!cert) return result; do { /* Begin Gyrations to get the subjectPublicKeyInfo */ /* Thanks to Viktor Dukhovni on the OpenSSL mailing list */ /* https://groups.google.com/group/mailing.openssl.users/browse_thread /thread/d61858dae102c6c7 */ len1 = i2d_X509_PUBKEY(X509_get_X509_PUBKEY(cert), NULL); if(len1 < 1) 
break; /* failed */ buff1 = temp = malloc(len1); if(!buff1) break; /* failed */ /* https://www.openssl.org/docs/crypto/d2i_X509.html */ len2 = i2d_X509_PUBKEY(X509_get_X509_PUBKEY(cert), &temp); /* * These checks are verifying we got back the same values as when we * sized the buffer. It's pretty weak since they should always be the * same. But it gives us something to test. */ if((len1 != len2) || !temp || ((temp - buff1) != len1)) break; /* failed */ /* End Gyrations */ /* The one good exit point */ result = Curl_pin_peer_pubkey(data, pinnedpubkey, buff1, len1); } while(0); if(buff1) free(buff1); return result; } /* * Get the server cert, verify it and show it etc, only call failf() if the * 'strict' argument is TRUE as otherwise all this is for informational * purposes only! * * We check certificates to authenticate the server; otherwise we risk * man-in-the-middle attack. */ static CURLcode servercert(struct Curl_easy *data, struct connectdata *conn, struct ssl_connect_data *connssl, bool strict) { CURLcode result = CURLE_OK; int rc; long lerr; X509 *issuer; BIO *fp = NULL; char error_buffer[256]=""; char buffer[2048]; const char *ptr; BIO *mem = BIO_new(BIO_s_mem()); struct ssl_backend_data *backend = connssl->backend; if(data->set.ssl.certinfo) /* we've been asked to gather certificate info! */ (void)get_cert_chain(data, connssl); backend->server_cert = SSL_get_peer_certificate(backend->handle); if(!backend->server_cert) { BIO_free(mem); if(!strict) return CURLE_OK; failf(data, "SSL: couldn't get peer certificate!"); return CURLE_PEER_FAILED_VERIFICATION; } infof(data, "%s certificate:\n", SSL_IS_PROXY() ? "Proxy" : "Server"); rc = x509_name_oneline(X509_get_subject_name(backend->server_cert), buffer, sizeof(buffer)); infof(data, " subject: %s\n", rc?"[NONE]":buffer); #ifndef CURL_DISABLE_VERBOSE_STRINGS { long len; ASN1_TIME_print(mem, X509_get0_notBefore(backend->server_cert)); len = BIO_get_mem_data(mem, (char **) &ptr); infof(data, " start date: %.*s\n", len, ptr); (void)BIO_reset(mem); ASN1_TIME_print(mem, X509_get0_notAfter(backend->server_cert)); len = BIO_get_mem_data(mem, (char **) &ptr); infof(data, " expire date: %.*s\n", len, ptr); (void)BIO_reset(mem); } #endif BIO_free(mem); if(SSL_CONN_CONFIG(verifyhost)) { result = verifyhost(data, conn, backend->server_cert); if(result) { X509_free(backend->server_cert); backend->server_cert = NULL; return result; } } rc = x509_name_oneline(X509_get_issuer_name(backend->server_cert), buffer, sizeof(buffer)); if(rc) { if(strict) failf(data, "SSL: couldn't get X509-issuer name!"); result = CURLE_PEER_FAILED_VERIFICATION; } else { infof(data, " issuer: %s\n", buffer); /* We could do all sorts of certificate verification stuff here before deallocating the certificate. */ /* e.g. 
match issuer name with provided issuer certificate */ if(SSL_SET_OPTION(issuercert) || SSL_SET_OPTION(issuercert_blob)) { if(SSL_SET_OPTION(issuercert_blob)) fp = BIO_new_mem_buf(SSL_SET_OPTION(issuercert_blob)->data, (int)SSL_SET_OPTION(issuercert_blob)->len); else { fp = BIO_new(BIO_s_file()); if(!fp) { failf(data, "BIO_new return NULL, " OSSL_PACKAGE " error %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer)) ); X509_free(backend->server_cert); backend->server_cert = NULL; return CURLE_OUT_OF_MEMORY; } if(BIO_read_filename(fp, SSL_SET_OPTION(issuercert)) <= 0) { if(strict) failf(data, "SSL: Unable to open issuer cert (%s)", SSL_SET_OPTION(issuercert)); BIO_free(fp); X509_free(backend->server_cert); backend->server_cert = NULL; return CURLE_SSL_ISSUER_ERROR; } } issuer = PEM_read_bio_X509(fp, NULL, ZERO_NULL, NULL); if(!issuer) { if(strict) failf(data, "SSL: Unable to read issuer cert (%s)", SSL_SET_OPTION(issuercert)); BIO_free(fp); X509_free(issuer); X509_free(backend->server_cert); backend->server_cert = NULL; return CURLE_SSL_ISSUER_ERROR; } if(X509_check_issued(issuer, backend->server_cert) != X509_V_OK) { if(strict) failf(data, "SSL: Certificate issuer check failed (%s)", SSL_SET_OPTION(issuercert)); BIO_free(fp); X509_free(issuer); X509_free(backend->server_cert); backend->server_cert = NULL; return CURLE_SSL_ISSUER_ERROR; } infof(data, " SSL certificate issuer check ok (%s)\n", SSL_SET_OPTION(issuercert)); BIO_free(fp); X509_free(issuer); } lerr = SSL_get_verify_result(backend->handle); SSL_SET_OPTION_LVALUE(certverifyresult) = lerr; if(lerr != X509_V_OK) { if(SSL_CONN_CONFIG(verifypeer)) { /* We probably never reach this, because SSL_connect() will fail and we return earlier if verifypeer is set? */ if(strict) failf(data, "SSL certificate verify result: %s (%ld)", X509_verify_cert_error_string(lerr), lerr); result = CURLE_PEER_FAILED_VERIFICATION; } else infof(data, " SSL certificate verify result: %s (%ld)," " continuing anyway.\n", X509_verify_cert_error_string(lerr), lerr); } else infof(data, " SSL certificate verify ok.\n"); } #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) if(SSL_CONN_CONFIG(verifystatus)) { result = verifystatus(data, connssl); if(result) { X509_free(backend->server_cert); backend->server_cert = NULL; return result; } } #endif if(!strict) /* when not strict, we don't bother about the verify cert problems */ result = CURLE_OK; ptr = SSL_PINNED_PUB_KEY(); if(!result && ptr) { result = pkp_pin_peer_pubkey(data, backend->server_cert, ptr); if(result) failf(data, "SSL: public key does not match pinned public key!"); } X509_free(backend->server_cert); backend->server_cert = NULL; connssl->connecting_state = ssl_connect_done; return result; } static CURLcode ossl_connect_step3(struct Curl_easy *data, struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; DEBUGASSERT(ssl_connect_3 == connssl->connecting_state); /* * We check certificates to authenticate the server; otherwise we risk * man-in-the-middle attack; NEVERTHELESS, if we're told explicitly not to * verify the peer ignore faults and failures from the server cert * operations. 
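   In raw OpenSSL the after-the-fact check performed inside servercert() looks
   roughly like this (a sketch, not the libcurl helper itself):

     long vr = SSL_get_verify_result(ssl);
     if(vr != X509_V_OK)
       fprintf(stderr, "certificate problem: %s\n",
               X509_verify_cert_error_string(vr));
     // with SSL_VERIFY_NONE the handshake still completes, so this is where
     // the verification outcome can still be inspected afterwards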
*/ result = servercert(data, conn, connssl, (SSL_CONN_CONFIG(verifypeer) || SSL_CONN_CONFIG(verifyhost))); if(!result) connssl->connecting_state = ssl_connect_done; return result; } static Curl_recv ossl_recv; static Curl_send ossl_send; static CURLcode ossl_connect_common(struct Curl_easy *data, struct connectdata *conn, int sockindex, bool nonblocking, bool *done) { CURLcode result; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; curl_socket_t sockfd = conn->sock[sockindex]; int what; /* check if the connection has already been established */ if(ssl_connection_complete == connssl->state) { *done = TRUE; return CURLE_OK; } if(ssl_connect_1 == connssl->connecting_state) { /* Find out how much more time we're allowed */ const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE); if(timeout_ms < 0) { /* no need to continue if time already is up */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } result = ossl_connect_step1(data, conn, sockindex); if(result) return result; } while(ssl_connect_2 == connssl->connecting_state || ssl_connect_2_reading == connssl->connecting_state || ssl_connect_2_writing == connssl->connecting_state) { /* check allowed time left */ const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE); if(timeout_ms < 0) { /* no need to continue if time already is up */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } /* if ssl is expecting something, check if it's available. */ if(connssl->connecting_state == ssl_connect_2_reading || connssl->connecting_state == ssl_connect_2_writing) { curl_socket_t writefd = ssl_connect_2_writing == connssl->connecting_state?sockfd:CURL_SOCKET_BAD; curl_socket_t readfd = ssl_connect_2_reading == connssl->connecting_state?sockfd:CURL_SOCKET_BAD; what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd, nonblocking?0:timeout_ms); if(what < 0) { /* fatal error */ failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO); return CURLE_SSL_CONNECT_ERROR; } if(0 == what) { if(nonblocking) { *done = FALSE; return CURLE_OK; } /* timeout */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } /* socket is readable or writable */ } /* Run transaction, and return to the caller if it failed or if this * connection is done nonblocking and this loop would execute again. This * permits the owner of a multi handle to abort a connection attempt * before step2 has completed while ensuring that a client using select() * or epoll() will always have a valid fdset to wait on. */ result = ossl_connect_step2(data, conn, sockindex); if(result || (nonblocking && (ssl_connect_2 == connssl->connecting_state || ssl_connect_2_reading == connssl->connecting_state || ssl_connect_2_writing == connssl->connecting_state))) return result; } /* repeat step2 until all transactions are done. 
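   Outside of curl's connect state machine the same idea reduces to a loop
   like this (a sketch; the wait helpers are hypothetical poll()/select()
   wrappers around the socket 'fd'):

     int r;
     while((r = SSL_connect(ssl)) != 1) {
       int err = SSL_get_error(ssl, r);
       if(err == SSL_ERROR_WANT_READ)
         wait_for_readable(fd);   // hypothetical
       else if(err == SSL_ERROR_WANT_WRITE)
         wait_for_writable(fd);   // hypothetical
       else
         break;                   // fatal handshake error
     }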
*/ if(ssl_connect_3 == connssl->connecting_state) { result = ossl_connect_step3(data, conn, sockindex); if(result) return result; } if(ssl_connect_done == connssl->connecting_state) { connssl->state = ssl_connection_complete; conn->recv[sockindex] = ossl_recv; conn->send[sockindex] = ossl_send; *done = TRUE; } else *done = FALSE; /* Reset our connect state machine */ connssl->connecting_state = ssl_connect_1; return CURLE_OK; } static CURLcode ossl_connect_nonblocking(struct Curl_easy *data, struct connectdata *conn, int sockindex, bool *done) { return ossl_connect_common(data, conn, sockindex, TRUE, done); } static CURLcode ossl_connect(struct Curl_easy *data, struct connectdata *conn, int sockindex) { CURLcode result; bool done = FALSE; result = ossl_connect_common(data, conn, sockindex, FALSE, &done); if(result) return result; DEBUGASSERT(done); return CURLE_OK; } static bool ossl_data_pending(const struct connectdata *conn, int connindex) { const struct ssl_connect_data *connssl = &conn->ssl[connindex]; if(connssl->backend->handle && SSL_pending(connssl->backend->handle)) return TRUE; #ifndef CURL_DISABLE_PROXY { const struct ssl_connect_data *proxyssl = &conn->proxy_ssl[connindex]; if(proxyssl->backend->handle && SSL_pending(proxyssl->backend->handle)) return TRUE; } #endif return FALSE; } static size_t ossl_version(char *buffer, size_t size); static ssize_t ossl_send(struct Curl_easy *data, int sockindex, const void *mem, size_t len, CURLcode *curlcode) { /* SSL_write() is said to return 'int' while write() and send() returns 'size_t' */ int err; char error_buffer[256]; unsigned long sslerror; int memlen; int rc; struct connectdata *conn = data->conn; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; struct ssl_backend_data *backend = connssl->backend; ERR_clear_error(); memlen = (len > (size_t)INT_MAX) ? INT_MAX : (int)len; set_logger(conn, data); rc = SSL_write(backend->handle, mem, memlen); if(rc <= 0) { err = SSL_get_error(backend->handle, rc); switch(err) { case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: /* The operation did not complete; the same TLS/SSL I/O function should be called again later. This is basically an EWOULDBLOCK equivalent. */ *curlcode = CURLE_AGAIN; return -1; case SSL_ERROR_SYSCALL: { int sockerr = SOCKERRNO; sslerror = ERR_get_error(); if(sslerror) ossl_strerror(sslerror, error_buffer, sizeof(error_buffer)); else if(sockerr) Curl_strerror(sockerr, error_buffer, sizeof(error_buffer)); else { strncpy(error_buffer, SSL_ERROR_to_str(err), sizeof(error_buffer)); error_buffer[sizeof(error_buffer) - 1] = '\0'; } failf(data, OSSL_PACKAGE " SSL_write: %s, errno %d", error_buffer, sockerr); *curlcode = CURLE_SEND_ERROR; return -1; } case SSL_ERROR_SSL: /* A failure in the SSL library occurred, usually a protocol error. The OpenSSL error queue contains more information on the error. 
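   A note on the SSL_ERROR_WANT_READ/WANT_WRITE branch above (an aside, not a
   statement about libcurl internals): such a write must normally be retried
   later with the same buffer and length, unless the context opted out of that
   contract, e.g.:

     SSL_CTX_set_mode(ctx, SSL_MODE_ENABLE_PARTIAL_WRITE |
                           SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);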
*/ sslerror = ERR_get_error(); if(ERR_GET_LIB(sslerror) == ERR_LIB_SSL && ERR_GET_REASON(sslerror) == SSL_R_BIO_NOT_SET && conn->ssl[sockindex].state == ssl_connection_complete #ifndef CURL_DISABLE_PROXY && conn->proxy_ssl[sockindex].state == ssl_connection_complete #endif ) { char ver[120]; ossl_version(ver, 120); failf(data, "Error: %s does not support double SSL tunneling.", ver); } else failf(data, "SSL_write() error: %s", ossl_strerror(sslerror, error_buffer, sizeof(error_buffer))); *curlcode = CURLE_SEND_ERROR; return -1; } /* a true error */ failf(data, OSSL_PACKAGE " SSL_write: %s, errno %d", SSL_ERROR_to_str(err), SOCKERRNO); *curlcode = CURLE_SEND_ERROR; return -1; } *curlcode = CURLE_OK; return (ssize_t)rc; /* number of bytes */ } static ssize_t ossl_recv(struct Curl_easy *data, /* transfer */ int num, /* socketindex */ char *buf, /* store read data here */ size_t buffersize, /* max amount to read */ CURLcode *curlcode) { char error_buffer[256]; unsigned long sslerror; ssize_t nread; int buffsize; struct connectdata *conn = data->conn; struct ssl_connect_data *connssl = &conn->ssl[num]; struct ssl_backend_data *backend = connssl->backend; ERR_clear_error(); buffsize = (buffersize > (size_t)INT_MAX) ? INT_MAX : (int)buffersize; set_logger(conn, data); nread = (ssize_t)SSL_read(backend->handle, buf, buffsize); if(nread <= 0) { /* failed SSL_read */ int err = SSL_get_error(backend->handle, (int)nread); switch(err) { case SSL_ERROR_NONE: /* this is not an error */ break; case SSL_ERROR_ZERO_RETURN: /* no more data */ /* close_notify alert */ if(num == FIRSTSOCKET) /* mark the connection for close if it is indeed the control connection */ connclose(conn, "TLS close_notify"); break; case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: /* there's data pending, re-invoke SSL_read() */ *curlcode = CURLE_AGAIN; return -1; default: /* openssl/ssl.h for SSL_ERROR_SYSCALL says "look at error stack/return value/errno" */ /* https://www.openssl.org/docs/crypto/ERR_get_error.html */ sslerror = ERR_get_error(); if((nread < 0) || sslerror) { /* If the return code was negative or there actually is an error in the queue */ int sockerr = SOCKERRNO; if(sslerror) ossl_strerror(sslerror, error_buffer, sizeof(error_buffer)); else if(sockerr && err == SSL_ERROR_SYSCALL) Curl_strerror(sockerr, error_buffer, sizeof(error_buffer)); else { strncpy(error_buffer, SSL_ERROR_to_str(err), sizeof(error_buffer)); error_buffer[sizeof(error_buffer) - 1] = '\0'; } failf(data, OSSL_PACKAGE " SSL_read: %s, errno %d", error_buffer, sockerr); *curlcode = CURLE_RECV_ERROR; return -1; } /* For debug builds be a little stricter and error on any SSL_ERROR_SYSCALL. For example a server may have closed the connection abruptly without a close_notify alert. For compatibility with older peers we don't do this by default. 
#4624 We can use this to gauge how many users may be affected, and if it goes ok eventually transition to allow in dev and release with the newest OpenSSL: #if (OPENSSL_VERSION_NUMBER >= 0x10101000L) */ #ifdef DEBUGBUILD if(err == SSL_ERROR_SYSCALL) { int sockerr = SOCKERRNO; if(sockerr) Curl_strerror(sockerr, error_buffer, sizeof(error_buffer)); else { msnprintf(error_buffer, sizeof(error_buffer), "Connection closed abruptly"); } failf(data, OSSL_PACKAGE " SSL_read: %s, errno %d" " (Fatal because this is a curl debug build)", error_buffer, sockerr); *curlcode = CURLE_RECV_ERROR; return -1; } #endif } } return nread; } static size_t ossl_version(char *buffer, size_t size) { #ifdef LIBRESSL_VERSION_NUMBER #if LIBRESSL_VERSION_NUMBER < 0x2070100fL return msnprintf(buffer, size, "%s/%lx.%lx.%lx", OSSL_PACKAGE, (LIBRESSL_VERSION_NUMBER>>28)&0xf, (LIBRESSL_VERSION_NUMBER>>20)&0xff, (LIBRESSL_VERSION_NUMBER>>12)&0xff); #else /* OpenSSL_version() first appeared in LibreSSL 2.7.1 */ char *p; int count; const char *ver = OpenSSL_version(OPENSSL_VERSION); const char expected[] = OSSL_PACKAGE " "; /* ie "LibreSSL " */ if(Curl_strncasecompare(ver, expected, sizeof(expected) - 1)) { ver += sizeof(expected) - 1; } count = msnprintf(buffer, size, "%s/%s", OSSL_PACKAGE, ver); for(p = buffer; *p; ++p) { if(ISSPACE(*p)) *p = '_'; } return count; #endif #elif defined(OPENSSL_IS_BORINGSSL) return msnprintf(buffer, size, OSSL_PACKAGE); #elif defined(HAVE_OPENSSL_VERSION) && defined(OPENSSL_VERSION_STRING) return msnprintf(buffer, size, "%s/%s", OSSL_PACKAGE, OpenSSL_version(OPENSSL_VERSION_STRING)); #else /* not LibreSSL, BoringSSL and not using OpenSSL_version */ char sub[3]; unsigned long ssleay_value; sub[2]='\0'; sub[1]='\0'; ssleay_value = OpenSSL_version_num(); if(ssleay_value < 0x906000) { ssleay_value = SSLEAY_VERSION_NUMBER; sub[0]='\0'; } else { if(ssleay_value&0xff0) { int minor_ver = (ssleay_value >> 4) & 0xff; if(minor_ver > 26) { /* handle extended version introduced for 0.9.8za */ sub[1] = (char) ((minor_ver - 1) % 26 + 'a' + 1); sub[0] = 'z'; } else { sub[0] = (char) (minor_ver + 'a' - 1); } } else sub[0]='\0'; } return msnprintf(buffer, size, "%s/%lx.%lx.%lx%s" #ifdef OPENSSL_FIPS "-fips" #endif , OSSL_PACKAGE, (ssleay_value>>28)&0xf, (ssleay_value>>20)&0xff, (ssleay_value>>12)&0xff, sub); #endif /* OPENSSL_IS_BORINGSSL */ } /* can be called with data == NULL */ static CURLcode ossl_random(struct Curl_easy *data, unsigned char *entropy, size_t length) { int rc; if(data) { if(ossl_seed(data)) /* Initiate the seed if not already done */ return CURLE_FAILED_INIT; /* couldn't seed for some reason */ } else { if(!rand_enough()) return CURLE_FAILED_INIT; } /* RAND_bytes() returns 1 on success, 0 otherwise. */ rc = RAND_bytes(entropy, curlx_uztosi(length)); return (rc == 1 ? 
CURLE_OK : CURLE_FAILED_INIT); } #if (OPENSSL_VERSION_NUMBER >= 0x0090800fL) && !defined(OPENSSL_NO_SHA256) static CURLcode ossl_sha256sum(const unsigned char *tmp, /* input */ size_t tmplen, unsigned char *sha256sum /* output */, size_t unused) { EVP_MD_CTX *mdctx; unsigned int len = 0; (void) unused; mdctx = EVP_MD_CTX_create(); if(!mdctx) return CURLE_OUT_OF_MEMORY; EVP_DigestInit(mdctx, EVP_sha256()); EVP_DigestUpdate(mdctx, tmp, tmplen); EVP_DigestFinal_ex(mdctx, sha256sum, &len); EVP_MD_CTX_destroy(mdctx); return CURLE_OK; } #endif static bool ossl_cert_status_request(void) { #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) return TRUE; #else return FALSE; #endif } static void *ossl_get_internals(struct ssl_connect_data *connssl, CURLINFO info) { /* Legacy: CURLINFO_TLS_SESSION must return an SSL_CTX pointer. */ struct ssl_backend_data *backend = connssl->backend; return info == CURLINFO_TLS_SESSION ? (void *)backend->ctx : (void *)backend->handle; } static void ossl_associate_connection(struct Curl_easy *data, struct connectdata *conn, int sockindex) { struct ssl_connect_data *connssl = &conn->ssl[sockindex]; struct ssl_backend_data *backend = connssl->backend; /* If we don't have SSL context, do nothing. */ if(!backend->handle) return; if(SSL_SET_OPTION(primary.sessionid)) { int data_idx = ossl_get_ssl_data_index(); int connectdata_idx = ossl_get_ssl_conn_index(); int sockindex_idx = ossl_get_ssl_sockindex_index(); int proxy_idx = ossl_get_proxy_index(); if(data_idx >= 0 && connectdata_idx >= 0 && sockindex_idx >= 0 && proxy_idx >= 0) { /* Store the data needed for the "new session" callback. * The sockindex is stored as a pointer to an array element. */ SSL_set_ex_data(backend->handle, data_idx, data); SSL_set_ex_data(backend->handle, connectdata_idx, conn); SSL_set_ex_data(backend->handle, sockindex_idx, conn->sock + sockindex); #ifndef CURL_DISABLE_PROXY SSL_set_ex_data(backend->handle, proxy_idx, SSL_IS_PROXY() ? (void *) 1: NULL); #else SSL_set_ex_data(backend->handle, proxy_idx, NULL); #endif } } } /* * Starting with TLS 1.3, the ossl_new_session_cb callback gets called after * the handshake. If the transfer that sets up the callback gets killed before * this callback arrives, we must make sure to properly clear the data to * avoid UAF problems. A future optimization could be to instead store another * transfer that might still be using the same connection. */ static void ossl_disassociate_connection(struct Curl_easy *data, int sockindex) { struct connectdata *conn = data->conn; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; struct ssl_backend_data *backend = connssl->backend; /* If we don't have SSL context, do nothing. */ if(!backend->handle) return; if(SSL_SET_OPTION(primary.sessionid)) { bool isproxy = FALSE; bool incache; void *old_ssl_sessionid = NULL; int data_idx = ossl_get_ssl_data_index(); int connectdata_idx = ossl_get_ssl_conn_index(); int sockindex_idx = ossl_get_ssl_sockindex_index(); int proxy_idx = ossl_get_proxy_index(); if(data_idx >= 0 && connectdata_idx >= 0 && sockindex_idx >= 0 && proxy_idx >= 0) { /* Invalidate the session cache entry, if any */ isproxy = SSL_get_ex_data(backend->handle, proxy_idx) ? TRUE : FALSE; /* Disable references to data in "new session" callback to avoid * accessing a stale pointer. 
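   The ex_data mechanism used here, reduced to its bare form (a sketch with a
   hypothetical index variable and payload pointer):

     static int my_idx = -1;
     if(my_idx < 0)  // once, at init time: reserve an application slot
       my_idx = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL);

     SSL_set_ex_data(ssl, my_idx, some_pointer);  // attach
     void *p = SSL_get_ex_data(ssl, my_idx);      // read back later
     SSL_set_ex_data(ssl, my_idx, NULL);          // detach before the pointer dies

   With NULL callbacks as above OpenSSL never dereferences the stored pointer
   itself, so clearing it, as done right below, is what prevents a
   use-after-free in callbacks that fire later.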
*/ SSL_set_ex_data(backend->handle, data_idx, NULL); SSL_set_ex_data(backend->handle, connectdata_idx, NULL); SSL_set_ex_data(backend->handle, sockindex_idx, NULL); SSL_set_ex_data(backend->handle, proxy_idx, NULL); } Curl_ssl_sessionid_lock(data); incache = !(Curl_ssl_getsessionid(data, conn, isproxy, &old_ssl_sessionid, NULL, sockindex)); if(incache) Curl_ssl_delsessionid(data, old_ssl_sessionid); Curl_ssl_sessionid_unlock(data); } } const struct Curl_ssl Curl_ssl_openssl = { { CURLSSLBACKEND_OPENSSL, "openssl" }, /* info */ SSLSUPP_CA_PATH | SSLSUPP_CAINFO_BLOB | SSLSUPP_CERTINFO | SSLSUPP_PINNEDPUBKEY | SSLSUPP_SSL_CTX | #ifdef HAVE_SSL_CTX_SET_CIPHERSUITES SSLSUPP_TLS13_CIPHERSUITES | #endif SSLSUPP_HTTPS_PROXY, sizeof(struct ssl_backend_data), ossl_init, /* init */ ossl_cleanup, /* cleanup */ ossl_version, /* version */ ossl_check_cxn, /* check_cxn */ ossl_shutdown, /* shutdown */ ossl_data_pending, /* data_pending */ ossl_random, /* random */ ossl_cert_status_request, /* cert_status_request */ ossl_connect, /* connect */ ossl_connect_nonblocking, /* connect_nonblocking */ Curl_ssl_getsock, /* getsock */ ossl_get_internals, /* get_internals */ ossl_close, /* close_one */ ossl_close_all, /* close_all */ ossl_session_free, /* session_free */ ossl_set_engine, /* set_engine */ ossl_set_engine_default, /* set_engine_default */ ossl_engines_list, /* engines_list */ Curl_none_false_start, /* false_start */ #if (OPENSSL_VERSION_NUMBER >= 0x0090800fL) && !defined(OPENSSL_NO_SHA256) ossl_sha256sum, /* sha256sum */ #else NULL, /* sha256sum */ #endif ossl_associate_connection, /* associate_connection */ ossl_disassociate_connection /* disassociate_connection */ }; #endif /* USE_OPENSSL */
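/* The ossl_sha256sum() helper above drives the digest through the explicit
   EVP_MD_CTX create/init/update/final sequence. For a single contiguous
   buffer OpenSSL also offers a one-shot form; a minimal sketch follows (the
   function name is hypothetical and not part of libcurl): */
static int example_sha256(const unsigned char *buf, size_t len,
                          unsigned char out[32])
{
  unsigned int outlen = 0;
  /* EVP_Digest() wraps init, update and final in a single call and
     returns 1 on success */
  return EVP_Digest(buf, len, out, &outlen, EVP_sha256(), NULL) == 1;
}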
static CURLcode ossl_connect_step1(struct Curl_easy *data, struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; char *ciphers; SSL_METHOD_QUAL SSL_METHOD *req_method = NULL; X509_LOOKUP *lookup = NULL; curl_socket_t sockfd = conn->sock[sockindex]; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; ctx_option_t ctx_options = 0; #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME bool sni; const char * const hostname = SSL_HOST_NAME(); #ifdef ENABLE_IPV6 struct in6_addr addr; #else struct in_addr addr; #endif #endif const long int ssl_version = SSL_CONN_CONFIG(version); #ifdef USE_OPENSSL_SRP const enum CURL_TLSAUTH ssl_authtype = SSL_SET_OPTION(authtype); #endif char * const ssl_cert = SSL_SET_OPTION(primary.clientcert); const struct curl_blob *ssl_cert_blob = SSL_SET_OPTION(primary.cert_blob); const struct curl_blob *ca_info_blob = SSL_CONN_CONFIG(ca_info_blob); const char * const ssl_cert_type = SSL_SET_OPTION(cert_type); const char * const ssl_cafile = /* CURLOPT_CAINFO_BLOB overrides CURLOPT_CAINFO */ (ca_info_blob ? NULL : SSL_CONN_CONFIG(CAfile)); const char * const ssl_capath = SSL_CONN_CONFIG(CApath); const bool verifypeer = SSL_CONN_CONFIG(verifypeer); const char * const ssl_crlfile = SSL_SET_OPTION(CRLfile); char error_buffer[256]; struct ssl_backend_data *backend = connssl->backend; bool imported_native_ca = false; DEBUGASSERT(ssl_connect_1 == connssl->connecting_state); /* Make funny stuff to get random input */ result = ossl_seed(data); if(result) return result; SSL_SET_OPTION_LVALUE(certverifyresult) = !X509_V_OK; /* check to see if we've been told to use an explicit SSL/TLS version */ switch(ssl_version) { case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: case CURL_SSLVERSION_TLSv1_0: case CURL_SSLVERSION_TLSv1_1: case CURL_SSLVERSION_TLSv1_2: case CURL_SSLVERSION_TLSv1_3: /* it will be handled later with the context options */ #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) req_method = TLS_client_method(); #else req_method = SSLv23_client_method(); #endif use_sni(TRUE); break; case CURL_SSLVERSION_SSLv2: failf(data, "No SSLv2 support"); return CURLE_NOT_BUILT_IN; case CURL_SSLVERSION_SSLv3: failf(data, "No SSLv3 support"); return CURLE_NOT_BUILT_IN; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } if(backend->ctx) SSL_CTX_free(backend->ctx); backend->ctx = SSL_CTX_new(req_method); if(!backend->ctx) { failf(data, "SSL: couldn't create a context: %s", ossl_strerror(ERR_peek_error(), error_buffer, sizeof(error_buffer))); return CURLE_OUT_OF_MEMORY; } #ifdef SSL_MODE_RELEASE_BUFFERS SSL_CTX_set_mode(backend->ctx, SSL_MODE_RELEASE_BUFFERS); #endif #ifdef SSL_CTRL_SET_MSG_CALLBACK if(data->set.fdebug && data->set.verbose) { /* the SSL trace callback is only used for verbose logging */ SSL_CTX_set_msg_callback(backend->ctx, ossl_trace); SSL_CTX_set_msg_callback_arg(backend->ctx, conn); set_logger(conn, data); } #endif /* OpenSSL contains code to work-around lots of bugs and flaws in various SSL-implementations. SSL_CTX_set_options() is used to enabled those work-arounds. The man page for this option states that SSL_OP_ALL enables all the work-arounds and that "It is usually safe to use SSL_OP_ALL to enable the bug workaround options if compatibility with somewhat broken implementations is desired." The "-no_ticket" option was introduced in Openssl0.9.8j. It's a flag to disable "rfc4507bis session ticket support". 
rfc4507bis was later turned into the proper RFC5077 it seems: https://tools.ietf.org/html/rfc5077 The enabled extension concerns the session management. I wonder how often libcurl stops a connection and then resumes a TLS session. also, sending the session data is some overhead. .I suggest that you just use your proposed patch (which explicitly disables TICKET). If someone writes an application with libcurl and openssl who wants to enable the feature, one can do this in the SSL callback. SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG option enabling allowed proper interoperability with web server Netscape Enterprise Server 2.0.1 which was released back in 1996. Due to CVE-2010-4180, option SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG has become ineffective as of OpenSSL 0.9.8q and 1.0.0c. In order to mitigate CVE-2010-4180 when using previous OpenSSL versions we no longer enable this option regardless of OpenSSL version and SSL_OP_ALL definition. OpenSSL added a work-around for a SSL 3.0/TLS 1.0 CBC vulnerability (https://www.openssl.org/~bodo/tls-cbc.txt). In 0.9.6e they added a bit to SSL_OP_ALL that _disables_ that work-around despite the fact that SSL_OP_ALL is documented to do "rather harmless" workarounds. In order to keep the secure work-around, the SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS bit must not be set. */ ctx_options = SSL_OP_ALL; #ifdef SSL_OP_NO_TICKET ctx_options |= SSL_OP_NO_TICKET; #endif #ifdef SSL_OP_NO_COMPRESSION ctx_options |= SSL_OP_NO_COMPRESSION; #endif #ifdef SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG /* mitigate CVE-2010-4180 */ ctx_options &= ~SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; #endif #ifdef SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS /* unless the user explicitly ask to allow the protocol vulnerability we use the work-around */ if(!SSL_SET_OPTION(enable_beast)) ctx_options &= ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS; #endif switch(ssl_version) { case CURL_SSLVERSION_SSLv2: case CURL_SSLVERSION_SSLv3: return CURLE_NOT_BUILT_IN; /* "--tlsv<x.y>" options mean TLS >= version <x.y> */ case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: /* TLS >= version 1.0 */ case CURL_SSLVERSION_TLSv1_0: /* TLS >= version 1.0 */ case CURL_SSLVERSION_TLSv1_1: /* TLS >= version 1.1 */ case CURL_SSLVERSION_TLSv1_2: /* TLS >= version 1.2 */ case CURL_SSLVERSION_TLSv1_3: /* TLS >= version 1.3 */ /* asking for any TLS version as the minimum, means no SSL versions allowed */ ctx_options |= SSL_OP_NO_SSLv2; ctx_options |= SSL_OP_NO_SSLv3; #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* 1.1.0 */ result = set_ssl_version_min_max(backend->ctx, conn); #else result = set_ssl_version_min_max_legacy(&ctx_options, data, conn, sockindex); #endif if(result != CURLE_OK) return result; break; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } SSL_CTX_set_options(backend->ctx, ctx_options); #ifdef HAS_NPN if(conn->bits.tls_enable_npn) SSL_CTX_set_next_proto_select_cb(backend->ctx, select_next_proto_cb, data); #endif #ifdef HAS_ALPN if(conn->bits.tls_enable_alpn) { int cur = 0; unsigned char protocols[128]; #ifdef USE_HTTP2 if(data->state.httpwant >= CURL_HTTP_VERSION_2 #ifndef CURL_DISABLE_PROXY && (!SSL_IS_PROXY() || !conn->bits.tunnel_proxy) #endif ) { protocols[cur++] = ALPN_H2_LENGTH; memcpy(&protocols[cur], ALPN_H2, ALPN_H2_LENGTH); cur += ALPN_H2_LENGTH; infof(data, "ALPN, offering %s\n", ALPN_H2); } #endif protocols[cur++] = ALPN_HTTP_1_1_LENGTH; memcpy(&protocols[cur], ALPN_HTTP_1_1, ALPN_HTTP_1_1_LENGTH); cur += ALPN_HTTP_1_1_LENGTH; infof(data, "ALPN, 
offering %s\n", ALPN_HTTP_1_1); /* expects length prefixed preference ordered list of protocols in wire * format */ if(SSL_CTX_set_alpn_protos(backend->ctx, protocols, cur)) { failf(data, "Error setting ALPN"); return CURLE_SSL_CONNECT_ERROR; } } #endif if(ssl_cert || ssl_cert_blob || ssl_cert_type) { if(!result && !cert_stuff(data, backend->ctx, ssl_cert, ssl_cert_blob, ssl_cert_type, SSL_SET_OPTION(key), SSL_SET_OPTION(key_blob), SSL_SET_OPTION(key_type), SSL_SET_OPTION(key_passwd))) result = CURLE_SSL_CERTPROBLEM; if(result) /* failf() is already done in cert_stuff() */ return result; } ciphers = SSL_CONN_CONFIG(cipher_list); if(!ciphers) ciphers = (char *)DEFAULT_CIPHER_SELECTION; if(ciphers) { if(!SSL_CTX_set_cipher_list(backend->ctx, ciphers)) { failf(data, "failed setting cipher list: %s", ciphers); return CURLE_SSL_CIPHER; } infof(data, "Cipher selection: %s\n", ciphers); } #ifdef HAVE_SSL_CTX_SET_CIPHERSUITES { char *ciphers13 = SSL_CONN_CONFIG(cipher_list13); if(ciphers13) { if(!SSL_CTX_set_ciphersuites(backend->ctx, ciphers13)) { failf(data, "failed setting TLS 1.3 cipher suite: %s", ciphers13); return CURLE_SSL_CIPHER; } infof(data, "TLS 1.3 cipher selection: %s\n", ciphers13); } } #endif #ifdef HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH /* OpenSSL 1.1.1 requires clients to opt-in for PHA */ SSL_CTX_set_post_handshake_auth(backend->ctx, 1); #endif #ifdef HAVE_SSL_CTX_SET_EC_CURVES { char *curves = SSL_CONN_CONFIG(curves); if(curves) { if(!SSL_CTX_set1_curves_list(backend->ctx, curves)) { failf(data, "failed setting curves list: '%s'", curves); return CURLE_SSL_CIPHER; } } } #endif #ifdef USE_OPENSSL_SRP if(ssl_authtype == CURL_TLSAUTH_SRP) { char * const ssl_username = SSL_SET_OPTION(username); infof(data, "Using TLS-SRP username: %s\n", ssl_username); if(!SSL_CTX_set_srp_username(backend->ctx, ssl_username)) { failf(data, "Unable to set SRP user name"); return CURLE_BAD_FUNCTION_ARGUMENT; } if(!SSL_CTX_set_srp_password(backend->ctx, SSL_SET_OPTION(password))) { failf(data, "failed setting SRP password"); return CURLE_BAD_FUNCTION_ARGUMENT; } if(!SSL_CONN_CONFIG(cipher_list)) { infof(data, "Setting cipher list SRP\n"); if(!SSL_CTX_set_cipher_list(backend->ctx, "SRP")) { failf(data, "failed setting SRP cipher list"); return CURLE_SSL_CIPHER; } } } #endif #if defined(USE_WIN32_CRYPTO) /* Import certificates from the Windows root certificate store if requested. https://stackoverflow.com/questions/9507184/ https://github.com/d3x0r/SACK/blob/master/src/netlib/ssl_layer.c#L1037 https://tools.ietf.org/html/rfc5280 */ if((SSL_CONN_CONFIG(verifypeer) || SSL_CONN_CONFIG(verifyhost)) && (SSL_SET_OPTION(native_ca_store))) { X509_STORE *store = SSL_CTX_get_cert_store(backend->ctx); HCERTSTORE hStore = CertOpenSystemStore(0, TEXT("ROOT")); if(hStore) { PCCERT_CONTEXT pContext = NULL; /* The array of enhanced key usage OIDs will vary per certificate and is declared outside of the loop so that rather than malloc/free each iteration we can grow it with realloc, when necessary. */ CERT_ENHKEY_USAGE *enhkey_usage = NULL; DWORD enhkey_usage_size = 0; /* This loop makes a best effort to import all valid certificates from the MS root store. If a certificate cannot be imported it is skipped. 'result' is used to store only hard-fail conditions (such as out of memory) that cause an early break. 
*/ result = CURLE_OK; for(;;) { X509 *x509; FILETIME now; BYTE key_usage[2]; DWORD req_size; const unsigned char *encoded_cert; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) char cert_name[256]; #endif pContext = CertEnumCertificatesInStore(hStore, pContext); if(!pContext) break; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) if(!CertGetNameStringA(pContext, CERT_NAME_SIMPLE_DISPLAY_TYPE, 0, NULL, cert_name, sizeof(cert_name))) { strcpy(cert_name, "Unknown"); } infof(data, "SSL: Checking cert \"%s\"\n", cert_name); #endif encoded_cert = (const unsigned char *)pContext->pbCertEncoded; if(!encoded_cert) continue; GetSystemTimeAsFileTime(&now); if(CompareFileTime(&pContext->pCertInfo->NotBefore, &now) > 0 || CompareFileTime(&now, &pContext->pCertInfo->NotAfter) > 0) continue; /* If key usage exists check for signing attribute */ if(CertGetIntendedKeyUsage(pContext->dwCertEncodingType, pContext->pCertInfo, key_usage, sizeof(key_usage))) { if(!(key_usage[0] & CERT_KEY_CERT_SIGN_KEY_USAGE)) continue; } else if(GetLastError()) continue; /* If enhanced key usage exists check for server auth attribute. * * Note "In a Microsoft environment, a certificate might also have EKU * extended properties that specify valid uses for the certificate." * The call below checks both, and behavior varies depending on what is * found. For more details see CertGetEnhancedKeyUsage doc. */ if(CertGetEnhancedKeyUsage(pContext, 0, NULL, &req_size)) { if(req_size && req_size > enhkey_usage_size) { void *tmp = realloc(enhkey_usage, req_size); if(!tmp) { failf(data, "SSL: Out of memory allocating for OID list"); result = CURLE_OUT_OF_MEMORY; break; } enhkey_usage = (CERT_ENHKEY_USAGE *)tmp; enhkey_usage_size = req_size; } if(CertGetEnhancedKeyUsage(pContext, 0, enhkey_usage, &req_size)) { if(!enhkey_usage->cUsageIdentifier) { /* "If GetLastError returns CRYPT_E_NOT_FOUND, the certificate is good for all uses. If it returns zero, the certificate has no valid uses." */ if((HRESULT)GetLastError() != CRYPT_E_NOT_FOUND) continue; } else { DWORD i; bool found = false; for(i = 0; i < enhkey_usage->cUsageIdentifier; ++i) { if(!strcmp("1.3.6.1.5.5.7.3.1" /* OID server auth */, enhkey_usage->rgpszUsageIdentifier[i])) { found = true; break; } } if(!found) continue; } } else continue; } else continue; x509 = d2i_X509(NULL, &encoded_cert, pContext->cbCertEncoded); if(!x509) continue; /* Try to import the certificate. This may fail for legitimate reasons such as duplicate certificate, which is allowed by MS but not OpenSSL. */ if(X509_STORE_add_cert(store, x509) == 1) { #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) infof(data, "SSL: Imported cert \"%s\"\n", cert_name); #endif imported_native_ca = true; } X509_free(x509); } free(enhkey_usage); CertFreeCertificateContext(pContext); CertCloseStore(hStore, 0); if(result) return result; } if(imported_native_ca) infof(data, "successfully imported windows ca store\n"); else infof(data, "error importing windows ca store, continuing anyway\n"); } #endif if(ca_info_blob) { result = load_cacert_from_memory(backend->ctx, ca_info_blob); if(result) { if(result == CURLE_OUT_OF_MEMORY || (verifypeer && !imported_native_ca)) { failf(data, "error importing CA certificate blob"); return result; } /* Only warning if no certificate verification is required. 
*/ infof(data, "error importing CA certificate blob, continuing anyway\n"); } } #if defined(OPENSSL_VERSION_MAJOR) && (OPENSSL_VERSION_MAJOR >= 3) /* OpenSSL 3.0.0 has deprecated SSL_CTX_load_verify_locations */ { if(ssl_cafile) { if(!SSL_CTX_load_verify_file(backend->ctx, ssl_cafile)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate file: %s", ssl_cafile); return CURLE_SSL_CACERT_BADFILE; } /* Continue with a warning if no certificate verif is required. */ infof(data, "error setting certificate file, continuing anyway\n"); } infof(data, " CAfile: %s\n", ssl_cafile); } if(ssl_capath) { if(!SSL_CTX_load_verify_dir(backend->ctx, ssl_capath)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate path: %s", ssl_capath); return CURLE_SSL_CACERT_BADFILE; } /* Continue with a warning if no certificate verif is required. */ infof(data, "error setting certificate path, continuing anyway\n"); } infof(data, " CApath: %s\n", ssl_capath); } } #else if(ssl_cafile || ssl_capath) { /* tell SSL where to find CA certificates that are used to verify the servers certificate. */ if(!SSL_CTX_load_verify_locations(backend->ctx, ssl_cafile, ssl_capath)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate verify locations:" " CAfile: %s CApath: %s", ssl_cafile ? ssl_cafile : "none", ssl_capath ? ssl_capath : "none"); return CURLE_SSL_CACERT_BADFILE; } /* Just continue with a warning if no strict certificate verification is required. */ infof(data, "error setting certificate verify locations," " continuing anyway:\n"); } else { /* Everything is fine. */ infof(data, "successfully set certificate verify locations:\n"); } infof(data, " CAfile: %s\n", ssl_cafile ? ssl_cafile : "none"); infof(data, " CApath: %s\n", ssl_capath ? ssl_capath : "none"); } #endif #ifdef CURL_CA_FALLBACK if(verifypeer && !ca_info_blob && !ssl_cafile && !ssl_capath && !imported_native_ca) { /* verifying the peer without any CA certificates won't work so use openssl's built in default as fallback */ SSL_CTX_set_default_verify_paths(backend->ctx); } #endif if(ssl_crlfile) { /* tell SSL where to find CRL file that is used to check certificate * revocation */ lookup = X509_STORE_add_lookup(SSL_CTX_get_cert_store(backend->ctx), X509_LOOKUP_file()); if(!lookup || (!X509_load_crl_file(lookup, ssl_crlfile, X509_FILETYPE_PEM)) ) { failf(data, "error loading CRL file: %s", ssl_crlfile); return CURLE_SSL_CRL_BADFILE; } /* Everything is fine. */ infof(data, "successfully load CRL file:\n"); X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL); infof(data, " CRLfile: %s\n", ssl_crlfile); } if(verifypeer) { /* Try building a chain using issuers in the trusted store first to avoid problems with server-sent legacy intermediates. Newer versions of OpenSSL do alternate chain checking by default but we do not know how to determine that in a reliable manner. 
https://rt.openssl.org/Ticket/Display.html?id=3621&user=guest&pass=guest */ #if defined(X509_V_FLAG_TRUSTED_FIRST) X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_TRUSTED_FIRST); #endif #ifdef X509_V_FLAG_PARTIAL_CHAIN if(!SSL_SET_OPTION(no_partialchain) && !ssl_crlfile) { /* Have intermediate certificates in the trust store be treated as trust-anchors, in the same way as self-signed root CA certificates are. This allows users to verify servers using the intermediate cert only, instead of needing the whole chain. Due to OpenSSL bug https://github.com/openssl/openssl/issues/5081 we cannot do partial chains with CRL check. */ X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_PARTIAL_CHAIN); } #endif } /* SSL always tries to verify the peer, this only says whether it should * fail to connect if the verification fails, or if it should continue * anyway. In the latter case the result of the verification is checked with * SSL_get_verify_result() below. */ SSL_CTX_set_verify(backend->ctx, verifypeer ? SSL_VERIFY_PEER : SSL_VERIFY_NONE, NULL); /* Enable logging of secrets to the file specified in env SSLKEYLOGFILE. */ #ifdef HAVE_KEYLOG_CALLBACK if(Curl_tls_keylog_enabled()) { SSL_CTX_set_keylog_callback(backend->ctx, ossl_keylog_callback); } #endif /* Enable the session cache because it's a prerequisite for the "new session" * callback. Use the "external storage" mode to avoid that OpenSSL creates * an internal session cache. */ SSL_CTX_set_session_cache_mode(backend->ctx, SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_NO_INTERNAL); SSL_CTX_sess_set_new_cb(backend->ctx, ossl_new_session_cb); /* give application a chance to interfere with SSL set up. */ if(data->set.ssl.fsslctx) { Curl_set_in_callback(data, true); result = (*data->set.ssl.fsslctx)(data, backend->ctx, data->set.ssl.fsslctxp); Curl_set_in_callback(data, false); if(result) { failf(data, "error signaled by ssl ctx callback"); return result; } } /* Lets make an SSL structure */ if(backend->handle) SSL_free(backend->handle); backend->handle = SSL_new(backend->ctx); if(!backend->handle) { failf(data, "SSL: couldn't create a context (handle)!"); return CURLE_OUT_OF_MEMORY; } #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) if(SSL_CONN_CONFIG(verifystatus)) SSL_set_tlsext_status_type(backend->handle, TLSEXT_STATUSTYPE_ocsp); #endif #if defined(OPENSSL_IS_BORINGSSL) && defined(ALLOW_RENEG) SSL_set_renegotiate_mode(backend->handle, ssl_renegotiate_freely); #endif SSL_set_connect_state(backend->handle); backend->server_cert = 0x0; #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME if((0 == Curl_inet_pton(AF_INET, hostname, &addr)) && #ifdef ENABLE_IPV6 (0 == Curl_inet_pton(AF_INET6, hostname, &addr)) && #endif sni) { size_t nlen = strlen(hostname); if((long)nlen >= data->set.buffer_size) /* this is seriously messed up */ return CURLE_SSL_CONNECT_ERROR; /* RFC 6066 section 3 says the SNI field is case insensitive, but browsers send the data lowercase and subsequently there are now numerous servers out there that don't work unless the name is lowercased */ Curl_strntolower(data->state.buffer, hostname, nlen); data->state.buffer[nlen] = 0; if(!SSL_set_tlsext_host_name(backend->handle, data->state.buffer)) infof(data, "WARNING: failed to configure server name indication (SNI) " "TLS extension\n"); } #endif /* Check if there's a cached ID we can/should use here! 
*/ if(SSL_SET_OPTION(primary.sessionid)) { void *ssl_sessionid = NULL; int data_idx = ossl_get_ssl_data_index(); int connectdata_idx = ossl_get_ssl_conn_index(); int sockindex_idx = ossl_get_ssl_sockindex_index(); int proxy_idx = ossl_get_proxy_index(); if(data_idx >= 0 && connectdata_idx >= 0 && sockindex_idx >= 0 && proxy_idx >= 0) { /* Store the data needed for the "new session" callback. * The sockindex is stored as a pointer to an array element. */ SSL_set_ex_data(backend->handle, data_idx, data); SSL_set_ex_data(backend->handle, connectdata_idx, conn); SSL_set_ex_data(backend->handle, sockindex_idx, conn->sock + sockindex); #ifndef CURL_DISABLE_PROXY SSL_set_ex_data(backend->handle, proxy_idx, SSL_IS_PROXY() ? (void *) 1: NULL); #else SSL_set_ex_data(backend->handle, proxy_idx, NULL); #endif } Curl_ssl_sessionid_lock(data); if(!Curl_ssl_getsessionid(data, conn, SSL_IS_PROXY() ? TRUE : FALSE, &ssl_sessionid, NULL, sockindex)) { /* we got a session id, use it! */ if(!SSL_set_session(backend->handle, ssl_sessionid)) { Curl_ssl_sessionid_unlock(data); failf(data, "SSL: SSL_set_session failed: %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return CURLE_SSL_CONNECT_ERROR; } /* Informational message */ infof(data, "SSL re-using session ID\n"); } Curl_ssl_sessionid_unlock(data); } #ifndef CURL_DISABLE_PROXY if(conn->proxy_ssl[sockindex].use) { BIO *const bio = BIO_new(BIO_f_ssl()); SSL *handle = conn->proxy_ssl[sockindex].backend->handle; DEBUGASSERT(ssl_connection_complete == conn->proxy_ssl[sockindex].state); DEBUGASSERT(handle != NULL); DEBUGASSERT(bio != NULL); BIO_set_ssl(bio, handle, FALSE); SSL_set_bio(backend->handle, bio, bio); } else #endif if(!SSL_set_fd(backend->handle, (int)sockfd)) { /* pass the raw socket into the SSL layers */ failf(data, "SSL: SSL_set_fd failed: %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return CURLE_SSL_CONNECT_ERROR; } connssl->connecting_state = ssl_connect_2; return CURLE_OK; }
static CURLcode ossl_connect_step1(struct Curl_easy *data, struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; char *ciphers; SSL_METHOD_QUAL SSL_METHOD *req_method = NULL; X509_LOOKUP *lookup = NULL; curl_socket_t sockfd = conn->sock[sockindex]; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; ctx_option_t ctx_options = 0; void *ssl_sessionid = NULL; #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME bool sni; const char * const hostname = SSL_HOST_NAME(); #ifdef ENABLE_IPV6 struct in6_addr addr; #else struct in_addr addr; #endif #endif const long int ssl_version = SSL_CONN_CONFIG(version); #ifdef USE_OPENSSL_SRP const enum CURL_TLSAUTH ssl_authtype = SSL_SET_OPTION(authtype); #endif char * const ssl_cert = SSL_SET_OPTION(primary.clientcert); const struct curl_blob *ssl_cert_blob = SSL_SET_OPTION(primary.cert_blob); const struct curl_blob *ca_info_blob = SSL_CONN_CONFIG(ca_info_blob); const char * const ssl_cert_type = SSL_SET_OPTION(cert_type); const char * const ssl_cafile = /* CURLOPT_CAINFO_BLOB overrides CURLOPT_CAINFO */ (ca_info_blob ? NULL : SSL_CONN_CONFIG(CAfile)); const char * const ssl_capath = SSL_CONN_CONFIG(CApath); const bool verifypeer = SSL_CONN_CONFIG(verifypeer); const char * const ssl_crlfile = SSL_SET_OPTION(CRLfile); char error_buffer[256]; struct ssl_backend_data *backend = connssl->backend; bool imported_native_ca = false; DEBUGASSERT(ssl_connect_1 == connssl->connecting_state); /* Make funny stuff to get random input */ result = ossl_seed(data); if(result) return result; SSL_SET_OPTION_LVALUE(certverifyresult) = !X509_V_OK; /* check to see if we've been told to use an explicit SSL/TLS version */ switch(ssl_version) { case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: case CURL_SSLVERSION_TLSv1_0: case CURL_SSLVERSION_TLSv1_1: case CURL_SSLVERSION_TLSv1_2: case CURL_SSLVERSION_TLSv1_3: /* it will be handled later with the context options */ #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) req_method = TLS_client_method(); #else req_method = SSLv23_client_method(); #endif use_sni(TRUE); break; case CURL_SSLVERSION_SSLv2: failf(data, "No SSLv2 support"); return CURLE_NOT_BUILT_IN; case CURL_SSLVERSION_SSLv3: failf(data, "No SSLv3 support"); return CURLE_NOT_BUILT_IN; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } if(backend->ctx) SSL_CTX_free(backend->ctx); backend->ctx = SSL_CTX_new(req_method); if(!backend->ctx) { failf(data, "SSL: couldn't create a context: %s", ossl_strerror(ERR_peek_error(), error_buffer, sizeof(error_buffer))); return CURLE_OUT_OF_MEMORY; } #ifdef SSL_MODE_RELEASE_BUFFERS SSL_CTX_set_mode(backend->ctx, SSL_MODE_RELEASE_BUFFERS); #endif #ifdef SSL_CTRL_SET_MSG_CALLBACK if(data->set.fdebug && data->set.verbose) { /* the SSL trace callback is only used for verbose logging */ SSL_CTX_set_msg_callback(backend->ctx, ossl_trace); SSL_CTX_set_msg_callback_arg(backend->ctx, conn); set_logger(conn, data); } #endif /* OpenSSL contains code to work-around lots of bugs and flaws in various SSL-implementations. SSL_CTX_set_options() is used to enabled those work-arounds. The man page for this option states that SSL_OP_ALL enables all the work-arounds and that "It is usually safe to use SSL_OP_ALL to enable the bug workaround options if compatibility with somewhat broken implementations is desired." The "-no_ticket" option was introduced in Openssl0.9.8j. It's a flag to disable "rfc4507bis session ticket support". 
rfc4507bis was later turned into the proper RFC5077 it seems: https://tools.ietf.org/html/rfc5077 The enabled extension concerns the session management. I wonder how often libcurl stops a connection and then resumes a TLS session. also, sending the session data is some overhead. .I suggest that you just use your proposed patch (which explicitly disables TICKET). If someone writes an application with libcurl and openssl who wants to enable the feature, one can do this in the SSL callback. SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG option enabling allowed proper interoperability with web server Netscape Enterprise Server 2.0.1 which was released back in 1996. Due to CVE-2010-4180, option SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG has become ineffective as of OpenSSL 0.9.8q and 1.0.0c. In order to mitigate CVE-2010-4180 when using previous OpenSSL versions we no longer enable this option regardless of OpenSSL version and SSL_OP_ALL definition. OpenSSL added a work-around for a SSL 3.0/TLS 1.0 CBC vulnerability (https://www.openssl.org/~bodo/tls-cbc.txt). In 0.9.6e they added a bit to SSL_OP_ALL that _disables_ that work-around despite the fact that SSL_OP_ALL is documented to do "rather harmless" workarounds. In order to keep the secure work-around, the SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS bit must not be set. */ ctx_options = SSL_OP_ALL; #ifdef SSL_OP_NO_TICKET ctx_options |= SSL_OP_NO_TICKET; #endif #ifdef SSL_OP_NO_COMPRESSION ctx_options |= SSL_OP_NO_COMPRESSION; #endif #ifdef SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG /* mitigate CVE-2010-4180 */ ctx_options &= ~SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; #endif #ifdef SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS /* unless the user explicitly ask to allow the protocol vulnerability we use the work-around */ if(!SSL_SET_OPTION(enable_beast)) ctx_options &= ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS; #endif switch(ssl_version) { case CURL_SSLVERSION_SSLv2: case CURL_SSLVERSION_SSLv3: return CURLE_NOT_BUILT_IN; /* "--tlsv<x.y>" options mean TLS >= version <x.y> */ case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: /* TLS >= version 1.0 */ case CURL_SSLVERSION_TLSv1_0: /* TLS >= version 1.0 */ case CURL_SSLVERSION_TLSv1_1: /* TLS >= version 1.1 */ case CURL_SSLVERSION_TLSv1_2: /* TLS >= version 1.2 */ case CURL_SSLVERSION_TLSv1_3: /* TLS >= version 1.3 */ /* asking for any TLS version as the minimum, means no SSL versions allowed */ ctx_options |= SSL_OP_NO_SSLv2; ctx_options |= SSL_OP_NO_SSLv3; #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* 1.1.0 */ result = set_ssl_version_min_max(backend->ctx, conn); #else result = set_ssl_version_min_max_legacy(&ctx_options, data, conn, sockindex); #endif if(result != CURLE_OK) return result; break; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } SSL_CTX_set_options(backend->ctx, ctx_options); #ifdef HAS_NPN if(conn->bits.tls_enable_npn) SSL_CTX_set_next_proto_select_cb(backend->ctx, select_next_proto_cb, data); #endif #ifdef HAS_ALPN if(conn->bits.tls_enable_alpn) { int cur = 0; unsigned char protocols[128]; #ifdef USE_HTTP2 if(data->state.httpwant >= CURL_HTTP_VERSION_2 #ifndef CURL_DISABLE_PROXY && (!SSL_IS_PROXY() || !conn->bits.tunnel_proxy) #endif ) { protocols[cur++] = ALPN_H2_LENGTH; memcpy(&protocols[cur], ALPN_H2, ALPN_H2_LENGTH); cur += ALPN_H2_LENGTH; infof(data, "ALPN, offering %s\n", ALPN_H2); } #endif protocols[cur++] = ALPN_HTTP_1_1_LENGTH; memcpy(&protocols[cur], ALPN_HTTP_1_1, ALPN_HTTP_1_1_LENGTH); cur += ALPN_HTTP_1_1_LENGTH; infof(data, "ALPN, 
offering %s\n", ALPN_HTTP_1_1); /* expects length prefixed preference ordered list of protocols in wire * format */ if(SSL_CTX_set_alpn_protos(backend->ctx, protocols, cur)) { failf(data, "Error setting ALPN"); return CURLE_SSL_CONNECT_ERROR; } } #endif if(ssl_cert || ssl_cert_blob || ssl_cert_type) { if(!result && !cert_stuff(data, backend->ctx, ssl_cert, ssl_cert_blob, ssl_cert_type, SSL_SET_OPTION(key), SSL_SET_OPTION(key_blob), SSL_SET_OPTION(key_type), SSL_SET_OPTION(key_passwd))) result = CURLE_SSL_CERTPROBLEM; if(result) /* failf() is already done in cert_stuff() */ return result; } ciphers = SSL_CONN_CONFIG(cipher_list); if(!ciphers) ciphers = (char *)DEFAULT_CIPHER_SELECTION; if(ciphers) { if(!SSL_CTX_set_cipher_list(backend->ctx, ciphers)) { failf(data, "failed setting cipher list: %s", ciphers); return CURLE_SSL_CIPHER; } infof(data, "Cipher selection: %s\n", ciphers); } #ifdef HAVE_SSL_CTX_SET_CIPHERSUITES { char *ciphers13 = SSL_CONN_CONFIG(cipher_list13); if(ciphers13) { if(!SSL_CTX_set_ciphersuites(backend->ctx, ciphers13)) { failf(data, "failed setting TLS 1.3 cipher suite: %s", ciphers13); return CURLE_SSL_CIPHER; } infof(data, "TLS 1.3 cipher selection: %s\n", ciphers13); } } #endif #ifdef HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH /* OpenSSL 1.1.1 requires clients to opt-in for PHA */ SSL_CTX_set_post_handshake_auth(backend->ctx, 1); #endif #ifdef HAVE_SSL_CTX_SET_EC_CURVES { char *curves = SSL_CONN_CONFIG(curves); if(curves) { if(!SSL_CTX_set1_curves_list(backend->ctx, curves)) { failf(data, "failed setting curves list: '%s'", curves); return CURLE_SSL_CIPHER; } } } #endif #ifdef USE_OPENSSL_SRP if(ssl_authtype == CURL_TLSAUTH_SRP) { char * const ssl_username = SSL_SET_OPTION(username); infof(data, "Using TLS-SRP username: %s\n", ssl_username); if(!SSL_CTX_set_srp_username(backend->ctx, ssl_username)) { failf(data, "Unable to set SRP user name"); return CURLE_BAD_FUNCTION_ARGUMENT; } if(!SSL_CTX_set_srp_password(backend->ctx, SSL_SET_OPTION(password))) { failf(data, "failed setting SRP password"); return CURLE_BAD_FUNCTION_ARGUMENT; } if(!SSL_CONN_CONFIG(cipher_list)) { infof(data, "Setting cipher list SRP\n"); if(!SSL_CTX_set_cipher_list(backend->ctx, "SRP")) { failf(data, "failed setting SRP cipher list"); return CURLE_SSL_CIPHER; } } } #endif #if defined(USE_WIN32_CRYPTO) /* Import certificates from the Windows root certificate store if requested. https://stackoverflow.com/questions/9507184/ https://github.com/d3x0r/SACK/blob/master/src/netlib/ssl_layer.c#L1037 https://tools.ietf.org/html/rfc5280 */ if((SSL_CONN_CONFIG(verifypeer) || SSL_CONN_CONFIG(verifyhost)) && (SSL_SET_OPTION(native_ca_store))) { X509_STORE *store = SSL_CTX_get_cert_store(backend->ctx); HCERTSTORE hStore = CertOpenSystemStore(0, TEXT("ROOT")); if(hStore) { PCCERT_CONTEXT pContext = NULL; /* The array of enhanced key usage OIDs will vary per certificate and is declared outside of the loop so that rather than malloc/free each iteration we can grow it with realloc, when necessary. */ CERT_ENHKEY_USAGE *enhkey_usage = NULL; DWORD enhkey_usage_size = 0; /* This loop makes a best effort to import all valid certificates from the MS root store. If a certificate cannot be imported it is skipped. 'result' is used to store only hard-fail conditions (such as out of memory) that cause an early break. 
*/ result = CURLE_OK; for(;;) { X509 *x509; FILETIME now; BYTE key_usage[2]; DWORD req_size; const unsigned char *encoded_cert; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) char cert_name[256]; #endif pContext = CertEnumCertificatesInStore(hStore, pContext); if(!pContext) break; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) if(!CertGetNameStringA(pContext, CERT_NAME_SIMPLE_DISPLAY_TYPE, 0, NULL, cert_name, sizeof(cert_name))) { strcpy(cert_name, "Unknown"); } infof(data, "SSL: Checking cert \"%s\"\n", cert_name); #endif encoded_cert = (const unsigned char *)pContext->pbCertEncoded; if(!encoded_cert) continue; GetSystemTimeAsFileTime(&now); if(CompareFileTime(&pContext->pCertInfo->NotBefore, &now) > 0 || CompareFileTime(&now, &pContext->pCertInfo->NotAfter) > 0) continue; /* If key usage exists check for signing attribute */ if(CertGetIntendedKeyUsage(pContext->dwCertEncodingType, pContext->pCertInfo, key_usage, sizeof(key_usage))) { if(!(key_usage[0] & CERT_KEY_CERT_SIGN_KEY_USAGE)) continue; } else if(GetLastError()) continue; /* If enhanced key usage exists check for server auth attribute. * * Note "In a Microsoft environment, a certificate might also have EKU * extended properties that specify valid uses for the certificate." * The call below checks both, and behavior varies depending on what is * found. For more details see CertGetEnhancedKeyUsage doc. */ if(CertGetEnhancedKeyUsage(pContext, 0, NULL, &req_size)) { if(req_size && req_size > enhkey_usage_size) { void *tmp = realloc(enhkey_usage, req_size); if(!tmp) { failf(data, "SSL: Out of memory allocating for OID list"); result = CURLE_OUT_OF_MEMORY; break; } enhkey_usage = (CERT_ENHKEY_USAGE *)tmp; enhkey_usage_size = req_size; } if(CertGetEnhancedKeyUsage(pContext, 0, enhkey_usage, &req_size)) { if(!enhkey_usage->cUsageIdentifier) { /* "If GetLastError returns CRYPT_E_NOT_FOUND, the certificate is good for all uses. If it returns zero, the certificate has no valid uses." */ if((HRESULT)GetLastError() != CRYPT_E_NOT_FOUND) continue; } else { DWORD i; bool found = false; for(i = 0; i < enhkey_usage->cUsageIdentifier; ++i) { if(!strcmp("1.3.6.1.5.5.7.3.1" /* OID server auth */, enhkey_usage->rgpszUsageIdentifier[i])) { found = true; break; } } if(!found) continue; } } else continue; } else continue; x509 = d2i_X509(NULL, &encoded_cert, pContext->cbCertEncoded); if(!x509) continue; /* Try to import the certificate. This may fail for legitimate reasons such as duplicate certificate, which is allowed by MS but not OpenSSL. */ if(X509_STORE_add_cert(store, x509) == 1) { #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) infof(data, "SSL: Imported cert \"%s\"\n", cert_name); #endif imported_native_ca = true; } X509_free(x509); } free(enhkey_usage); CertFreeCertificateContext(pContext); CertCloseStore(hStore, 0); if(result) return result; } if(imported_native_ca) infof(data, "successfully imported windows ca store\n"); else infof(data, "error importing windows ca store, continuing anyway\n"); } #endif if(ca_info_blob) { result = load_cacert_from_memory(backend->ctx, ca_info_blob); if(result) { if(result == CURLE_OUT_OF_MEMORY || (verifypeer && !imported_native_ca)) { failf(data, "error importing CA certificate blob"); return result; } /* Only warning if no certificate verification is required. 
*/ infof(data, "error importing CA certificate blob, continuing anyway\n"); } } #if defined(OPENSSL_VERSION_MAJOR) && (OPENSSL_VERSION_MAJOR >= 3) /* OpenSSL 3.0.0 has deprecated SSL_CTX_load_verify_locations */ { if(ssl_cafile) { if(!SSL_CTX_load_verify_file(backend->ctx, ssl_cafile)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate file: %s", ssl_cafile); return CURLE_SSL_CACERT_BADFILE; } /* Continue with a warning if no certificate verif is required. */ infof(data, "error setting certificate file, continuing anyway\n"); } infof(data, " CAfile: %s\n", ssl_cafile); } if(ssl_capath) { if(!SSL_CTX_load_verify_dir(backend->ctx, ssl_capath)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate path: %s", ssl_capath); return CURLE_SSL_CACERT_BADFILE; } /* Continue with a warning if no certificate verif is required. */ infof(data, "error setting certificate path, continuing anyway\n"); } infof(data, " CApath: %s\n", ssl_capath); } } #else if(ssl_cafile || ssl_capath) { /* tell SSL where to find CA certificates that are used to verify the servers certificate. */ if(!SSL_CTX_load_verify_locations(backend->ctx, ssl_cafile, ssl_capath)) { if(verifypeer && !imported_native_ca) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate verify locations:" " CAfile: %s CApath: %s", ssl_cafile ? ssl_cafile : "none", ssl_capath ? ssl_capath : "none"); return CURLE_SSL_CACERT_BADFILE; } /* Just continue with a warning if no strict certificate verification is required. */ infof(data, "error setting certificate verify locations," " continuing anyway:\n"); } else { /* Everything is fine. */ infof(data, "successfully set certificate verify locations:\n"); } infof(data, " CAfile: %s\n", ssl_cafile ? ssl_cafile : "none"); infof(data, " CApath: %s\n", ssl_capath ? ssl_capath : "none"); } #endif #ifdef CURL_CA_FALLBACK if(verifypeer && !ca_info_blob && !ssl_cafile && !ssl_capath && !imported_native_ca) { /* verifying the peer without any CA certificates won't work so use openssl's built in default as fallback */ SSL_CTX_set_default_verify_paths(backend->ctx); } #endif if(ssl_crlfile) { /* tell SSL where to find CRL file that is used to check certificate * revocation */ lookup = X509_STORE_add_lookup(SSL_CTX_get_cert_store(backend->ctx), X509_LOOKUP_file()); if(!lookup || (!X509_load_crl_file(lookup, ssl_crlfile, X509_FILETYPE_PEM)) ) { failf(data, "error loading CRL file: %s", ssl_crlfile); return CURLE_SSL_CRL_BADFILE; } /* Everything is fine. */ infof(data, "successfully load CRL file:\n"); X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL); infof(data, " CRLfile: %s\n", ssl_crlfile); } if(verifypeer) { /* Try building a chain using issuers in the trusted store first to avoid problems with server-sent legacy intermediates. Newer versions of OpenSSL do alternate chain checking by default but we do not know how to determine that in a reliable manner. 
https://rt.openssl.org/Ticket/Display.html?id=3621&user=guest&pass=guest */ #if defined(X509_V_FLAG_TRUSTED_FIRST) X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_TRUSTED_FIRST); #endif #ifdef X509_V_FLAG_PARTIAL_CHAIN if(!SSL_SET_OPTION(no_partialchain) && !ssl_crlfile) { /* Have intermediate certificates in the trust store be treated as trust-anchors, in the same way as self-signed root CA certificates are. This allows users to verify servers using the intermediate cert only, instead of needing the whole chain. Due to OpenSSL bug https://github.com/openssl/openssl/issues/5081 we cannot do partial chains with CRL check. */ X509_STORE_set_flags(SSL_CTX_get_cert_store(backend->ctx), X509_V_FLAG_PARTIAL_CHAIN); } #endif } /* SSL always tries to verify the peer, this only says whether it should * fail to connect if the verification fails, or if it should continue * anyway. In the latter case the result of the verification is checked with * SSL_get_verify_result() below. */ SSL_CTX_set_verify(backend->ctx, verifypeer ? SSL_VERIFY_PEER : SSL_VERIFY_NONE, NULL); /* Enable logging of secrets to the file specified in env SSLKEYLOGFILE. */ #ifdef HAVE_KEYLOG_CALLBACK if(Curl_tls_keylog_enabled()) { SSL_CTX_set_keylog_callback(backend->ctx, ossl_keylog_callback); } #endif /* Enable the session cache because it's a prerequisite for the "new session" * callback. Use the "external storage" mode to avoid that OpenSSL creates * an internal session cache. */ SSL_CTX_set_session_cache_mode(backend->ctx, SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_NO_INTERNAL); SSL_CTX_sess_set_new_cb(backend->ctx, ossl_new_session_cb); /* give application a chance to interfere with SSL set up. */ if(data->set.ssl.fsslctx) { Curl_set_in_callback(data, true); result = (*data->set.ssl.fsslctx)(data, backend->ctx, data->set.ssl.fsslctxp); Curl_set_in_callback(data, false); if(result) { failf(data, "error signaled by ssl ctx callback"); return result; } } /* Lets make an SSL structure */ if(backend->handle) SSL_free(backend->handle); backend->handle = SSL_new(backend->ctx); if(!backend->handle) { failf(data, "SSL: couldn't create a context (handle)!"); return CURLE_OUT_OF_MEMORY; } #if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \ !defined(OPENSSL_NO_OCSP) if(SSL_CONN_CONFIG(verifystatus)) SSL_set_tlsext_status_type(backend->handle, TLSEXT_STATUSTYPE_ocsp); #endif #if defined(OPENSSL_IS_BORINGSSL) && defined(ALLOW_RENEG) SSL_set_renegotiate_mode(backend->handle, ssl_renegotiate_freely); #endif SSL_set_connect_state(backend->handle); backend->server_cert = 0x0; #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME if((0 == Curl_inet_pton(AF_INET, hostname, &addr)) && #ifdef ENABLE_IPV6 (0 == Curl_inet_pton(AF_INET6, hostname, &addr)) && #endif sni) { size_t nlen = strlen(hostname); if((long)nlen >= data->set.buffer_size) /* this is seriously messed up */ return CURLE_SSL_CONNECT_ERROR; /* RFC 6066 section 3 says the SNI field is case insensitive, but browsers send the data lowercase and subsequently there are now numerous servers out there that don't work unless the name is lowercased */ Curl_strntolower(data->state.buffer, hostname, nlen); data->state.buffer[nlen] = 0; if(!SSL_set_tlsext_host_name(backend->handle, data->state.buffer)) infof(data, "WARNING: failed to configure server name indication (SNI) " "TLS extension\n"); } #endif ossl_associate_connection(data, conn, sockindex); Curl_ssl_sessionid_lock(data); if(!Curl_ssl_getsessionid(data, conn, SSL_IS_PROXY() ? 
TRUE : FALSE, &ssl_sessionid, NULL, sockindex)) { /* we got a session id, use it! */ if(!SSL_set_session(backend->handle, ssl_sessionid)) { Curl_ssl_sessionid_unlock(data); failf(data, "SSL: SSL_set_session failed: %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return CURLE_SSL_CONNECT_ERROR; } /* Informational message */ infof(data, "SSL re-using session ID\n"); } Curl_ssl_sessionid_unlock(data); #ifndef CURL_DISABLE_PROXY if(conn->proxy_ssl[sockindex].use) { BIO *const bio = BIO_new(BIO_f_ssl()); SSL *handle = conn->proxy_ssl[sockindex].backend->handle; DEBUGASSERT(ssl_connection_complete == conn->proxy_ssl[sockindex].state); DEBUGASSERT(handle != NULL); DEBUGASSERT(bio != NULL); BIO_set_ssl(bio, handle, FALSE); SSL_set_bio(backend->handle, bio, bio); } else #endif if(!SSL_set_fd(backend->handle, (int)sockfd)) { /* pass the raw socket into the SSL layers */ failf(data, "SSL: SSL_set_fd failed: %s", ossl_strerror(ERR_get_error(), error_buffer, sizeof(error_buffer))); return CURLE_SSL_CONNECT_ERROR; } connssl->connecting_state = ssl_connect_2; return CURLE_OK; }
{'added': [(243, 'static void ossl_associate_connection(struct Curl_easy *data,'), (244, ' struct connectdata *conn,'), (245, ' int sockindex);'), (246, ''), (2588, ' void *ssl_sessionid = NULL;'), (3233, ' ossl_associate_connection(data, conn, sockindex);'), (3235, ' Curl_ssl_sessionid_lock(data);'), (3236, ' if(!Curl_ssl_getsessionid(data, conn, SSL_IS_PROXY() ? TRUE : FALSE,'), (3237, ' &ssl_sessionid, NULL, sockindex)) {'), (3238, ' /* we got a session id, use it! */'), (3239, ' if(!SSL_set_session(backend->handle, ssl_sessionid)) {'), (3240, ' Curl_ssl_sessionid_unlock(data);'), (3241, ' failf(data, "SSL: SSL_set_session failed: %s",'), (3242, ' ossl_strerror(ERR_get_error(), error_buffer,'), (3243, ' sizeof(error_buffer)));'), (3244, ' return CURLE_SSL_CONNECT_ERROR;'), (3246, ' /* Informational message */'), (3247, ' infof(data, "SSL re-using session ID\\n");'), (3249, ' Curl_ssl_sessionid_unlock(data);'), (4483, 'static void ossl_associate_connection(struct Curl_easy *data,'), (4484, ' struct connectdata *conn,'), (4485, ' int sockindex)'), (4486, '{'), (4487, ' struct ssl_connect_data *connssl = &conn->ssl[sockindex];'), (4488, ' struct ssl_backend_data *backend = connssl->backend;'), (4489, ''), (4490, " /* If we don't have SSL context, do nothing. */"), (4491, ' if(!backend->handle)'), (4492, ' return;'), (4493, ''), (4494, ' if(SSL_SET_OPTION(primary.sessionid)) {'), (4495, ' int data_idx = ossl_get_ssl_data_index();'), (4496, ' int connectdata_idx = ossl_get_ssl_conn_index();'), (4497, ' int sockindex_idx = ossl_get_ssl_sockindex_index();'), (4498, ' int proxy_idx = ossl_get_proxy_index();'), (4499, ''), (4500, ' if(data_idx >= 0 && connectdata_idx >= 0 && sockindex_idx >= 0 &&'), (4501, ' proxy_idx >= 0) {'), (4502, ' /* Store the data needed for the "new session" callback.'), (4503, ' * The sockindex is stored as a pointer to an array element. */'), (4504, ' SSL_set_ex_data(backend->handle, data_idx, data);'), (4505, ' SSL_set_ex_data(backend->handle, connectdata_idx, conn);'), (4506, ' SSL_set_ex_data(backend->handle, sockindex_idx, conn->sock + sockindex);'), (4507, '#ifndef CURL_DISABLE_PROXY'), (4508, ' SSL_set_ex_data(backend->handle, proxy_idx, SSL_IS_PROXY() ? (void *) 1:'), (4509, ' NULL);'), (4510, '#else'), (4511, ' SSL_set_ex_data(backend->handle, proxy_idx, NULL);'), (4512, '#endif'), (4513, ' }'), (4514, ' }'), (4515, '}'), (4516, ''), (4517, '/*'), (4518, ' * Starting with TLS 1.3, the ossl_new_session_cb callback gets called after'), (4519, ' * the handshake. If the transfer that sets up the callback gets killed before'), (4520, ' * this callback arrives, we must make sure to properly clear the data to'), (4521, ' * avoid UAF problems. A future optimization could be to instead store another'), (4522, ' * transfer that might still be using the same connection.'), (4523, ' */'), (4524, ''), (4525, 'static void ossl_disassociate_connection(struct Curl_easy *data,'), (4526, ' int sockindex)'), (4527, '{'), (4528, ' struct connectdata *conn = data->conn;'), (4529, ' struct ssl_connect_data *connssl = &conn->ssl[sockindex];'), (4530, ' struct ssl_backend_data *backend = connssl->backend;'), (4531, ''), (4532, " /* If we don't have SSL context, do nothing. 
*/"), (4533, ' if(!backend->handle)'), (4534, ' return;'), (4535, ''), (4536, ' if(SSL_SET_OPTION(primary.sessionid)) {'), (4537, ' bool isproxy = FALSE;'), (4538, ' bool incache;'), (4539, ' void *old_ssl_sessionid = NULL;'), (4540, ' int data_idx = ossl_get_ssl_data_index();'), (4541, ' int connectdata_idx = ossl_get_ssl_conn_index();'), (4542, ' int sockindex_idx = ossl_get_ssl_sockindex_index();'), (4543, ' int proxy_idx = ossl_get_proxy_index();'), (4544, ''), (4545, ' if(data_idx >= 0 && connectdata_idx >= 0 && sockindex_idx >= 0 &&'), (4546, ' proxy_idx >= 0) {'), (4547, ' /* Invalidate the session cache entry, if any */'), (4548, ' isproxy = SSL_get_ex_data(backend->handle, proxy_idx) ? TRUE : FALSE;'), (4549, ''), (4550, ' /* Disable references to data in "new session" callback to avoid'), (4551, ' * accessing a stale pointer. */'), (4552, ' SSL_set_ex_data(backend->handle, data_idx, NULL);'), (4553, ' SSL_set_ex_data(backend->handle, connectdata_idx, NULL);'), (4554, ' SSL_set_ex_data(backend->handle, sockindex_idx, NULL);'), (4555, ' SSL_set_ex_data(backend->handle, proxy_idx, NULL);'), (4556, ' }'), (4557, ''), (4558, ' Curl_ssl_sessionid_lock(data);'), (4559, ' incache = !(Curl_ssl_getsessionid(data, conn, isproxy,'), (4560, ' &old_ssl_sessionid, NULL, sockindex));'), (4561, ' if(incache)'), (4562, ' Curl_ssl_delsessionid(data, old_ssl_sessionid);'), (4563, ' Curl_ssl_sessionid_unlock(data);'), (4564, ' }'), (4565, '}'), (4566, ''), (4602, ' ossl_sha256sum, /* sha256sum */'), (4604, ' NULL, /* sha256sum */'), (4606, ' ossl_associate_connection, /* associate_connection */'), (4607, ' ossl_disassociate_connection /* disassociate_connection */')], 'deleted': [(3228, " /* Check if there's a cached ID we can/should use here! */"), (3229, ' if(SSL_SET_OPTION(primary.sessionid)) {'), (3230, ' void *ssl_sessionid = NULL;'), (3231, ' int data_idx = ossl_get_ssl_data_index();'), (3232, ' int connectdata_idx = ossl_get_ssl_conn_index();'), (3233, ' int sockindex_idx = ossl_get_ssl_sockindex_index();'), (3234, ' int proxy_idx = ossl_get_proxy_index();'), (3235, ''), (3236, ' if(data_idx >= 0 && connectdata_idx >= 0 && sockindex_idx >= 0 &&'), (3237, ' proxy_idx >= 0) {'), (3238, ' /* Store the data needed for the "new session" callback.'), (3239, ' * The sockindex is stored as a pointer to an array element. */'), (3240, ' SSL_set_ex_data(backend->handle, data_idx, data);'), (3241, ' SSL_set_ex_data(backend->handle, connectdata_idx, conn);'), (3242, ' SSL_set_ex_data(backend->handle, sockindex_idx, conn->sock + sockindex);'), (3243, '#ifndef CURL_DISABLE_PROXY'), (3244, ' SSL_set_ex_data(backend->handle, proxy_idx, SSL_IS_PROXY() ? (void *) 1:'), (3245, ' NULL);'), (3246, '#else'), (3247, ' SSL_set_ex_data(backend->handle, proxy_idx, NULL);'), (3248, '#endif'), (3249, ''), (3250, ' }'), (3252, ' Curl_ssl_sessionid_lock(data);'), (3253, ' if(!Curl_ssl_getsessionid(data, conn, SSL_IS_PROXY() ? TRUE : FALSE,'), (3254, ' &ssl_sessionid, NULL, sockindex)) {'), (3255, ' /* we got a session id, use it! 
*/'), (3256, ' if(!SSL_set_session(backend->handle, ssl_sessionid)) {'), (3257, ' Curl_ssl_sessionid_unlock(data);'), (3258, ' failf(data, "SSL: SSL_set_session failed: %s",'), (3259, ' ossl_strerror(ERR_get_error(), error_buffer,'), (3260, ' sizeof(error_buffer)));'), (3261, ' return CURLE_SSL_CONNECT_ERROR;'), (3262, ' }'), (3263, ' /* Informational message */'), (3264, ' infof(data, "SSL re-using session ID\\n");'), (3266, ' Curl_ssl_sessionid_unlock(data);'), (4536, ' ossl_sha256sum /* sha256sum */'), (4538, ' NULL /* sha256sum */')]}
107
39
2,985
16,423
https://github.com/curl/curl
CVE-2021-22901
['CWE-416']
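The record above (curl, CVE-2021-22901, CWE-416) moves the session-cache ex_data wiring into ossl_associate_connection()/ossl_disassociate_connection() so that a TLS 1.3 "new session" callback arriving after the owning transfer is gone never dereferences a stale pointer. The sketch below is only a simplified stand-alone model of that associate/clear-before-free pattern, not curl's or OpenSSL's actual API; the struct and function names (transfer, tls_conn, on_new_session, associate, disassociate) are invented for illustration, whereas the real fix stores and clears the pointers through SSL_set_ex_data as shown in the diff field.

```c
/* Minimal stand-alone model of the associate/disassociate pattern from the
 * diff above. All names here are illustrative, not curl/OpenSSL identifiers. */
#include <stdio.h>
#include <stdlib.h>

struct transfer {            /* stands in for the transfer that owns the callback data */
    int id;
};

struct tls_conn {            /* stands in for the SSL handle and its ex_data slot */
    struct transfer *owner;  /* may be consulted long after the handshake */
};

/* Analogue of the "new session" callback: with TLS 1.3 it can fire late. */
static void on_new_session(struct tls_conn *c)
{
    if(!c->owner) {          /* disassociated: nothing to update, no use-after-free */
        puts("session arrived after owner went away; ignored");
        return;
    }
    printf("caching session for transfer %d\n", c->owner->id);
}

static void associate(struct tls_conn *c, struct transfer *t) { c->owner = t; }
static void disassociate(struct tls_conn *c)                  { c->owner = NULL; }

int main(void)
{
    struct tls_conn conn = { NULL };
    struct transfer *t = malloc(sizeof(*t));
    t->id = 1;

    associate(&conn, t);
    on_new_session(&conn);   /* normal case: owner still alive */

    disassociate(&conn);     /* must happen before the owner is freed */
    free(t);
    on_new_session(&conn);   /* late session ticket: sees NULL, not a dangling pointer */
    return 0;
}
```

The design point the record captures is simply ordering: the pointers handed to the callback are cleared (disassociate) before the transfer they reference is destroyed, so a late callback degrades to a no-op instead of a use-after-free.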
http_common.c
httpCheckCharset
/** * @file http_common.c * @brief Definitions common to HTTP client and server * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL HTTP_TRACE_LEVEL //Dependencies #include <ctype.h> #include "core/net.h" #include "http/http_common.h" #include "debug.h" /** * @brief Check whether a string contains valid characters * @param[in] s Pointer to the string * @param[in] length Length of the string * @param[in] charset Acceptable charset * @return Error code **/ error_t httpCheckCharset(const char_t *s, size_t length, uint_t charset) { error_t error; size_t i; uint8_t c; uint_t m; //Initialize status code error = NO_ERROR; //Parse string for(i = 0; i < length; i++) { //Get current character c = (uint8_t) s[i]; //Any 8-bit sequence of data m = HTTP_CHARSET_OCTET; //Check if character is a control character if(iscntrl(c)) m |= HTTP_CHARSET_CTL; //Check if character is printable if(isprint(c) && c <= 126) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_VCHAR; //Check if character is blank if(c == ' ' || c == '\t') m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_LWS; //Check if character is alphabetic if(isalpha(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_ALPHA; //Check if character is decimal digit if(osIsdigit(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_DIGIT; //Check if character is hexadecimal digit if(isxdigit(c)) m |= HTTP_CHARSET_HEX; //Check if character is in the extended character set if(c >= 128) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_OBS_TEXT; //Check if character is a token character if(strchr("!#$%&'*+-.^_`|~", c)) m |= HTTP_CHARSET_TCHAR; //Invalid character? 
if((m & charset) == 0) error = ERROR_INVALID_SYNTAX; } //Return status code return error; } /** * @brief Parse a list of parameters * @param[in,out] pos Actual position if the list of parameters * @param[out] param Structure that contains the parameter name and value * @return Error code **/ error_t httpParseParam(const char_t **pos, HttpParam *param) { error_t error; size_t i; uint8_t c; bool_t escapeFlag; bool_t separatorFound; const char_t *p; //Check parameters if(pos == NULL || param == NULL) return ERROR_INVALID_PARAMETER; //Initialize structure param->name = NULL; param->nameLen = 0; param->value = NULL; param->valueLen = 0; //Initialize variables escapeFlag = FALSE; separatorFound = FALSE; //Initialize status code error = ERROR_IN_PROGRESS; //Point to the first character i = 0; p = *pos; //Loop through the list of parameters while(error == ERROR_IN_PROGRESS) { //Get current character c = (uint8_t) p[i]; //Check current state if(param->name == NULL) { //Check current character if(c == '\0') { //The list of parameters is empty error = ERROR_NOT_FOUND; } else if(c == ' ' || c == '\t' || c == ',' || c == ';') { //Discard whitespace and separator characters } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter name param->name = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->nameLen == 0) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Save the length of the parameter name param->nameLen = p + i - param->name; //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(!separatorFound) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; } else if(c == '\"') { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->value == NULL) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '\"') { //A string of text is parsed as a single word if it is quoted //using double-quote marks (refer to RFC 7230, section 3.2.6) param->value = p + i; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter value param->value = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Quoted string? 
if(param->value[0] == '\"') { //Check current character if(c == '\0') { //The second double quote is missing error = ERROR_INVALID_SYNTAX; } else if(escapeFlag) { //Recipients that process the value of a quoted-string must //handle a quoted-pair as if it were replaced by the octet //following the backslash escapeFlag = FALSE; } else if(c == '\\') { //The backslash octet can be used as a single-octet quoting //mechanism within quoted-string and comment constructs escapeFlag = TRUE; } else if(c == '\"') { //Advance pointer over the double quote i++; //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isprint(c) || c == '\t' || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Check current character if(c == '\0' || c == ' ' || c == '\t' || c == ',' || c == ';') { //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } } //Point to the next character of the string if(error == ERROR_IN_PROGRESS) i++; } //Check whether the parameter value is a quoted string if(param->valueLen >= 2 && param->value[0] == '\"') { //Discard the surrounding quotes param->value++; param->valueLen -= 2; } //Actual position if the list of parameters *pos = p + i; //Return status code return error; } /** * @brief Compare parameter name with the supplied string * @param[in] param Pointer to the parameter * @param[in] name NULL-terminated string * @return Comparison result **/ bool_t httpCompareParamName(const HttpParam *param, const char_t *name) { bool_t res; size_t n; //Initialize flag res = FALSE; //Determine the length of the string n = osStrlen(name); //Check the length of the parameter name if(param->name != NULL && param->nameLen == n) { //Compare names if(!osStrncasecmp(param->name, name, n)) { res = TRUE; } } //Return comparison result return res; } /** * @brief Compare parameter name with the supplied string * @param[in] param Pointer to the parameter * @param[in] value NULL-terminated string * @return Comparison result **/ bool_t httpCompareParamValue(const HttpParam *param, const char_t *value) { bool_t res; size_t n; //Initialize flag res = FALSE; //Determine the length of the string n = osStrlen(value); //Check the length of the parameter value if(param->value != NULL && param->valueLen == n) { //Perform case-insensitive comparison if(!osStrncasecmp(param->value, value, n)) { res = TRUE; } } //Return comparison result return res; } /** * @brief Copy the value of a parameter * @param[in] param Pointer to the parameter * @param[out] value Pointer to the buffer where to copy the parameter value * @param[out] maxLen Maximum number of characters the buffer can hold * @return Error code **/ error_t httpCopyParamValue(const HttpParam *param, char_t *value, size_t maxLen) { error_t error; size_t n; //Initialize status code error = NO_ERROR; //Check the length of the parameter value if(param->valueLen <= maxLen) { //Get the length of the string n = param->valueLen; } else { //Limit the number of characters to copy n = maxLen; //Report an error error = ERROR_BUFFER_OVERFLOW; } //Copy the value of the parameter osMemcpy(value, param->value, n); //Properly terminate the string with a NULL character value[n] = '\0'; //Return status code return error; } /** * @brief 
Convert byte array to hex string * @param[in] input Point to the byte array * @param[in] inputLen Length of the byte array * @param[out] output NULL-terminated string resulting from the conversion * @return Error code **/ void httpEncodeHexString(const uint8_t *input, size_t inputLen, char_t *output) { int_t i; //Hex conversion table static const char_t hexDigit[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; //Process byte array for(i = inputLen - 1; i >= 0; i--) { //Convert lower nibble output[i * 2 + 1] = hexDigit[input[i] & 0x0F]; //Then convert upper nibble output[i * 2] = hexDigit[(input[i] >> 4) & 0x0F]; } //Properly terminate the string with a NULL character output[inputLen * 2] = '\0'; }
/** * @file http_common.c * @brief Definitions common to HTTP client and server * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL HTTP_TRACE_LEVEL //Dependencies #include <ctype.h> #include "core/net.h" #include "http/http_common.h" #include "debug.h" /** * @brief Check whether a string contains valid characters * @param[in] s Pointer to the string * @param[in] length Length of the string * @param[in] charset Acceptable charset * @return Error code **/ error_t httpCheckCharset(const char_t *s, size_t length, uint_t charset) { error_t error; size_t i; uint8_t c; uint_t m; //Initialize status code error = NO_ERROR; //Parse string for(i = 0; i < length; i++) { //Get current character c = (uint8_t) s[i]; //Any 8-bit sequence of data m = HTTP_CHARSET_OCTET; //Check if character is a control character if(iscntrl(c)) m |= HTTP_CHARSET_CTL; //Check if character is printable if(isprint(c) && c <= 126) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_VCHAR; //Check if character is blank if(c == ' ' || c == '\t') m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_LWS; //Check if character is alphabetic if(isalpha(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_ALPHA; //Check if character is decimal digit if(osIsdigit(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_DIGIT; //Check if character is hexadecimal digit if(isxdigit(c)) m |= HTTP_CHARSET_HEX; //Check if character is in the extended character set if(c >= 128) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_OBS_TEXT; //Check if character is a token character if(osStrchr("!#$%&'*+-.^_`|~", c)) m |= HTTP_CHARSET_TCHAR; //Invalid character? 
if((m & charset) == 0) error = ERROR_INVALID_SYNTAX; } //Return status code return error; } /** * @brief Parse a list of parameters * @param[in,out] pos Actual position if the list of parameters * @param[out] param Structure that contains the parameter name and value * @return Error code **/ error_t httpParseParam(const char_t **pos, HttpParam *param) { error_t error; size_t i; uint8_t c; bool_t escapeFlag; bool_t separatorFound; const char_t *p; //Check parameters if(pos == NULL || param == NULL) return ERROR_INVALID_PARAMETER; //Initialize structure param->name = NULL; param->nameLen = 0; param->value = NULL; param->valueLen = 0; //Initialize variables escapeFlag = FALSE; separatorFound = FALSE; //Initialize status code error = ERROR_IN_PROGRESS; //Point to the first character i = 0; p = *pos; //Loop through the list of parameters while(error == ERROR_IN_PROGRESS) { //Get current character c = (uint8_t) p[i]; //Check current state if(param->name == NULL) { //Check current character if(c == '\0') { //The list of parameters is empty error = ERROR_NOT_FOUND; } else if(c == ' ' || c == '\t' || c == ',' || c == ';') { //Discard whitespace and separator characters } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter name param->name = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->nameLen == 0) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Save the length of the parameter name param->nameLen = p + i - param->name; //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(!separatorFound) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; } else if(c == '\"') { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->value == NULL) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '\"') { //A string of text is parsed as a single word if it is quoted //using double-quote marks (refer to RFC 7230, section 3.2.6) param->value = p + i; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter value param->value = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Quoted string? 
if(param->value[0] == '\"') { //Check current character if(c == '\0') { //The second double quote is missing error = ERROR_INVALID_SYNTAX; } else if(escapeFlag) { //Recipients that process the value of a quoted-string must //handle a quoted-pair as if it were replaced by the octet //following the backslash escapeFlag = FALSE; } else if(c == '\\') { //The backslash octet can be used as a single-octet quoting //mechanism within quoted-string and comment constructs escapeFlag = TRUE; } else if(c == '\"') { //Advance pointer over the double quote i++; //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isprint(c) || c == '\t' || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Check current character if(c == '\0' || c == ' ' || c == '\t' || c == ',' || c == ';') { //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } } //Point to the next character of the string if(error == ERROR_IN_PROGRESS) i++; } //Check whether the parameter value is a quoted string if(param->valueLen >= 2 && param->value[0] == '\"') { //Discard the surrounding quotes param->value++; param->valueLen -= 2; } //Actual position if the list of parameters *pos = p + i; //Return status code return error; } /** * @brief Compare parameter name with the supplied string * @param[in] param Pointer to the parameter * @param[in] name NULL-terminated string * @return Comparison result **/ bool_t httpCompareParamName(const HttpParam *param, const char_t *name) { bool_t res; size_t n; //Initialize flag res = FALSE; //Determine the length of the string n = osStrlen(name); //Check the length of the parameter name if(param->name != NULL && param->nameLen == n) { //Compare names if(!osStrncasecmp(param->name, name, n)) { res = TRUE; } } //Return comparison result return res; } /** * @brief Compare parameter name with the supplied string * @param[in] param Pointer to the parameter * @param[in] value NULL-terminated string * @return Comparison result **/ bool_t httpCompareParamValue(const HttpParam *param, const char_t *value) { bool_t res; size_t n; //Initialize flag res = FALSE; //Determine the length of the string n = osStrlen(value); //Check the length of the parameter value if(param->value != NULL && param->valueLen == n) { //Perform case-insensitive comparison if(!osStrncasecmp(param->value, value, n)) { res = TRUE; } } //Return comparison result return res; } /** * @brief Copy the value of a parameter * @param[in] param Pointer to the parameter * @param[out] value Pointer to the buffer where to copy the parameter value * @param[out] maxLen Maximum number of characters the buffer can hold * @return Error code **/ error_t httpCopyParamValue(const HttpParam *param, char_t *value, size_t maxLen) { error_t error; size_t n; //Initialize status code error = NO_ERROR; //Check the length of the parameter value if(param->valueLen <= maxLen) { //Get the length of the string n = param->valueLen; } else { //Limit the number of characters to copy n = maxLen; //Report an error error = ERROR_BUFFER_OVERFLOW; } //Copy the value of the parameter osMemcpy(value, param->value, n); //Properly terminate the string with a NULL character value[n] = '\0'; //Return status code return error; } /** * @brief 
Convert byte array to hex string * @param[in] input Point to the byte array * @param[in] inputLen Length of the byte array * @param[out] output NULL-terminated string resulting from the conversion **/ void httpEncodeHexString(const uint8_t *input, size_t inputLen, char_t *output) { int_t i; //Hex conversion table static const char_t hexDigit[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; //Process byte array for(i = inputLen - 1; i >= 0; i--) { //Convert lower nibble output[i * 2 + 1] = hexDigit[input[i] & 0x0F]; //Then convert upper nibble output[i * 2] = hexDigit[(input[i] >> 4) & 0x0F]; } //Properly terminate the string with a NULL character output[inputLen * 2] = '\0'; }
error_t httpCheckCharset(const char_t *s, size_t length, uint_t charset) { error_t error; size_t i; uint8_t c; uint_t m; //Initialize status code error = NO_ERROR; //Parse string for(i = 0; i < length; i++) { //Get current character c = (uint8_t) s[i]; //Any 8-bit sequence of data m = HTTP_CHARSET_OCTET; //Check if character is a control character if(iscntrl(c)) m |= HTTP_CHARSET_CTL; //Check if character is printable if(isprint(c) && c <= 126) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_VCHAR; //Check if character is blank if(c == ' ' || c == '\t') m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_LWS; //Check if character is alphabetic if(isalpha(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_ALPHA; //Check if character is decimal digit if(osIsdigit(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_DIGIT; //Check if character is hexadecimal digit if(isxdigit(c)) m |= HTTP_CHARSET_HEX; //Check if character is in the extended character set if(c >= 128) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_OBS_TEXT; //Check if character is a token character if(strchr("!#$%&'*+-.^_`|~", c)) m |= HTTP_CHARSET_TCHAR; //Invalid character? if((m & charset) == 0) error = ERROR_INVALID_SYNTAX; } //Return status code return error; }
error_t httpCheckCharset(const char_t *s, size_t length, uint_t charset) { error_t error; size_t i; uint8_t c; uint_t m; //Initialize status code error = NO_ERROR; //Parse string for(i = 0; i < length; i++) { //Get current character c = (uint8_t) s[i]; //Any 8-bit sequence of data m = HTTP_CHARSET_OCTET; //Check if character is a control character if(iscntrl(c)) m |= HTTP_CHARSET_CTL; //Check if character is printable if(isprint(c) && c <= 126) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_VCHAR; //Check if character is blank if(c == ' ' || c == '\t') m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_LWS; //Check if character is alphabetic if(isalpha(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_ALPHA; //Check if character is decimal digit if(osIsdigit(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_DIGIT; //Check if character is hexadecimal digit if(isxdigit(c)) m |= HTTP_CHARSET_HEX; //Check if character is in the extended character set if(c >= 128) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_OBS_TEXT; //Check if character is a token character if(osStrchr("!#$%&'*+-.^_`|~", c)) m |= HTTP_CHARSET_TCHAR; //Invalid character? if((m & charset) == 0) error = ERROR_INVALID_SYNTAX; } //Return status code return error; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (97, ' if(osStrchr("!#$%&\'*+-.^_`|~", c))'), (166, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (199, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (233, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (264, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (328, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (97, ' if(strchr("!#$%&\'*+-.^_`|~", c))'), (166, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (199, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (233, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (264, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (328, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (473, ' * @return Error code')]}
8
9
271
1,293
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
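Reading note on the entry above: the recorded diff swaps the plain C strchr calls in the character-class checks for osStrchr, but the record itself does not show how osStrchr is defined. The fragment below is only a minimal, hypothetical sketch of the standard-C pitfall such a guarded lookup can avoid: strchr(set, c) also matches the set string's terminating NUL, so a bare strchr-based membership test reports '\0' as a valid token character. The checked_strchr helper is an illustrative stand-in, not CycloneTCP code.

#include <stdio.h>
#include <string.h>

/* Hypothetical guarded lookup: like strchr(), but never matches the
   terminating NUL of the set string. */
static const char *checked_strchr(const char *set, int c)
{
   return (c != '\0') ? strchr(set, c) : NULL;
}

int main(void)
{
   const char *tchars = "!#$%&'*+-.^_`|~";

   /* strchr() finds the terminator, so '\0' looks like a member of the set */
   printf("strchr matches NUL: %s\n", strchr(tchars, '\0') ? "yes" : "no");

   /* the guarded variant rejects it */
   printf("checked_strchr matches NUL: %s\n",
      checked_strchr(tchars, '\0') ? "yes" : "no");

   return 0;
}

Whether this is the exact behavior the upstream fix targets cannot be read from this record alone; the CWE-20 tag only indicates an input-validation weakness.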
http_common.c
httpParseParam
/** * @file http_common.c * @brief Definitions common to HTTP client and server * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL HTTP_TRACE_LEVEL //Dependencies #include <ctype.h> #include "core/net.h" #include "http/http_common.h" #include "debug.h" /** * @brief Check whether a string contains valid characters * @param[in] s Pointer to the string * @param[in] length Length of the string * @param[in] charset Acceptable charset * @return Error code **/ error_t httpCheckCharset(const char_t *s, size_t length, uint_t charset) { error_t error; size_t i; uint8_t c; uint_t m; //Initialize status code error = NO_ERROR; //Parse string for(i = 0; i < length; i++) { //Get current character c = (uint8_t) s[i]; //Any 8-bit sequence of data m = HTTP_CHARSET_OCTET; //Check if character is a control character if(iscntrl(c)) m |= HTTP_CHARSET_CTL; //Check if character is printable if(isprint(c) && c <= 126) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_VCHAR; //Check if character is blank if(c == ' ' || c == '\t') m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_LWS; //Check if character is alphabetic if(isalpha(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_ALPHA; //Check if character is decimal digit if(osIsdigit(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_DIGIT; //Check if character is hexadecimal digit if(isxdigit(c)) m |= HTTP_CHARSET_HEX; //Check if character is in the extended character set if(c >= 128) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_OBS_TEXT; //Check if character is a token character if(strchr("!#$%&'*+-.^_`|~", c)) m |= HTTP_CHARSET_TCHAR; //Invalid character? 
if((m & charset) == 0) error = ERROR_INVALID_SYNTAX; } //Return status code return error; } /** * @brief Parse a list of parameters * @param[in,out] pos Actual position if the list of parameters * @param[out] param Structure that contains the parameter name and value * @return Error code **/ error_t httpParseParam(const char_t **pos, HttpParam *param) { error_t error; size_t i; uint8_t c; bool_t escapeFlag; bool_t separatorFound; const char_t *p; //Check parameters if(pos == NULL || param == NULL) return ERROR_INVALID_PARAMETER; //Initialize structure param->name = NULL; param->nameLen = 0; param->value = NULL; param->valueLen = 0; //Initialize variables escapeFlag = FALSE; separatorFound = FALSE; //Initialize status code error = ERROR_IN_PROGRESS; //Point to the first character i = 0; p = *pos; //Loop through the list of parameters while(error == ERROR_IN_PROGRESS) { //Get current character c = (uint8_t) p[i]; //Check current state if(param->name == NULL) { //Check current character if(c == '\0') { //The list of parameters is empty error = ERROR_NOT_FOUND; } else if(c == ' ' || c == '\t' || c == ',' || c == ';') { //Discard whitespace and separator characters } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter name param->name = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->nameLen == 0) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Save the length of the parameter name param->nameLen = p + i - param->name; //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(!separatorFound) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; } else if(c == '\"') { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->value == NULL) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '\"') { //A string of text is parsed as a single word if it is quoted //using double-quote marks (refer to RFC 7230, section 3.2.6) param->value = p + i; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter value param->value = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Quoted string? 
if(param->value[0] == '\"') { //Check current character if(c == '\0') { //The second double quote is missing error = ERROR_INVALID_SYNTAX; } else if(escapeFlag) { //Recipients that process the value of a quoted-string must //handle a quoted-pair as if it were replaced by the octet //following the backslash escapeFlag = FALSE; } else if(c == '\\') { //The backslash octet can be used as a single-octet quoting //mechanism within quoted-string and comment constructs escapeFlag = TRUE; } else if(c == '\"') { //Advance pointer over the double quote i++; //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isprint(c) || c == '\t' || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Check current character if(c == '\0' || c == ' ' || c == '\t' || c == ',' || c == ';') { //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } } //Point to the next character of the string if(error == ERROR_IN_PROGRESS) i++; } //Check whether the parameter value is a quoted string if(param->valueLen >= 2 && param->value[0] == '\"') { //Discard the surrounding quotes param->value++; param->valueLen -= 2; } //Actual position if the list of parameters *pos = p + i; //Return status code return error; } /** * @brief Compare parameter name with the supplied string * @param[in] param Pointer to the parameter * @param[in] name NULL-terminated string * @return Comparison result **/ bool_t httpCompareParamName(const HttpParam *param, const char_t *name) { bool_t res; size_t n; //Initialize flag res = FALSE; //Determine the length of the string n = osStrlen(name); //Check the length of the parameter name if(param->name != NULL && param->nameLen == n) { //Compare names if(!osStrncasecmp(param->name, name, n)) { res = TRUE; } } //Return comparison result return res; } /** * @brief Compare parameter name with the supplied string * @param[in] param Pointer to the parameter * @param[in] value NULL-terminated string * @return Comparison result **/ bool_t httpCompareParamValue(const HttpParam *param, const char_t *value) { bool_t res; size_t n; //Initialize flag res = FALSE; //Determine the length of the string n = osStrlen(value); //Check the length of the parameter value if(param->value != NULL && param->valueLen == n) { //Perform case-insensitive comparison if(!osStrncasecmp(param->value, value, n)) { res = TRUE; } } //Return comparison result return res; } /** * @brief Copy the value of a parameter * @param[in] param Pointer to the parameter * @param[out] value Pointer to the buffer where to copy the parameter value * @param[out] maxLen Maximum number of characters the buffer can hold * @return Error code **/ error_t httpCopyParamValue(const HttpParam *param, char_t *value, size_t maxLen) { error_t error; size_t n; //Initialize status code error = NO_ERROR; //Check the length of the parameter value if(param->valueLen <= maxLen) { //Get the length of the string n = param->valueLen; } else { //Limit the number of characters to copy n = maxLen; //Report an error error = ERROR_BUFFER_OVERFLOW; } //Copy the value of the parameter osMemcpy(value, param->value, n); //Properly terminate the string with a NULL character value[n] = '\0'; //Return status code return error; } /** * @brief 
Convert byte array to hex string * @param[in] input Point to the byte array * @param[in] inputLen Length of the byte array * @param[out] output NULL-terminated string resulting from the conversion * @return Error code **/ void httpEncodeHexString(const uint8_t *input, size_t inputLen, char_t *output) { int_t i; //Hex conversion table static const char_t hexDigit[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; //Process byte array for(i = inputLen - 1; i >= 0; i--) { //Convert lower nibble output[i * 2 + 1] = hexDigit[input[i] & 0x0F]; //Then convert upper nibble output[i * 2] = hexDigit[(input[i] >> 4) & 0x0F]; } //Properly terminate the string with a NULL character output[inputLen * 2] = '\0'; }
/** * @file http_common.c * @brief Definitions common to HTTP client and server * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL HTTP_TRACE_LEVEL //Dependencies #include <ctype.h> #include "core/net.h" #include "http/http_common.h" #include "debug.h" /** * @brief Check whether a string contains valid characters * @param[in] s Pointer to the string * @param[in] length Length of the string * @param[in] charset Acceptable charset * @return Error code **/ error_t httpCheckCharset(const char_t *s, size_t length, uint_t charset) { error_t error; size_t i; uint8_t c; uint_t m; //Initialize status code error = NO_ERROR; //Parse string for(i = 0; i < length; i++) { //Get current character c = (uint8_t) s[i]; //Any 8-bit sequence of data m = HTTP_CHARSET_OCTET; //Check if character is a control character if(iscntrl(c)) m |= HTTP_CHARSET_CTL; //Check if character is printable if(isprint(c) && c <= 126) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_VCHAR; //Check if character is blank if(c == ' ' || c == '\t') m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_LWS; //Check if character is alphabetic if(isalpha(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_ALPHA; //Check if character is decimal digit if(osIsdigit(c)) m |= HTTP_CHARSET_TCHAR | HTTP_CHARSET_DIGIT; //Check if character is hexadecimal digit if(isxdigit(c)) m |= HTTP_CHARSET_HEX; //Check if character is in the extended character set if(c >= 128) m |= HTTP_CHARSET_TEXT | HTTP_CHARSET_OBS_TEXT; //Check if character is a token character if(osStrchr("!#$%&'*+-.^_`|~", c)) m |= HTTP_CHARSET_TCHAR; //Invalid character? 
if((m & charset) == 0) error = ERROR_INVALID_SYNTAX; } //Return status code return error; } /** * @brief Parse a list of parameters * @param[in,out] pos Actual position if the list of parameters * @param[out] param Structure that contains the parameter name and value * @return Error code **/ error_t httpParseParam(const char_t **pos, HttpParam *param) { error_t error; size_t i; uint8_t c; bool_t escapeFlag; bool_t separatorFound; const char_t *p; //Check parameters if(pos == NULL || param == NULL) return ERROR_INVALID_PARAMETER; //Initialize structure param->name = NULL; param->nameLen = 0; param->value = NULL; param->valueLen = 0; //Initialize variables escapeFlag = FALSE; separatorFound = FALSE; //Initialize status code error = ERROR_IN_PROGRESS; //Point to the first character i = 0; p = *pos; //Loop through the list of parameters while(error == ERROR_IN_PROGRESS) { //Get current character c = (uint8_t) p[i]; //Check current state if(param->name == NULL) { //Check current character if(c == '\0') { //The list of parameters is empty error = ERROR_NOT_FOUND; } else if(c == ' ' || c == '\t' || c == ',' || c == ';') { //Discard whitespace and separator characters } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter name param->name = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->nameLen == 0) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Save the length of the parameter name param->nameLen = p + i - param->name; //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(!separatorFound) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; } else if(c == '\"') { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->value == NULL) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '\"') { //A string of text is parsed as a single word if it is quoted //using double-quote marks (refer to RFC 7230, section 3.2.6) param->value = p + i; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter value param->value = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Quoted string? 
if(param->value[0] == '\"') { //Check current character if(c == '\0') { //The second double quote is missing error = ERROR_INVALID_SYNTAX; } else if(escapeFlag) { //Recipients that process the value of a quoted-string must //handle a quoted-pair as if it were replaced by the octet //following the backslash escapeFlag = FALSE; } else if(c == '\\') { //The backslash octet can be used as a single-octet quoting //mechanism within quoted-string and comment constructs escapeFlag = TRUE; } else if(c == '\"') { //Advance pointer over the double quote i++; //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isprint(c) || c == '\t' || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Check current character if(c == '\0' || c == ' ' || c == '\t' || c == ',' || c == ';') { //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } } //Point to the next character of the string if(error == ERROR_IN_PROGRESS) i++; } //Check whether the parameter value is a quoted string if(param->valueLen >= 2 && param->value[0] == '\"') { //Discard the surrounding quotes param->value++; param->valueLen -= 2; } //Actual position if the list of parameters *pos = p + i; //Return status code return error; } /** * @brief Compare parameter name with the supplied string * @param[in] param Pointer to the parameter * @param[in] name NULL-terminated string * @return Comparison result **/ bool_t httpCompareParamName(const HttpParam *param, const char_t *name) { bool_t res; size_t n; //Initialize flag res = FALSE; //Determine the length of the string n = osStrlen(name); //Check the length of the parameter name if(param->name != NULL && param->nameLen == n) { //Compare names if(!osStrncasecmp(param->name, name, n)) { res = TRUE; } } //Return comparison result return res; } /** * @brief Compare parameter name with the supplied string * @param[in] param Pointer to the parameter * @param[in] value NULL-terminated string * @return Comparison result **/ bool_t httpCompareParamValue(const HttpParam *param, const char_t *value) { bool_t res; size_t n; //Initialize flag res = FALSE; //Determine the length of the string n = osStrlen(value); //Check the length of the parameter value if(param->value != NULL && param->valueLen == n) { //Perform case-insensitive comparison if(!osStrncasecmp(param->value, value, n)) { res = TRUE; } } //Return comparison result return res; } /** * @brief Copy the value of a parameter * @param[in] param Pointer to the parameter * @param[out] value Pointer to the buffer where to copy the parameter value * @param[out] maxLen Maximum number of characters the buffer can hold * @return Error code **/ error_t httpCopyParamValue(const HttpParam *param, char_t *value, size_t maxLen) { error_t error; size_t n; //Initialize status code error = NO_ERROR; //Check the length of the parameter value if(param->valueLen <= maxLen) { //Get the length of the string n = param->valueLen; } else { //Limit the number of characters to copy n = maxLen; //Report an error error = ERROR_BUFFER_OVERFLOW; } //Copy the value of the parameter osMemcpy(value, param->value, n); //Properly terminate the string with a NULL character value[n] = '\0'; //Return status code return error; } /** * @brief 
Convert byte array to hex string * @param[in] input Point to the byte array * @param[in] inputLen Length of the byte array * @param[out] output NULL-terminated string resulting from the conversion **/ void httpEncodeHexString(const uint8_t *input, size_t inputLen, char_t *output) { int_t i; //Hex conversion table static const char_t hexDigit[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; //Process byte array for(i = inputLen - 1; i >= 0; i--) { //Convert lower nibble output[i * 2 + 1] = hexDigit[input[i] & 0x0F]; //Then convert upper nibble output[i * 2] = hexDigit[(input[i] >> 4) & 0x0F]; } //Properly terminate the string with a NULL character output[inputLen * 2] = '\0'; }
error_t httpParseParam(const char_t **pos, HttpParam *param) { error_t error; size_t i; uint8_t c; bool_t escapeFlag; bool_t separatorFound; const char_t *p; //Check parameters if(pos == NULL || param == NULL) return ERROR_INVALID_PARAMETER; //Initialize structure param->name = NULL; param->nameLen = 0; param->value = NULL; param->valueLen = 0; //Initialize variables escapeFlag = FALSE; separatorFound = FALSE; //Initialize status code error = ERROR_IN_PROGRESS; //Point to the first character i = 0; p = *pos; //Loop through the list of parameters while(error == ERROR_IN_PROGRESS) { //Get current character c = (uint8_t) p[i]; //Check current state if(param->name == NULL) { //Check current character if(c == '\0') { //The list of parameters is empty error = ERROR_NOT_FOUND; } else if(c == ' ' || c == '\t' || c == ',' || c == ';') { //Discard whitespace and separator characters } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter name param->name = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->nameLen == 0) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Save the length of the parameter name param->nameLen = p + i - param->name; //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(!separatorFound) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; } else if(c == '\"') { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->value == NULL) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '\"') { //A string of text is parsed as a single word if it is quoted //using double-quote marks (refer to RFC 7230, section 3.2.6) param->value = p + i; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter value param->value = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Quoted string? 
if(param->value[0] == '\"') { //Check current character if(c == '\0') { //The second double quote is missing error = ERROR_INVALID_SYNTAX; } else if(escapeFlag) { //Recipients that process the value of a quoted-string must //handle a quoted-pair as if it were replaced by the octet //following the backslash escapeFlag = FALSE; } else if(c == '\\') { //The backslash octet can be used as a single-octet quoting //mechanism within quoted-string and comment constructs escapeFlag = TRUE; } else if(c == '\"') { //Advance pointer over the double quote i++; //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isprint(c) || c == '\t' || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Check current character if(c == '\0' || c == ' ' || c == '\t' || c == ',' || c == ';') { //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } } //Point to the next character of the string if(error == ERROR_IN_PROGRESS) i++; } //Check whether the parameter value is a quoted string if(param->valueLen >= 2 && param->value[0] == '\"') { //Discard the surrounding quotes param->value++; param->valueLen -= 2; } //Actual position if the list of parameters *pos = p + i; //Return status code return error; }
error_t httpParseParam(const char_t **pos, HttpParam *param) { error_t error; size_t i; uint8_t c; bool_t escapeFlag; bool_t separatorFound; const char_t *p; //Check parameters if(pos == NULL || param == NULL) return ERROR_INVALID_PARAMETER; //Initialize structure param->name = NULL; param->nameLen = 0; param->value = NULL; param->valueLen = 0; //Initialize variables escapeFlag = FALSE; separatorFound = FALSE; //Initialize status code error = ERROR_IN_PROGRESS; //Point to the first character i = 0; p = *pos; //Loop through the list of parameters while(error == ERROR_IN_PROGRESS) { //Get current character c = (uint8_t) p[i]; //Check current state if(param->name == NULL) { //Check current character if(c == '\0') { //The list of parameters is empty error = ERROR_NOT_FOUND; } else if(c == ' ' || c == '\t' || c == ',' || c == ';') { //Discard whitespace and separator characters } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter name param->name = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->nameLen == 0) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Save the length of the parameter name param->nameLen = p + i - param->name; //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; //Save the length of the parameter name param->nameLen = p + i - param->name; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(!separatorFound) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '=') { //The key/value separator has been found separatorFound = TRUE; } else if(c == '\"') { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character that follows the parameter name i = param->name + param->nameLen - p; //Successful processing error = NO_ERROR; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else if(param->value == NULL) { //Check current character if(c == '\0' || c == ',' || c == ';') { //Successful processing error = NO_ERROR; } else if(c == ' ' || c == '\t') { //Discard whitespace characters } else if(c == '\"') { //A string of text is parsed as a single word if it is quoted //using double-quote marks (refer to RFC 7230, section 3.2.6) param->value = p + i; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Point to the first character of the parameter value param->value = p + i; } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Quoted string? 
if(param->value[0] == '\"') { //Check current character if(c == '\0') { //The second double quote is missing error = ERROR_INVALID_SYNTAX; } else if(escapeFlag) { //Recipients that process the value of a quoted-string must //handle a quoted-pair as if it were replaced by the octet //following the backslash escapeFlag = FALSE; } else if(c == '\\') { //The backslash octet can be used as a single-octet quoting //mechanism within quoted-string and comment constructs escapeFlag = TRUE; } else if(c == '\"') { //Advance pointer over the double quote i++; //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isprint(c) || c == '\t' || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } else { //Check current character if(c == '\0' || c == ' ' || c == '\t' || c == ',' || c == ';') { //Save the length of the parameter value param->valueLen = p + i - param->value; //Successful processing error = NO_ERROR; } else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128) { //Advance data pointer } else { //Invalid character error = ERROR_INVALID_SYNTAX; } } } //Point to the next character of the string if(error == ERROR_IN_PROGRESS) i++; } //Check whether the parameter value is a quoted string if(param->valueLen >= 2 && param->value[0] == '\"') { //Discard the surrounding quotes param->value++; param->valueLen -= 2; } //Actual position if the list of parameters *pos = p + i; //Return status code return error; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (97, ' if(osStrchr("!#$%&\'*+-.^_`|~", c))'), (166, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (199, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (233, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (264, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (328, ' else if(isalnum(c) || osStrchr("!#$%&\'*+-.^_`|~", c) || c >= 128)')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (97, ' if(strchr("!#$%&\'*+-.^_`|~", c))'), (166, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (199, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (233, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (264, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (328, ' else if(isalnum(c) || strchr("!#$%&\'*+-.^_`|~", c) || c >= 128)'), (473, ' * @return Error code')]}
8
9
271
1,293
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
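The second entry records the same file, keyed on httpParseParam, whose full before/after bodies appear above. As a reading aid only, the hypothetical caller below shows how that parser is typically driven; it is written against the signatures visible in the record (httpParseParam, httpCopyParamValue), assumes the CycloneTCP http/http_common.h header, and is not part of the recorded diff.

#include <stdio.h>
#include "http/http_common.h"   /* assumed CycloneTCP header declaring HttpParam */

/* Walk a header parameter list such as: charset="utf-8"; q=0.9
   printing each name together with its (already unquoted) value. */
static void dumpParams(const char_t *list)
{
   HttpParam param;
   const char_t *pos = list;
   char_t value[64];

   /* httpParseParam() returns NO_ERROR for each parameter found and
      ERROR_NOT_FOUND once the list has been consumed */
   while(httpParseParam(&pos, &param) == NO_ERROR)
   {
      /* Parameters without a value leave param.value at NULL */
      if(param.value != NULL &&
         httpCopyParamValue(&param, value, sizeof(value) - 1) == NO_ERROR)
      {
         printf("%.*s = %s\n", (int) param.nameLen, param.name, value);
      }
   }
}

For the example list above this would print "charset = utf-8" and "q = 0.9"; the surrounding quotes are stripped by the parser itself, as the recorded function shows.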
jpg_dec.c
jpg_dec_parseopts
/* * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdio.h> #include <assert.h> #include <ctype.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_debug.h" #include "jpg_jpeglib.h" #include "jpg_cod.h" /******************************************************************************\ * Types. 
\******************************************************************************/ /* JPEG decoder data sink type. */ typedef struct jpg_dest_s { /* Initialize output. */ void (*start_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output rows of decompressed data. */ void (*put_pixel_rows)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo, JDIMENSION rows_supplied); /* Cleanup output. */ void (*finish_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output buffer. */ JSAMPARRAY buffer; /* Height of output buffer. */ JDIMENSION buffer_height; /* The current row. */ JDIMENSION row; /* The image used to hold the decompressed sample data. */ jas_image_t *image; /* The row buffer. */ jas_matrix_t *data; /* The error indicator. If this is nonzero, something has gone wrong during decompression. */ int error; } jpg_dest_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied); static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static int jpg_copystreamtofile(FILE *out, jas_stream_t *in); static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo); /******************************************************************************\ * \******************************************************************************/ typedef struct { size_t max_size; } jpg_dec_importopts_t; typedef enum { OPT_MAXSIZE, } optid_t; static jas_taginfo_t decopts[] = { {OPT_MAXSIZE, "max_size"}, {-1, 0} }; static int jpg_dec_parseopts(char *optstr, jpg_dec_importopts_t *opts) { jas_tvparser_t *tvp; opts->max_size = 0; if (!(tvp = jas_tvparser_create(optstr ? optstr : ""))) { return -1; } while (!jas_tvparser_next(tvp)) { switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts, jas_tvparser_gettag(tvp)))->id) { case OPT_MAXSIZE: opts->max_size = atoi(jas_tvparser_getval(tvp)); break; default: jas_eprintf("warning: ignoring invalid option %s\n", jas_tvparser_gettag(tvp)); break; } } jas_tvparser_destroy(tvp); return 0; } /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the JPG format. */ jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t size; if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. */ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); if (opts.max_size) { if (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height, &size) || !jas_safe_size_mul(size, cinfo.output_components, &size)) { goto error; } if (size > opts.max_size) { jas_eprintf("image is too large\n"); goto error; } } /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. */ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; } /******************************************************************************\ * \******************************************************************************/ static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo) { jas_image_t *image; int cmptno; jas_image_cmptparm_t cmptparm; int numcmpts; JAS_DBGLOG(10, ("jpg_mkimage(%p)\n", cinfo)); image = 0; numcmpts = cinfo->output_components; if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < numcmpts; ++cmptno) { if (cinfo->image_width > JAS_IMAGE_COORD_MAX || cinfo->image_height > JAS_IMAGE_COORD_MAX) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = 1; cmptparm.vstep = 1; cmptparm.width = cinfo->image_width; cmptparm.height = cinfo->image_height; cmptparm.prec = 8; cmptparm.sgnd = false; if (jas_image_addcmpt(image, cmptno, &cmptparm)) { goto error; } } if (numcmpts == 3) { jas_image_setclrspc(image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } return image; error: if (image) { jas_image_destroy(image); } return 0; } /******************************************************************************\ * Data source code. \******************************************************************************/ static int jpg_copystreamtofile(FILE *out, jas_stream_t *in) { int c; while ((c = jas_stream_getc(in)) != EOF) { if (fputc(c, out) == EOF) { return -1; } } if (jas_stream_error(in)) { return -1; } return 0; } /******************************************************************************\ * Data sink code. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { /* Avoid compiler warnings about unused parameters. 
*/ cinfo = 0; JAS_DBGLOG(10, ("jpg_start_output(%p, %p)\n", cinfo, dinfo)); dinfo->row = 0; } static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied) { JSAMPLE *bufptr; int cmptno; JDIMENSION x; uint_fast32_t width; JAS_DBGLOG(10, ("jpg_put_pixel_rows(%p, %p)\n", cinfo, dinfo)); if (dinfo->error) { return; } assert(cinfo->output_components == jas_image_numcmpts(dinfo->image)); for (cmptno = 0; cmptno < cinfo->output_components; ++cmptno) { width = jas_image_cmptwidth(dinfo->image, cmptno); bufptr = (dinfo->buffer[0]) + cmptno; for (x = 0; x < width; ++x) { jas_matrix_set(dinfo->data, 0, x, GETJSAMPLE(*bufptr)); bufptr += cinfo->output_components; } JAS_DBGLOG(10, ( "jas_image_writecmpt called for component %d row %lu\n", cmptno, JAS_CAST(unsigned long, dinfo->row))); if (jas_image_writecmpt(dinfo->image, cmptno, 0, dinfo->row, width, 1, dinfo->data)) { dinfo->error = 1; } } dinfo->row += rows_supplied; } static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { JAS_DBGLOG(10, ("jpg_finish_output(%p, %p)\n", cinfo, dinfo)); /* Avoid compiler warnings about unused parameters. */ cinfo = 0; dinfo = 0; }
/* * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdio.h> #include <assert.h> #include <ctype.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_debug.h" #include "jpg_jpeglib.h" #include "jpg_cod.h" /******************************************************************************\ * Types. 
\******************************************************************************/ typedef struct { size_t max_samples; } jpg_dec_importopts_t; typedef enum { OPT_MAXSIZE, } optid_t; /* JPEG decoder data sink type. */ typedef struct jpg_dest_s { /* Initialize output. */ void (*start_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output rows of decompressed data. */ void (*put_pixel_rows)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo, JDIMENSION rows_supplied); /* Cleanup output. */ void (*finish_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output buffer. */ JSAMPARRAY buffer; /* Height of output buffer. */ JDIMENSION buffer_height; /* The current row. */ JDIMENSION row; /* The image used to hold the decompressed sample data. */ jas_image_t *image; /* The row buffer. */ jas_matrix_t *data; /* The error indicator. If this is nonzero, something has gone wrong during decompression. */ int error; } jpg_dest_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied); static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static int jpg_copystreamtofile(FILE *out, jas_stream_t *in); static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo); /******************************************************************************\ * Option parsing. \******************************************************************************/ static jas_taginfo_t decopts[] = { {OPT_MAXSIZE, "max_samples"}, {-1, 0} }; static int jpg_dec_parseopts(char *optstr, jpg_dec_importopts_t *opts) { jas_tvparser_t *tvp; opts->max_samples = 64 * JAS_MEBI; if (!(tvp = jas_tvparser_create(optstr ? optstr : ""))) { return -1; } while (!jas_tvparser_next(tvp)) { switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts, jas_tvparser_gettag(tvp)))->id) { case OPT_MAXSIZE: opts->max_samples = atoi(jas_tvparser_getval(tvp)); break; default: jas_eprintf("warning: ignoring invalid option %s\n", jas_tvparser_gettag(tvp)); break; } } jas_tvparser_destroy(tvp); return 0; } /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the JPG format. */ jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t num_samples; JAS_DBGLOG(100, ("jpg_decode(%p, \"%s\")\n", in, optstr)); if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. */ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); if (opts.max_samples > 0) { if (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height, cinfo.num_components, &num_samples)) { goto error; } if (num_samples > opts.max_samples) { jas_eprintf("image is too large (%zu > %zu)\n", num_samples, opts.max_samples); goto error; } } /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. */ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; } /******************************************************************************\ * \******************************************************************************/ static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo) { jas_image_t *image; int cmptno; jas_image_cmptparm_t cmptparm; int numcmpts; JAS_DBGLOG(10, ("jpg_mkimage(%p)\n", cinfo)); image = 0; numcmpts = cinfo->output_components; if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < numcmpts; ++cmptno) { if (cinfo->image_width > JAS_IMAGE_COORD_MAX || cinfo->image_height > JAS_IMAGE_COORD_MAX) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = 1; cmptparm.vstep = 1; cmptparm.width = cinfo->image_width; cmptparm.height = cinfo->image_height; cmptparm.prec = 8; cmptparm.sgnd = false; if (jas_image_addcmpt(image, cmptno, &cmptparm)) { goto error; } } if (numcmpts == 3) { jas_image_setclrspc(image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } return image; error: if (image) { jas_image_destroy(image); } return 0; } /******************************************************************************\ * Data source code. \******************************************************************************/ static int jpg_copystreamtofile(FILE *out, jas_stream_t *in) { int c; while ((c = jas_stream_getc(in)) != EOF) { if (fputc(c, out) == EOF) { return -1; } } if (jas_stream_error(in)) { return -1; } return 0; } /******************************************************************************\ * Data sink code. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { /* Avoid compiler warnings about unused parameters. 
*/ cinfo = 0; JAS_DBGLOG(10, ("jpg_start_output(%p, %p)\n", cinfo, dinfo)); dinfo->row = 0; } static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied) { JSAMPLE *bufptr; int cmptno; JDIMENSION x; uint_fast32_t width; JAS_DBGLOG(10, ("jpg_put_pixel_rows(%p, %p)\n", cinfo, dinfo)); if (dinfo->error) { return; } assert(cinfo->output_components == jas_image_numcmpts(dinfo->image)); for (cmptno = 0; cmptno < cinfo->output_components; ++cmptno) { width = jas_image_cmptwidth(dinfo->image, cmptno); bufptr = (dinfo->buffer[0]) + cmptno; for (x = 0; x < width; ++x) { jas_matrix_set(dinfo->data, 0, x, GETJSAMPLE(*bufptr)); bufptr += cinfo->output_components; } JAS_DBGLOG(10, ( "jas_image_writecmpt called for component %d row %lu\n", cmptno, JAS_CAST(unsigned long, dinfo->row))); if (jas_image_writecmpt(dinfo->image, cmptno, 0, dinfo->row, width, 1, dinfo->data)) { dinfo->error = 1; } } dinfo->row += rows_supplied; } static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { JAS_DBGLOG(10, ("jpg_finish_output(%p, %p)\n", cinfo, dinfo)); /* Avoid compiler warnings about unused parameters. */ cinfo = 0; dinfo = 0; }
static int jpg_dec_parseopts(char *optstr, jpg_dec_importopts_t *opts) { jas_tvparser_t *tvp; opts->max_size = 0; if (!(tvp = jas_tvparser_create(optstr ? optstr : ""))) { return -1; } while (!jas_tvparser_next(tvp)) { switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts, jas_tvparser_gettag(tvp)))->id) { case OPT_MAXSIZE: opts->max_size = atoi(jas_tvparser_getval(tvp)); break; default: jas_eprintf("warning: ignoring invalid option %s\n", jas_tvparser_gettag(tvp)); break; } } jas_tvparser_destroy(tvp); return 0; }
static int jpg_dec_parseopts(char *optstr, jpg_dec_importopts_t *opts) { jas_tvparser_t *tvp; opts->max_samples = 64 * JAS_MEBI; if (!(tvp = jas_tvparser_create(optstr ? optstr : ""))) { return -1; } while (!jas_tvparser_next(tvp)) { switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts, jas_tvparser_gettag(tvp)))->id) { case OPT_MAXSIZE: opts->max_samples = atoi(jas_tvparser_getval(tvp)); break; default: jas_eprintf("warning: ignoring invalid option %s\n", jas_tvparser_gettag(tvp)); break; } } jas_tvparser_destroy(tvp); return 0; }
{'added': [(83, 'typedef struct {'), (84, '\tsize_t max_samples;'), (85, '} jpg_dec_importopts_t;'), (86, ''), (87, 'typedef enum {'), (88, '\tOPT_MAXSIZE,'), (89, '} optid_t;'), (90, ''), (138, '* Option parsing.'), (142, '\t{OPT_MAXSIZE, "max_samples"},'), (150, '\topts->max_samples = 64 * JAS_MEBI;'), (160, '\t\t\topts->max_samples = atoi(jas_tvparser_getval(tvp));'), (191, '\tsize_t num_samples;'), (192, ''), (193, '\tJAS_DBGLOG(100, ("jpg_decode(%p, \\"%s\\")\\n", in, optstr));'), (241, '\tif (opts.max_samples > 0) {'), (242, '\t\tif (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height,'), (243, '\t\t cinfo.num_components, &num_samples)) {'), (244, '\t\t\tgoto error;'), (245, '\t\t}'), (246, '\t\tif (num_samples > opts.max_samples) {'), (247, '\t\t\tjas_eprintf("image is too large (%zu > %zu)\\n", num_samples,'), (248, '\t\t\t opts.max_samples);'), (249, '\t\t\tgoto error;'), (250, '\t\t}'), (251, '\t}'), (252, '')], 'deleted': [(130, '*'), (133, 'typedef struct {'), (134, '\tsize_t max_size;'), (135, '} jpg_dec_importopts_t;'), (136, ''), (137, 'typedef enum {'), (138, '\tOPT_MAXSIZE,'), (139, '} optid_t;'), (140, ''), (142, '\t{OPT_MAXSIZE, "max_size"},'), (150, '\topts->max_size = 0;'), (160, '\t\t\topts->max_size = atoi(jas_tvparser_getval(tvp));'), (191, '\tsize_t size;'), (248, '\tif (opts.max_size) {'), (249, '\t\tif (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height,'), (250, '\t\t &size) ||'), (251, '\t\t !jas_safe_size_mul(size, cinfo.output_components, &size)) {'), (252, '\t\t\tgoto error;'), (253, '\t\t}'), (254, '\t\tif (size > opts.max_size) {'), (255, '\t\t\tjas_eprintf("image is too large\\n");'), (256, '\t\t\tgoto error;'), (257, '\t\t}'), (258, '\t}'), (259, '')]}
27
25
280
1,669
https://github.com/mdadams/jasper
CVE-2016-9395
['CWE-20']
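The row closed by the repo, cve_id, and cwe_id fields above captures the jpg_dec_parseopts side of the JasPer CVE-2016-9395 fix: per its func_before and func_after fields, the decoder option is renamed from max_size to max_samples and its default changes from 0 (no limit) to 64 * JAS_MEBI, so a finite sample budget applies even when the caller passes no options. The patched code relies on overflow-checked size multiplication (jas_safe_size_mul3 in the code_after field). The snippet below is only a minimal stand-in for that pattern, not JasPer's actual implementation; the function names are illustrative.

/* Minimal sketch (not JasPer's implementation) of the overflow-safe size
 * multiplication the patched decoder depends on. Stores a*b (or a*b*c) in
 * *result and returns true only when no size_t overflow occurred, so the
 * caller can reject an image whose sample count cannot be represented. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool safe_size_mul(size_t a, size_t b, size_t *result)
{
	/* a * b overflows exactly when b != 0 and a > SIZE_MAX / b. */
	if (b != 0 && a > SIZE_MAX / b)
		return false;
	*result = a * b;
	return true;
}

static bool safe_size_mul3(size_t a, size_t b, size_t c, size_t *result)
{
	size_t tmp;
	return safe_size_mul(a, b, &tmp) && safe_size_mul(tmp, c, result);
}

Checking a > SIZE_MAX / b before multiplying is the standard portable way to detect size_t overflow without relying on compiler builtins.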
jpg_dec.c
jpg_decode
/* * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdio.h> #include <assert.h> #include <ctype.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_debug.h" #include "jpg_jpeglib.h" #include "jpg_cod.h" /******************************************************************************\ * Types. 
\******************************************************************************/ /* JPEG decoder data sink type. */ typedef struct jpg_dest_s { /* Initialize output. */ void (*start_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output rows of decompressed data. */ void (*put_pixel_rows)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo, JDIMENSION rows_supplied); /* Cleanup output. */ void (*finish_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output buffer. */ JSAMPARRAY buffer; /* Height of output buffer. */ JDIMENSION buffer_height; /* The current row. */ JDIMENSION row; /* The image used to hold the decompressed sample data. */ jas_image_t *image; /* The row buffer. */ jas_matrix_t *data; /* The error indicator. If this is nonzero, something has gone wrong during decompression. */ int error; } jpg_dest_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied); static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static int jpg_copystreamtofile(FILE *out, jas_stream_t *in); static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo); /******************************************************************************\ * \******************************************************************************/ typedef struct { size_t max_size; } jpg_dec_importopts_t; typedef enum { OPT_MAXSIZE, } optid_t; static jas_taginfo_t decopts[] = { {OPT_MAXSIZE, "max_size"}, {-1, 0} }; static int jpg_dec_parseopts(char *optstr, jpg_dec_importopts_t *opts) { jas_tvparser_t *tvp; opts->max_size = 0; if (!(tvp = jas_tvparser_create(optstr ? optstr : ""))) { return -1; } while (!jas_tvparser_next(tvp)) { switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts, jas_tvparser_gettag(tvp)))->id) { case OPT_MAXSIZE: opts->max_size = atoi(jas_tvparser_getval(tvp)); break; default: jas_eprintf("warning: ignoring invalid option %s\n", jas_tvparser_gettag(tvp)); break; } } jas_tvparser_destroy(tvp); return 0; } /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the JPG format. */ jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t size; if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. */ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); if (opts.max_size) { if (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height, &size) || !jas_safe_size_mul(size, cinfo.output_components, &size)) { goto error; } if (size > opts.max_size) { jas_eprintf("image is too large\n"); goto error; } } /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. */ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; } /******************************************************************************\ * \******************************************************************************/ static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo) { jas_image_t *image; int cmptno; jas_image_cmptparm_t cmptparm; int numcmpts; JAS_DBGLOG(10, ("jpg_mkimage(%p)\n", cinfo)); image = 0; numcmpts = cinfo->output_components; if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < numcmpts; ++cmptno) { if (cinfo->image_width > JAS_IMAGE_COORD_MAX || cinfo->image_height > JAS_IMAGE_COORD_MAX) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = 1; cmptparm.vstep = 1; cmptparm.width = cinfo->image_width; cmptparm.height = cinfo->image_height; cmptparm.prec = 8; cmptparm.sgnd = false; if (jas_image_addcmpt(image, cmptno, &cmptparm)) { goto error; } } if (numcmpts == 3) { jas_image_setclrspc(image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } return image; error: if (image) { jas_image_destroy(image); } return 0; } /******************************************************************************\ * Data source code. \******************************************************************************/ static int jpg_copystreamtofile(FILE *out, jas_stream_t *in) { int c; while ((c = jas_stream_getc(in)) != EOF) { if (fputc(c, out) == EOF) { return -1; } } if (jas_stream_error(in)) { return -1; } return 0; } /******************************************************************************\ * Data sink code. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { /* Avoid compiler warnings about unused parameters. 
*/ cinfo = 0; JAS_DBGLOG(10, ("jpg_start_output(%p, %p)\n", cinfo, dinfo)); dinfo->row = 0; } static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied) { JSAMPLE *bufptr; int cmptno; JDIMENSION x; uint_fast32_t width; JAS_DBGLOG(10, ("jpg_put_pixel_rows(%p, %p)\n", cinfo, dinfo)); if (dinfo->error) { return; } assert(cinfo->output_components == jas_image_numcmpts(dinfo->image)); for (cmptno = 0; cmptno < cinfo->output_components; ++cmptno) { width = jas_image_cmptwidth(dinfo->image, cmptno); bufptr = (dinfo->buffer[0]) + cmptno; for (x = 0; x < width; ++x) { jas_matrix_set(dinfo->data, 0, x, GETJSAMPLE(*bufptr)); bufptr += cinfo->output_components; } JAS_DBGLOG(10, ( "jas_image_writecmpt called for component %d row %lu\n", cmptno, JAS_CAST(unsigned long, dinfo->row))); if (jas_image_writecmpt(dinfo->image, cmptno, 0, dinfo->row, width, 1, dinfo->data)) { dinfo->error = 1; } } dinfo->row += rows_supplied; } static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { JAS_DBGLOG(10, ("jpg_finish_output(%p, %p)\n", cinfo, dinfo)); /* Avoid compiler warnings about unused parameters. */ cinfo = 0; dinfo = 0; }
/* * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdio.h> #include <assert.h> #include <ctype.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_debug.h" #include "jpg_jpeglib.h" #include "jpg_cod.h" /******************************************************************************\ * Types. 
\******************************************************************************/ typedef struct { size_t max_samples; } jpg_dec_importopts_t; typedef enum { OPT_MAXSIZE, } optid_t; /* JPEG decoder data sink type. */ typedef struct jpg_dest_s { /* Initialize output. */ void (*start_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output rows of decompressed data. */ void (*put_pixel_rows)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo, JDIMENSION rows_supplied); /* Cleanup output. */ void (*finish_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output buffer. */ JSAMPARRAY buffer; /* Height of output buffer. */ JDIMENSION buffer_height; /* The current row. */ JDIMENSION row; /* The image used to hold the decompressed sample data. */ jas_image_t *image; /* The row buffer. */ jas_matrix_t *data; /* The error indicator. If this is nonzero, something has gone wrong during decompression. */ int error; } jpg_dest_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied); static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static int jpg_copystreamtofile(FILE *out, jas_stream_t *in); static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo); /******************************************************************************\ * Option parsing. \******************************************************************************/ static jas_taginfo_t decopts[] = { {OPT_MAXSIZE, "max_samples"}, {-1, 0} }; static int jpg_dec_parseopts(char *optstr, jpg_dec_importopts_t *opts) { jas_tvparser_t *tvp; opts->max_samples = 64 * JAS_MEBI; if (!(tvp = jas_tvparser_create(optstr ? optstr : ""))) { return -1; } while (!jas_tvparser_next(tvp)) { switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts, jas_tvparser_gettag(tvp)))->id) { case OPT_MAXSIZE: opts->max_samples = atoi(jas_tvparser_getval(tvp)); break; default: jas_eprintf("warning: ignoring invalid option %s\n", jas_tvparser_gettag(tvp)); break; } } jas_tvparser_destroy(tvp); return 0; } /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the JPG format. */ jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t num_samples; JAS_DBGLOG(100, ("jpg_decode(%p, \"%s\")\n", in, optstr)); if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. */ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); if (opts.max_samples > 0) { if (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height, cinfo.num_components, &num_samples)) { goto error; } if (num_samples > opts.max_samples) { jas_eprintf("image is too large (%zu > %zu)\n", num_samples, opts.max_samples); goto error; } } /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. */ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; } /******************************************************************************\ * \******************************************************************************/ static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo) { jas_image_t *image; int cmptno; jas_image_cmptparm_t cmptparm; int numcmpts; JAS_DBGLOG(10, ("jpg_mkimage(%p)\n", cinfo)); image = 0; numcmpts = cinfo->output_components; if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < numcmpts; ++cmptno) { if (cinfo->image_width > JAS_IMAGE_COORD_MAX || cinfo->image_height > JAS_IMAGE_COORD_MAX) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = 1; cmptparm.vstep = 1; cmptparm.width = cinfo->image_width; cmptparm.height = cinfo->image_height; cmptparm.prec = 8; cmptparm.sgnd = false; if (jas_image_addcmpt(image, cmptno, &cmptparm)) { goto error; } } if (numcmpts == 3) { jas_image_setclrspc(image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } return image; error: if (image) { jas_image_destroy(image); } return 0; } /******************************************************************************\ * Data source code. \******************************************************************************/ static int jpg_copystreamtofile(FILE *out, jas_stream_t *in) { int c; while ((c = jas_stream_getc(in)) != EOF) { if (fputc(c, out) == EOF) { return -1; } } if (jas_stream_error(in)) { return -1; } return 0; } /******************************************************************************\ * Data sink code. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { /* Avoid compiler warnings about unused parameters. 
*/ cinfo = 0; JAS_DBGLOG(10, ("jpg_start_output(%p, %p)\n", cinfo, dinfo)); dinfo->row = 0; } static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied) { JSAMPLE *bufptr; int cmptno; JDIMENSION x; uint_fast32_t width; JAS_DBGLOG(10, ("jpg_put_pixel_rows(%p, %p)\n", cinfo, dinfo)); if (dinfo->error) { return; } assert(cinfo->output_components == jas_image_numcmpts(dinfo->image)); for (cmptno = 0; cmptno < cinfo->output_components; ++cmptno) { width = jas_image_cmptwidth(dinfo->image, cmptno); bufptr = (dinfo->buffer[0]) + cmptno; for (x = 0; x < width; ++x) { jas_matrix_set(dinfo->data, 0, x, GETJSAMPLE(*bufptr)); bufptr += cinfo->output_components; } JAS_DBGLOG(10, ( "jas_image_writecmpt called for component %d row %lu\n", cmptno, JAS_CAST(unsigned long, dinfo->row))); if (jas_image_writecmpt(dinfo->image, cmptno, 0, dinfo->row, width, 1, dinfo->data)) { dinfo->error = 1; } } dinfo->row += rows_supplied; } static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { JAS_DBGLOG(10, ("jpg_finish_output(%p, %p)\n", cinfo, dinfo)); /* Avoid compiler warnings about unused parameters. */ cinfo = 0; dinfo = 0; }
jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t size; if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. */ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); if (opts.max_size) { if (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height, &size) || !jas_safe_size_mul(size, cinfo.output_components, &size)) { goto error; } if (size > opts.max_size) { jas_eprintf("image is too large\n"); goto error; } } /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. 
*/ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; }
jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t num_samples; JAS_DBGLOG(100, ("jpg_decode(%p, \"%s\")\n", in, optstr)); if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. */ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); if (opts.max_samples > 0) { if (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height, cinfo.num_components, &num_samples)) { goto error; } if (num_samples > opts.max_samples) { jas_eprintf("image is too large (%zu > %zu)\n", num_samples, opts.max_samples); goto error; } } /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. 
*/ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; }
{'added': [(83, 'typedef struct {'), (84, '\tsize_t max_samples;'), (85, '} jpg_dec_importopts_t;'), (86, ''), (87, 'typedef enum {'), (88, '\tOPT_MAXSIZE,'), (89, '} optid_t;'), (90, ''), (138, '* Option parsing.'), (142, '\t{OPT_MAXSIZE, "max_samples"},'), (150, '\topts->max_samples = 64 * JAS_MEBI;'), (160, '\t\t\topts->max_samples = atoi(jas_tvparser_getval(tvp));'), (191, '\tsize_t num_samples;'), (192, ''), (193, '\tJAS_DBGLOG(100, ("jpg_decode(%p, \\"%s\\")\\n", in, optstr));'), (241, '\tif (opts.max_samples > 0) {'), (242, '\t\tif (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height,'), (243, '\t\t cinfo.num_components, &num_samples)) {'), (244, '\t\t\tgoto error;'), (245, '\t\t}'), (246, '\t\tif (num_samples > opts.max_samples) {'), (247, '\t\t\tjas_eprintf("image is too large (%zu > %zu)\\n", num_samples,'), (248, '\t\t\t opts.max_samples);'), (249, '\t\t\tgoto error;'), (250, '\t\t}'), (251, '\t}'), (252, '')], 'deleted': [(130, '*'), (133, 'typedef struct {'), (134, '\tsize_t max_size;'), (135, '} jpg_dec_importopts_t;'), (136, ''), (137, 'typedef enum {'), (138, '\tOPT_MAXSIZE,'), (139, '} optid_t;'), (140, ''), (142, '\t{OPT_MAXSIZE, "max_size"},'), (150, '\topts->max_size = 0;'), (160, '\t\t\topts->max_size = atoi(jas_tvparser_getval(tvp));'), (191, '\tsize_t size;'), (248, '\tif (opts.max_size) {'), (249, '\t\tif (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height,'), (250, '\t\t &size) ||'), (251, '\t\t !jas_safe_size_mul(size, cinfo.output_components, &size)) {'), (252, '\t\t\tgoto error;'), (253, '\t\t}'), (254, '\t\tif (size > opts.max_size) {'), (255, '\t\t\tjas_eprintf("image is too large\\n");'), (256, '\t\t\tgoto error;'), (257, '\t\t}'), (258, '\t}'), (259, '')]}
27
25
280
1,669
https://github.com/mdadams/jasper
CVE-2016-9395
['CWE-20']
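This row records the companion change inside jpg_decode itself. According to its diff field, the pre-patch code only validated output_width * output_height * output_components after jpeg_start_decompress, and only when the caller had explicitly set max_size; the patch instead computes image_width * image_height * num_components with jas_safe_size_mul3 immediately after jpeg_read_header and rejects oversized or overflowing products before the decompressor is started, so the bound is enforced before the library begins allocating per-image state for a potentially huge claimed geometry. The sketch below shows that guard in isolation, with illustrative names (image_within_budget is not part of JasPer's API); a decoder would call it right after reading the header and take the error path when it returns false.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative helper (not JasPer's API): decide from the header fields
 * alone whether decoding should proceed, mirroring the patched check of
 * image_width * image_height * num_components against a sample budget. */
static bool image_within_budget(size_t width, size_t height,
    size_t components, size_t max_samples)
{
	size_t samples;

	/* width * height, detecting size_t overflow. */
	if (height != 0 && width > SIZE_MAX / height)
		return false;
	samples = width * height;

	/* (width * height) * components, detecting size_t overflow. */
	if (components != 0 && samples > SIZE_MAX / components)
		return false;
	samples *= components;

	return samples <= max_samples;
}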
processor.c
set_content_type
/* * Copyright (C) Tildeslash Ltd. All rights reserved. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License version 3. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library under certain conditions as described in each * individual source file, and distribute linked combinations * including the two. * * You must obey the GNU Affero General Public License in all respects * for all of the code used other than OpenSSL. */ #include "config.h" #ifdef HAVE_STDIO_H #include <stdio.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_STDARG_H #include <stdarg.h> #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_SETJMP_H #include <setjmp.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_STRINGS_H #include <strings.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_LIMITS_H #include <limits.h> #endif #include "monit.h" #include "processor.h" #include "base64.h" // libmonit #include "util/Str.h" #include "system/Net.h" /** * A naive quasi HTTP Processor module that can handle HTTP requests * received from a client, and return responses based on those * requests. * * This Processor delegates the actual handling of the request and * reponse to so called cervlets, which must implement two methods; * doGet and doPost. * * NOTES * This Processor is command oriented and if a second slash '/' is * found in the URL it's asumed to be the PATHINFO. In other words * this processor perceive an URL as: * * /COMMAND?QUERYSTRING/PATHINFO * * The doGet/doPost routines act's on the COMMAND. See the * cervlet.c code in this dir. for an example. * * @file */ static int _httpPostLimit; /* -------------------------------------------------------------- Prototypes */ static void do_service(Socket_T); static void destroy_entry(void *); static char *get_date(char *, int); static char *get_server(char *, int); static void create_headers(HttpRequest); static void send_response(HttpRequest, HttpResponse); static boolean_t basic_authenticate(HttpRequest); static void done(HttpRequest, HttpResponse); static void destroy_HttpRequest(HttpRequest); static void reset_response(HttpResponse res); static HttpParameter parse_parameters(char *); static boolean_t create_parameters(HttpRequest req); static void destroy_HttpResponse(HttpResponse); static HttpRequest create_HttpRequest(Socket_T); static void internal_error(Socket_T, int, char *); static HttpResponse create_HttpResponse(Socket_T); static boolean_t is_authenticated(HttpRequest, HttpResponse); static int get_next_token(char *s, int *cursor, char **r); /* * An object for implementors of the service functions; doGet and * doPost. Implementing modules i.e. CERVLETS, must implement the * doGet and doPost functions and the engine will call the add_Impl * function to setup the callback to these functions. 
*/ struct ServiceImpl { void(*doGet)(HttpRequest, HttpResponse); void(*doPost)(HttpRequest, HttpResponse); } Impl; /* ------------------------------------------------------------------ Public */ /** * Process a HTTP request. This is done by dispatching to the service * function. * @param s A Socket_T representing the client connection */ void *http_processor(Socket_T s) { if (! Net_canRead(Socket_getSocket(s), REQUEST_TIMEOUT * 1000)) internal_error(s, SC_REQUEST_TIMEOUT, "Time out when handling the Request"); else do_service(s); Socket_free(&s); return NULL; } /** * Callback for implementors of cervlet functions. * @param doGetFunc doGet function * @param doPostFunc doPost function */ void add_Impl(void(*doGet)(HttpRequest, HttpResponse), void(*doPost)(HttpRequest, HttpResponse)) { Impl.doGet = doGet; Impl.doPost = doPost; } void Processor_setHttpPostLimit() { // Base buffer size (space for e.g. "action=<name>") _httpPostLimit = STRLEN; // Add space for each service for (Service_T s = servicelist; s; s = s->next) _httpPostLimit += strlen("&service=") + strlen(s->name); } void escapeHTML(StringBuffer_T sb, const char *s) { for (int i = 0; s[i]; i++) { if (s[i] == '<') StringBuffer_append(sb, "&lt;"); else if (s[i] == '>') StringBuffer_append(sb, "&gt;"); else if (s[i] == '&') StringBuffer_append(sb, "&amp;"); else StringBuffer_append(sb, "%c", s[i]); } } /** * Send an error message * @param res HttpResponse object * @param code Error Code to lookup and send * @param msg Optional error message (may be NULL) */ void send_error(HttpRequest req, HttpResponse res, int code, const char *msg, ...) { ASSERT(msg); const char *err = get_status_string(code); reset_response(res); set_content_type(res, "text/html"); set_status(res, code); StringBuffer_append(res->outputbuffer, "<html>" "<head>" "<title>%d %s</title>" "</head>" "<body bgcolor=#FFFFFF>" "<h2>%s</h2>", code, err, err); char *message; va_list ap; va_start(ap, msg); message = Str_vcat(msg, ap); va_end(ap); escapeHTML(res->outputbuffer, message); if (code != SC_UNAUTHORIZED) // We log details in basic_authenticate() already, no need to log generic error sent to client here LogError("HttpRequest: error -- client [%s]: %s %d %s\n", NVLSTR(Socket_getRemoteHost(req->S)), SERVER_PROTOCOL, code, message); FREE(message); char server[STRLEN]; StringBuffer_append(res->outputbuffer, "<hr>" "<a href='%s'><font size=-1>%s</font></a>" "</body>" "</html>" "\r\n", SERVER_URL, get_server(server, STRLEN)); } /* -------------------------------------------------------------- Properties */ /** * Adds a response header with the given name and value. If the header * had already been set the new value overwrites the previous one. 
* @param res HttpResponse object * @param name Header key name * @param value Header key value */ void set_header(HttpResponse res, const char *name, const char *value) { HttpHeader h = NULL; ASSERT(res); ASSERT(name); NEW(h); h->name = Str_dup(name); h->value = Str_dup(value); if (res->headers) { HttpHeader n, p; for (n = p = res->headers; p; n = p, p = p->next) { if (IS(p->name, name)) { FREE(p->value); p->value = Str_dup(value); destroy_entry(h); return; } } n->next = h; } else { res->headers = h; } } /** * Sets the status code for the response * @param res HttpResponse object * @param code A HTTP status code <100-510> * @param msg The status code string message */ void set_status(HttpResponse res, int code) { res->status = code; res->status_msg = get_status_string(code); } /** * Set the response content-type * @param res HttpResponse object * @param mime Mime content type, e.g. text/html */ void set_content_type(HttpResponse res, const char *mime) { set_header(res, "Content-Type", mime); } /** * Returns the value of the specified header * @param req HttpRequest object * @param name Header name to lookup the value for * @return The value of the specified header, NULL if not found */ const char *get_header(HttpRequest req, const char *name) { for (HttpHeader p = req->headers; p; p = p->next) if (IS(p->name, name)) return (p->value); return NULL; } /** * Returns the value of the specified parameter * @param req HttpRequest object * @param name The request parameter key to lookup the value for * @return The value of the specified parameter, or NULL if not found */ const char *get_parameter(HttpRequest req, const char *name) { for (HttpParameter p = req->params; p; p = p->next) if (IS(p->name, name)) return (p->value); return NULL; } /** * Returns a string containing all (extra) headers found in the * response. The headers are newline separated in the returned * string. * @param res HttpResponse object * @return A String containing all headers set in the Response object */ char *get_headers(HttpResponse res) { char buf[RES_STRLEN]; char *b = buf; *buf = 0; for (HttpHeader p = res->headers; (((b - buf) + STRLEN) < RES_STRLEN) && p; p = p->next) b += snprintf(b, STRLEN,"%s: %s\r\n", p->name, p->value); return buf[0] ? Str_dup(buf) : NULL; } /** * Lookup the corresponding HTTP status string for the given status * code * @param status A HTTP status code * @return A default status message for the specified HTTP status * code. 
*/ const char *get_status_string(int status) { switch (status) { case SC_OK: return "OK"; case SC_ACCEPTED: return "Accepted"; case SC_BAD_GATEWAY: return "Bad Gateway"; case SC_BAD_REQUEST: return "Bad Request"; case SC_CONFLICT: return "Conflict"; case SC_CONTINUE: return "Continue"; case SC_CREATED: return "Created"; case SC_EXPECTATION_FAILED: return "Expectation Failed"; case SC_FORBIDDEN: return "Forbidden"; case SC_GATEWAY_TIMEOUT: return "Gateway Timeout"; case SC_GONE: return "Gone"; case SC_VERSION_NOT_SUPPORTED: return "HTTP Version Not Supported"; case SC_INTERNAL_SERVER_ERROR: return "Internal Server Error"; case SC_LENGTH_REQUIRED: return "Length Required"; case SC_METHOD_NOT_ALLOWED: return "Method Not Allowed"; case SC_MOVED_PERMANENTLY: return "Moved Permanently"; case SC_MOVED_TEMPORARILY: return "Moved Temporarily"; case SC_MULTIPLE_CHOICES: return "Multiple Choices"; case SC_NO_CONTENT: return "No Content"; case SC_NON_AUTHORITATIVE: return "Non-Authoritative Information"; case SC_NOT_ACCEPTABLE: return "Not Acceptable"; case SC_NOT_FOUND: return "Not Found"; case SC_NOT_IMPLEMENTED: return "Not Implemented"; case SC_NOT_MODIFIED: return "Not Modified"; case SC_PARTIAL_CONTENT: return "Partial Content"; case SC_PAYMENT_REQUIRED: return "Payment Required"; case SC_PRECONDITION_FAILED: return "Precondition Failed"; case SC_PROXY_AUTHENTICATION_REQUIRED: return "Proxy Authentication Required"; case SC_REQUEST_ENTITY_TOO_LARGE: return "Request Entity Too Large"; case SC_REQUEST_TIMEOUT: return "Request Timeout"; case SC_REQUEST_URI_TOO_LARGE: return "Request URI Too Large"; case SC_RANGE_NOT_SATISFIABLE: return "Requested Range Not Satisfiable"; case SC_RESET_CONTENT: return "Reset Content"; case SC_SEE_OTHER: return "See Other"; case SC_SERVICE_UNAVAILABLE: return "Service Unavailable"; case SC_SWITCHING_PROTOCOLS: return "Switching Protocols"; case SC_UNAUTHORIZED: return "Unauthorized"; case SC_UNSUPPORTED_MEDIA_TYPE: return "Unsupported Media Type"; case SC_USE_PROXY: return "Use Proxy"; default: { return "Unknown HTTP status"; } } } /* ----------------------------------------------------------------- Private */ /** * Receives standard HTTP requests from a client socket and dispatches * them to the doXXX methods defined in a cervlet module. */ static void do_service(Socket_T s) { volatile HttpResponse res = create_HttpResponse(s); volatile HttpRequest req = create_HttpRequest(s); if (res && req) { if (Run.httpd.flags & Httpd_Ssl) set_header(res, "Strict-Transport-Security", "max-age=63072000; includeSubdomains; preload"); if (is_authenticated(req, res)) { if (IS(req->method, METHOD_GET)) Impl.doGet(req, res); else if (IS(req->method, METHOD_POST)) Impl.doPost(req, res); else send_error(req, res, SC_NOT_IMPLEMENTED, "Method not implemented"); } send_response(req, res); } done(req, res); } /** * Return a (RFC1123) Date string */ static char *get_date(char *result, int size) { time_t now; time(&now); if (strftime(result, size, DATEFMT, gmtime(&now)) <= 0) *result = 0; return result; } /** * Return this server name + version */ static char *get_server(char *result, int size) { snprintf(result, size, "%s %s", SERVER_NAME, Run.httpd.flags & Httpd_Signature ? SERVER_VERSION : ""); return result; } /** * Send the response to the client. If the response has already been * commited, this function does nothing. */ static void send_response(HttpRequest req, HttpResponse res) { Socket_T S = res->S; if (! 
res->is_committed) { char date[STRLEN]; char server[STRLEN]; #ifdef HAVE_LIBZ const char *acceptEncoding = get_header(req, "Accept-Encoding"); boolean_t canCompress = acceptEncoding && Str_sub(acceptEncoding, "gzip") ? true : false; #else boolean_t canCompress = false; #endif const void *body = NULL; size_t bodyLength = 0; if (canCompress) { body = StringBuffer_toCompressed(res->outputbuffer, 6, &bodyLength); set_header(res, "Content-Encoding", "gzip"); } else { body = StringBuffer_toString(res->outputbuffer); bodyLength = StringBuffer_length(res->outputbuffer); } char *headers = get_headers(res); res->is_committed = true; get_date(date, STRLEN); get_server(server, STRLEN); Socket_print(S, "%s %d %s\r\n", res->protocol, res->status, res->status_msg); Socket_print(S, "Date: %s\r\n", date); Socket_print(S, "Server: %s\r\n", server); Socket_print(S, "Content-Length: %zu\r\n", bodyLength); Socket_print(S, "Connection: close\r\n"); if (headers) Socket_print(S, "%s", headers); Socket_print(S, "\r\n"); if (bodyLength) Socket_write(S, (unsigned char *)body, bodyLength); FREE(headers); } } /* --------------------------------------------------------------- Factories */ /** * Returns a new HttpRequest object wrapping the client request */ static HttpRequest create_HttpRequest(Socket_T S) { char line[REQ_STRLEN]; if (Socket_readLine(S, line, sizeof(line)) == NULL) { internal_error(S, SC_BAD_REQUEST, "No request found"); return NULL; } Str_chomp(line); char method[STRLEN]; char url[REQ_STRLEN]; char protocol[STRLEN]; if (sscanf(line, "%255s %1023s HTTP/%3[1.0]", method, url, protocol) != 3) { internal_error(S, SC_BAD_REQUEST, "Cannot parse request"); return NULL; } if (strlen(url) >= MAX_URL_LENGTH) { internal_error(S, SC_BAD_REQUEST, "[error] URL too long"); return NULL; } HttpRequest req = NULL; NEW(req); req->S = S; Util_urlDecode(url); req->url = Str_dup(url); req->method = Str_dup(method); req->protocol = Str_dup(protocol); create_headers(req); if (! create_parameters(req)) { destroy_HttpRequest(req); internal_error(S, SC_BAD_REQUEST, "Cannot parse Request parameters"); return NULL; } return req; } /** * Returns a new HttpResponse object wrapping a default response. Use * the set_XXX methods to change the object. */ static HttpResponse create_HttpResponse(Socket_T S) { HttpResponse res = NULL; NEW(res); res->S = S; res->status = SC_OK; res->outputbuffer = StringBuffer_create(256); res->is_committed = false; res->protocol = SERVER_PROTOCOL; res->status_msg = get_status_string(SC_OK); return res; } /** * Create HTTP headers for the given request */ static void create_headers(HttpRequest req) { char line[REQ_STRLEN] = {0}; while (Socket_readLine(req->S, line, sizeof(line)) && ! (Str_isEqual(line, "\r\n") || Str_isEqual(line, "\n"))) { char *value = strchr(line, ':'); if (value) { HttpHeader header = NULL; NEW(header); *value++ = 0; Str_trim(line); Str_trim(value); Str_chomp(value); header->name = Str_dup(line); header->value = Str_dup(value); header->next = req->headers; req->headers = header; } } } /** * Create parameters for the given request. Returns false if an error * occurs. */ static boolean_t create_parameters(HttpRequest req) { char *query_string = NULL; if (IS(req->method, METHOD_POST)) { int len; const char *content_length = get_header(req, "Content-Length"); if (! 
content_length || sscanf(content_length, "%d", &len) != 1 || len < 0 || len > _httpPostLimit) return false; if (len != 0) { query_string = CALLOC(1, _httpPostLimit + 1); int n = Socket_read(req->S, query_string, len); if (n != len) { FREE(query_string); return false; } } } else if (IS(req->method, METHOD_GET)) { char *p = strchr(req->url, '?'); if (p) { *p++ = 0; query_string = Str_dup(p); } } if (query_string) { if (*query_string) { char *p = strchr(query_string, '/'); if (p) { *p++ = 0; req->pathinfo = Str_dup(p); } req->params = parse_parameters(query_string); } FREE(query_string); } return true; } /* ----------------------------------------------------------------- Cleanup */ /** * Clear the response output buffer and headers */ static void reset_response(HttpResponse res) { if (res->headers) { destroy_entry(res->headers); res->headers = NULL; /* Release Pragma */ } StringBuffer_clear(res->outputbuffer); } /** * Finalize the request and response object. */ static void done(HttpRequest req, HttpResponse res) { destroy_HttpRequest(req); destroy_HttpResponse(res); } /** * Free a HttpRequest object */ static void destroy_HttpRequest(HttpRequest req) { if (req) { FREE(req->method); FREE(req->url); FREE(req->pathinfo); FREE(req->protocol); FREE(req->remote_user); if (req->headers) destroy_entry(req->headers); if (req->params) destroy_entry(req->params); FREE(req); } } /** * Free a HttpResponse object */ static void destroy_HttpResponse(HttpResponse res) { if (res) { StringBuffer_free(&(res->outputbuffer)); if (res->headers) destroy_entry(res->headers); FREE(res); } } /** * Free a (linked list of) http entry object(s). Both HttpHeader and * HttpParameter are of this type. */ static void destroy_entry(void *p) { struct entry *h = p; if (h->next) destroy_entry(h->next); FREE(h->name); FREE(h->value); FREE(h); } /* ----------------------------------------------------- Checkers/Validators */ /** * Do Basic Authentication if this auth. style is allowed. */ static boolean_t is_authenticated(HttpRequest req, HttpResponse res) { if (Run.httpd.credentials) { if (! basic_authenticate(req)) { // Send just generic error message to the client to not disclose e.g. username existence in case of credentials harvesting attack send_error(req, res, SC_UNAUTHORIZED, "You are not authorized to access monit. Either you supplied the wrong credentials (e.g. bad password), or your browser doesn't understand how to supply the credentials required"); set_header(res, "WWW-Authenticate", "Basic realm=\"monit\""); return false; } } return true; } /** * Authenticate the basic-credentials (uname/password) submitted by * the user. */ static boolean_t basic_authenticate(HttpRequest req) { const char *credentials = get_header(req, "Authorization"); if (! (credentials && Str_startsWith(credentials, "Basic "))) { LogError("HttpRequest: access denied -- client [%s]: missing or invalid Authorization header\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } char buf[STRLEN] = {0}; strncpy(buf, &credentials[6], sizeof(buf) - 1); char uname[STRLEN] = {0}; if (decode_base64((unsigned char *)uname, buf) <= 0) { LogError("HttpRequest: access denied -- client [%s]: invalid Authorization header\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } if (! *uname) { LogError("HttpRequest: access denied -- client [%s]: empty username\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } char *password = password = strchr(uname, ':'); if (! password || ! 
*password) { LogError("HttpRequest: access denied -- client [%s]: empty password\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } *password++ = 0; /* Check if user exist */ if (! Util_getUserCredentials(uname)) { LogError("HttpRequest: access denied -- client [%s]: unknown user '%s'\n", NVLSTR(Socket_getRemoteHost(req->S)), uname); return false; } /* Check if user has supplied the right password */ if (! Util_checkCredentials(uname, password)) { LogError("HttpRequest: access denied -- client [%s]: wrong password for user '%s'\n", NVLSTR(Socket_getRemoteHost(req->S)), uname); return false; } req->remote_user = Str_dup(uname); return true; } /* --------------------------------------------------------------- Utilities */ /** * Send an error message to the client. This is a helper function, * used internal if the service function fails to setup the framework * properly; i.e. with a valid HttpRequest and a valid HttpResponse. */ static void internal_error(Socket_T S, int status, char *msg) { char date[STRLEN]; char server[STRLEN]; const char *status_msg = get_status_string(status); get_date(date, STRLEN); get_server(server, STRLEN); Socket_print(S, "%s %d %s\r\n" "Date: %s\r\n" "Server: %s\r\n" "Content-Type: text/html\r\n" "Connection: close\r\n" "\r\n" "<html><head><title>%s</title></head>" "<body bgcolor=#FFFFFF><h2>%s</h2>%s<p>" "<hr><a href='%s'><font size=-1>%s</font></a>" "</body></html>\r\n", SERVER_PROTOCOL, status, status_msg, date, server, status_msg, status_msg, msg, SERVER_URL, server); DEBUG("HttpRequest: error -- client [%s]: %s %d %s\n", NVLSTR(Socket_getRemoteHost(S)), SERVER_PROTOCOL, status, msg ? msg : status_msg); } /** * Parse request parameters from the given query string and return a * linked list of HttpParameters */ static HttpParameter parse_parameters(char *query_string) { #define KEY 1 #define VALUE 2 int token; int cursor = 0; char *key = NULL; char *value = NULL; HttpParameter head = NULL; while ((token = get_next_token(query_string, &cursor, &value))) { if (token == KEY) key = value; else if (token == VALUE) { HttpParameter p = NULL; if (! key) goto error; NEW(p); p->name = key; p->value = value; p->next = head; head = p; key = NULL; } } return head; error: FREE(key); FREE(value); if ( head != NULL ) destroy_entry(head); return NULL; } /** * A mini-scanner for tokenizing a query string */ static int get_next_token(char *s, int *cursor, char **r) { int i = *cursor; while (s[*cursor]) { if (s[*cursor+1] == '=') { *cursor += 1; *r = Str_ndup(&s[i], (*cursor-i)); return KEY; } if (s[*cursor] == '=') { while (s[*cursor] && s[*cursor] != '&') *cursor += 1; if (s[*cursor] == '&') { *r = Str_ndup(&s[i+1], (*cursor-i)-1); *cursor += 1; } else { *r = Str_ndup(&s[i+1], (*cursor-i)); } return VALUE; } *cursor += 1; } return 0; }
/* * Copyright (C) Tildeslash Ltd. All rights reserved. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License version 3. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library under certain conditions as described in each * individual source file, and distribute linked combinations * including the two. * * You must obey the GNU Affero General Public License in all respects * for all of the code used other than OpenSSL. */ #include "config.h" #ifdef HAVE_STDIO_H #include <stdio.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_STDARG_H #include <stdarg.h> #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_SETJMP_H #include <setjmp.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_STRINGS_H #include <strings.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_LIMITS_H #include <limits.h> #endif #include "monit.h" #include "processor.h" #include "base64.h" // libmonit #include "util/Str.h" #include "system/Net.h" /** * A naive quasi HTTP Processor module that can handle HTTP requests * received from a client, and return responses based on those * requests. * * This Processor delegates the actual handling of the request and * reponse to so called cervlets, which must implement two methods; * doGet and doPost. * * NOTES * This Processor is command oriented and if a second slash '/' is * found in the URL it's asumed to be the PATHINFO. In other words * this processor perceive an URL as: * * /COMMAND?QUERYSTRING/PATHINFO * * The doGet/doPost routines act's on the COMMAND. See the * cervlet.c code in this dir. for an example. * * @file */ static int _httpPostLimit; /* -------------------------------------------------------------- Prototypes */ static void do_service(Socket_T); static void destroy_entry(void *); static char *get_date(char *, int); static char *get_server(char *, int); static void create_headers(HttpRequest); static void send_response(HttpRequest, HttpResponse); static boolean_t basic_authenticate(HttpRequest); static void done(HttpRequest, HttpResponse); static void destroy_HttpRequest(HttpRequest); static void reset_response(HttpResponse res); static HttpParameter parse_parameters(char *); static boolean_t create_parameters(HttpRequest req); static void destroy_HttpResponse(HttpResponse); static HttpRequest create_HttpRequest(Socket_T); static void internal_error(Socket_T, int, char *); static HttpResponse create_HttpResponse(Socket_T); static boolean_t is_authenticated(HttpRequest, HttpResponse); static int get_next_token(char *s, int *cursor, char **r); /* * An object for implementors of the service functions; doGet and * doPost. Implementing modules i.e. CERVLETS, must implement the * doGet and doPost functions and the engine will call the add_Impl * function to setup the callback to these functions. 
*/ struct ServiceImpl { void(*doGet)(HttpRequest, HttpResponse); void(*doPost)(HttpRequest, HttpResponse); } Impl; /* ------------------------------------------------------------------ Public */ /** * Process a HTTP request. This is done by dispatching to the service * function. * @param s A Socket_T representing the client connection */ void *http_processor(Socket_T s) { if (! Net_canRead(Socket_getSocket(s), REQUEST_TIMEOUT * 1000)) internal_error(s, SC_REQUEST_TIMEOUT, "Time out when handling the Request"); else do_service(s); Socket_free(&s); return NULL; } /** * Callback for implementors of cervlet functions. * @param doGetFunc doGet function * @param doPostFunc doPost function */ void add_Impl(void(*doGet)(HttpRequest, HttpResponse), void(*doPost)(HttpRequest, HttpResponse)) { Impl.doGet = doGet; Impl.doPost = doPost; } void Processor_setHttpPostLimit() { // Base buffer size (space for e.g. "action=<name>") _httpPostLimit = STRLEN; // Add space for each service for (Service_T s = servicelist; s; s = s->next) _httpPostLimit += strlen("&service=") + strlen(s->name); } void escapeHTML(StringBuffer_T sb, const char *s) { for (int i = 0; s[i]; i++) { if (s[i] == '<') StringBuffer_append(sb, "&lt;"); else if (s[i] == '>') StringBuffer_append(sb, "&gt;"); else if (s[i] == '&') StringBuffer_append(sb, "&amp;"); else StringBuffer_append(sb, "%c", s[i]); } } /** * Send an error message * @param res HttpResponse object * @param code Error Code to lookup and send * @param msg Optional error message (may be NULL) */ void send_error(HttpRequest req, HttpResponse res, int code, const char *msg, ...) { ASSERT(msg); const char *err = get_status_string(code); reset_response(res); set_content_type(res, "text/html"); set_status(res, code); StringBuffer_append(res->outputbuffer, "<html>" "<head>" "<title>%d %s</title>" "</head>" "<body bgcolor=#FFFFFF>" "<h2>%s</h2>", code, err, err); char *message; va_list ap; va_start(ap, msg); message = Str_vcat(msg, ap); va_end(ap); escapeHTML(res->outputbuffer, message); if (code != SC_UNAUTHORIZED) // We log details in basic_authenticate() already, no need to log generic error sent to client here LogError("HttpRequest: error -- client [%s]: %s %d %s\n", NVLSTR(Socket_getRemoteHost(req->S)), SERVER_PROTOCOL, code, message); FREE(message); char server[STRLEN]; StringBuffer_append(res->outputbuffer, "<hr>" "<a href='%s'><font size=-1>%s</font></a>" "</body>" "</html>" "\r\n", SERVER_URL, get_server(server, STRLEN)); } /* -------------------------------------------------------------- Properties */ /** * Adds a response header with the given name and value. If the header * had already been set the new value overwrites the previous one. * @param res HttpResponse object * @param name Header key name * @param value Header key value */ void set_header(HttpResponse res, const char *name, const char *value, ...) 
{ HttpHeader h = NULL; ASSERT(res); ASSERT(name); NEW(h); h->name = Str_dup(name); va_list ap; va_start(ap, value); h->value = Str_vcat(value, ap); va_end(ap); if (res->headers) { HttpHeader n, p; for (n = p = res->headers; p; n = p, p = p->next) { if (IS(p->name, name)) { FREE(p->value); p->value = Str_dup(value); destroy_entry(h); return; } } n->next = h; } else { res->headers = h; } } /** * Sets the status code for the response * @param res HttpResponse object * @param code A HTTP status code <100-510> * @param msg The status code string message */ void set_status(HttpResponse res, int code) { res->status = code; res->status_msg = get_status_string(code); } /** * Set the response content-type * @param res HttpResponse object * @param mime Mime content type, e.g. text/html */ void set_content_type(HttpResponse res, const char *mime) { set_header(res, "Content-Type", "%s", mime); } /** * Returns the value of the specified header * @param req HttpRequest object * @param name Header name to lookup the value for * @return The value of the specified header, NULL if not found */ const char *get_header(HttpRequest req, const char *name) { for (HttpHeader p = req->headers; p; p = p->next) if (IS(p->name, name)) return (p->value); return NULL; } /** * Returns the value of the specified parameter * @param req HttpRequest object * @param name The request parameter key to lookup the value for * @return The value of the specified parameter, or NULL if not found */ const char *get_parameter(HttpRequest req, const char *name) { for (HttpParameter p = req->params; p; p = p->next) if (IS(p->name, name)) return (p->value); return NULL; } /** * Returns a string containing all (extra) headers found in the * response. The headers are newline separated in the returned * string. * @param res HttpResponse object * @return A String containing all headers set in the Response object */ char *get_headers(HttpResponse res) { char buf[RES_STRLEN]; char *b = buf; *buf = 0; for (HttpHeader p = res->headers; (((b - buf) + STRLEN) < RES_STRLEN) && p; p = p->next) b += snprintf(b, STRLEN,"%s: %s\r\n", p->name, p->value); return buf[0] ? Str_dup(buf) : NULL; } /** * Lookup the corresponding HTTP status string for the given status * code * @param status A HTTP status code * @return A default status message for the specified HTTP status * code. 
*/ const char *get_status_string(int status) { switch (status) { case SC_OK: return "OK"; case SC_ACCEPTED: return "Accepted"; case SC_BAD_GATEWAY: return "Bad Gateway"; case SC_BAD_REQUEST: return "Bad Request"; case SC_CONFLICT: return "Conflict"; case SC_CONTINUE: return "Continue"; case SC_CREATED: return "Created"; case SC_EXPECTATION_FAILED: return "Expectation Failed"; case SC_FORBIDDEN: return "Forbidden"; case SC_GATEWAY_TIMEOUT: return "Gateway Timeout"; case SC_GONE: return "Gone"; case SC_VERSION_NOT_SUPPORTED: return "HTTP Version Not Supported"; case SC_INTERNAL_SERVER_ERROR: return "Internal Server Error"; case SC_LENGTH_REQUIRED: return "Length Required"; case SC_METHOD_NOT_ALLOWED: return "Method Not Allowed"; case SC_MOVED_PERMANENTLY: return "Moved Permanently"; case SC_MOVED_TEMPORARILY: return "Moved Temporarily"; case SC_MULTIPLE_CHOICES: return "Multiple Choices"; case SC_NO_CONTENT: return "No Content"; case SC_NON_AUTHORITATIVE: return "Non-Authoritative Information"; case SC_NOT_ACCEPTABLE: return "Not Acceptable"; case SC_NOT_FOUND: return "Not Found"; case SC_NOT_IMPLEMENTED: return "Not Implemented"; case SC_NOT_MODIFIED: return "Not Modified"; case SC_PARTIAL_CONTENT: return "Partial Content"; case SC_PAYMENT_REQUIRED: return "Payment Required"; case SC_PRECONDITION_FAILED: return "Precondition Failed"; case SC_PROXY_AUTHENTICATION_REQUIRED: return "Proxy Authentication Required"; case SC_REQUEST_ENTITY_TOO_LARGE: return "Request Entity Too Large"; case SC_REQUEST_TIMEOUT: return "Request Timeout"; case SC_REQUEST_URI_TOO_LARGE: return "Request URI Too Large"; case SC_RANGE_NOT_SATISFIABLE: return "Requested Range Not Satisfiable"; case SC_RESET_CONTENT: return "Reset Content"; case SC_SEE_OTHER: return "See Other"; case SC_SERVICE_UNAVAILABLE: return "Service Unavailable"; case SC_SWITCHING_PROTOCOLS: return "Switching Protocols"; case SC_UNAUTHORIZED: return "Unauthorized"; case SC_UNSUPPORTED_MEDIA_TYPE: return "Unsupported Media Type"; case SC_USE_PROXY: return "Use Proxy"; default: { return "Unknown HTTP status"; } } } /* ----------------------------------------------------------------- Private */ /** * Receives standard HTTP requests from a client socket and dispatches * them to the doXXX methods defined in a cervlet module. */ static void do_service(Socket_T s) { volatile HttpResponse res = create_HttpResponse(s); volatile HttpRequest req = create_HttpRequest(s); if (res && req) { if (Run.httpd.flags & Httpd_Ssl) set_header(res, "Strict-Transport-Security", "max-age=63072000; includeSubdomains; preload"); if (is_authenticated(req, res)) { set_header(res, "Set-Cookie", "securitytoken=%s; Max-Age=600; HttpOnly; SameSite=strict%s", res->token, Run.httpd.flags & Httpd_Ssl ? "; Secure" : ""); if (IS(req->method, METHOD_GET)) Impl.doGet(req, res); else if (IS(req->method, METHOD_POST)) Impl.doPost(req, res); else send_error(req, res, SC_NOT_IMPLEMENTED, "Method not implemented"); } send_response(req, res); } done(req, res); } /** * Return a (RFC1123) Date string */ static char *get_date(char *result, int size) { time_t now; time(&now); if (strftime(result, size, DATEFMT, gmtime(&now)) <= 0) *result = 0; return result; } /** * Return this server name + version */ static char *get_server(char *result, int size) { snprintf(result, size, "%s %s", SERVER_NAME, Run.httpd.flags & Httpd_Signature ? SERVER_VERSION : ""); return result; } /** * Send the response to the client. If the response has already been * commited, this function does nothing. 
*/ static void send_response(HttpRequest req, HttpResponse res) { Socket_T S = res->S; if (! res->is_committed) { char date[STRLEN]; char server[STRLEN]; #ifdef HAVE_LIBZ const char *acceptEncoding = get_header(req, "Accept-Encoding"); boolean_t canCompress = acceptEncoding && Str_sub(acceptEncoding, "gzip") ? true : false; #else boolean_t canCompress = false; #endif const void *body = NULL; size_t bodyLength = 0; if (canCompress) { body = StringBuffer_toCompressed(res->outputbuffer, 6, &bodyLength); set_header(res, "Content-Encoding", "gzip"); } else { body = StringBuffer_toString(res->outputbuffer); bodyLength = StringBuffer_length(res->outputbuffer); } char *headers = get_headers(res); res->is_committed = true; get_date(date, STRLEN); get_server(server, STRLEN); Socket_print(S, "%s %d %s\r\n", res->protocol, res->status, res->status_msg); Socket_print(S, "Date: %s\r\n", date); Socket_print(S, "Server: %s\r\n", server); Socket_print(S, "Content-Length: %zu\r\n", bodyLength); Socket_print(S, "Connection: close\r\n"); if (headers) Socket_print(S, "%s", headers); Socket_print(S, "\r\n"); if (bodyLength) Socket_write(S, (unsigned char *)body, bodyLength); FREE(headers); } } /* --------------------------------------------------------------- Factories */ /** * Returns a new HttpRequest object wrapping the client request */ static HttpRequest create_HttpRequest(Socket_T S) { char line[REQ_STRLEN]; if (Socket_readLine(S, line, sizeof(line)) == NULL) { internal_error(S, SC_BAD_REQUEST, "No request found"); return NULL; } Str_chomp(line); char method[STRLEN]; char url[REQ_STRLEN]; char protocol[STRLEN]; if (sscanf(line, "%255s %1023s HTTP/%3[1.0]", method, url, protocol) != 3) { internal_error(S, SC_BAD_REQUEST, "Cannot parse request"); return NULL; } if (strlen(url) >= MAX_URL_LENGTH) { internal_error(S, SC_BAD_REQUEST, "[error] URL too long"); return NULL; } HttpRequest req = NULL; NEW(req); req->S = S; Util_urlDecode(url); req->url = Str_dup(url); req->method = Str_dup(method); req->protocol = Str_dup(protocol); create_headers(req); if (! create_parameters(req)) { destroy_HttpRequest(req); internal_error(S, SC_BAD_REQUEST, "Cannot parse Request parameters"); return NULL; } return req; } /** * Returns a new HttpResponse object wrapping a default response. Use * the set_XXX methods to change the object. */ static HttpResponse create_HttpResponse(Socket_T S) { HttpResponse res = NULL; NEW(res); res->S = S; res->status = SC_OK; res->outputbuffer = StringBuffer_create(256); res->is_committed = false; res->protocol = SERVER_PROTOCOL; res->status_msg = get_status_string(SC_OK); Util_getToken(res->token); return res; } /** * Create HTTP headers for the given request */ static void create_headers(HttpRequest req) { char line[REQ_STRLEN] = {0}; while (Socket_readLine(req->S, line, sizeof(line)) && ! (Str_isEqual(line, "\r\n") || Str_isEqual(line, "\n"))) { char *value = strchr(line, ':'); if (value) { HttpHeader header = NULL; NEW(header); *value++ = 0; Str_trim(line); Str_trim(value); Str_chomp(value); header->name = Str_dup(line); header->value = Str_dup(value); header->next = req->headers; req->headers = header; } } } /** * Create parameters for the given request. Returns false if an error * occurs. */ static boolean_t create_parameters(HttpRequest req) { char *query_string = NULL; if (IS(req->method, METHOD_POST)) { int len; const char *content_length = get_header(req, "Content-Length"); if (! 
content_length || sscanf(content_length, "%d", &len) != 1 || len < 0 || len > _httpPostLimit) return false; if (len != 0) { query_string = CALLOC(1, _httpPostLimit + 1); int n = Socket_read(req->S, query_string, len); if (n != len) { FREE(query_string); return false; } } } else if (IS(req->method, METHOD_GET)) { char *p = strchr(req->url, '?'); if (p) { *p++ = 0; query_string = Str_dup(p); } } if (query_string) { if (*query_string) { char *p = strchr(query_string, '/'); if (p) { *p++ = 0; req->pathinfo = Str_dup(p); } req->params = parse_parameters(query_string); } FREE(query_string); } return true; } /* ----------------------------------------------------------------- Cleanup */ /** * Clear the response output buffer and headers */ static void reset_response(HttpResponse res) { if (res->headers) { destroy_entry(res->headers); res->headers = NULL; /* Release Pragma */ } StringBuffer_clear(res->outputbuffer); } /** * Finalize the request and response object. */ static void done(HttpRequest req, HttpResponse res) { destroy_HttpRequest(req); destroy_HttpResponse(res); } /** * Free a HttpRequest object */ static void destroy_HttpRequest(HttpRequest req) { if (req) { FREE(req->method); FREE(req->url); FREE(req->pathinfo); FREE(req->protocol); FREE(req->remote_user); if (req->headers) destroy_entry(req->headers); if (req->params) destroy_entry(req->params); FREE(req); } } /** * Free a HttpResponse object */ static void destroy_HttpResponse(HttpResponse res) { if (res) { StringBuffer_free(&(res->outputbuffer)); if (res->headers) destroy_entry(res->headers); FREE(res); } } /** * Free a (linked list of) http entry object(s). Both HttpHeader and * HttpParameter are of this type. */ static void destroy_entry(void *p) { struct entry *h = p; if (h->next) destroy_entry(h->next); FREE(h->name); FREE(h->value); FREE(h); } /* ----------------------------------------------------- Checkers/Validators */ /** * Do Basic Authentication if this auth. style is allowed. */ static boolean_t is_authenticated(HttpRequest req, HttpResponse res) { if (Run.httpd.credentials) { if (! basic_authenticate(req)) { // Send just generic error message to the client to not disclose e.g. username existence in case of credentials harvesting attack send_error(req, res, SC_UNAUTHORIZED, "You are not authorized to access monit. Either you supplied the wrong credentials (e.g. bad password), or your browser doesn't understand how to supply the credentials required"); set_header(res, "WWW-Authenticate", "Basic realm=\"monit\""); return false; } } if (IS(req->method, METHOD_POST)) { // Check CSRF double-submit cookie (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet#Double_Submit_Cookie) const char *cookie = get_header(req, "Cookie"); const char *token = get_parameter(req, "securitytoken"); if (! cookie) { LogError("HttpRequest: access denied -- client [%s]: missing CSRF token cookie\n", NVLSTR(Socket_getRemoteHost(req->S))); send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token"); return false; } if (! token) { LogError("HttpRequest: access denied -- client [%s]: missing CSRF token in HTTP parameter\n", NVLSTR(Socket_getRemoteHost(req->S))); send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token"); return false; } if (! 
Str_startsWith(cookie, "securitytoken=")) { LogError("HttpRequest: access denied -- client [%s]: no CSRF token in cookie\n", NVLSTR(Socket_getRemoteHost(req->S))); send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token"); return false; } if (Str_compareConstantTime(cookie + 14, token)) { LogError("HttpRequest: access denied -- client [%s]: CSRF token mismatch\n", NVLSTR(Socket_getRemoteHost(req->S))); send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token"); return false; } } return true; } /** * Authenticate the basic-credentials (uname/password) submitted by * the user. */ static boolean_t basic_authenticate(HttpRequest req) { const char *credentials = get_header(req, "Authorization"); if (! (credentials && Str_startsWith(credentials, "Basic "))) { LogError("HttpRequest: access denied -- client [%s]: missing or invalid Authorization header\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } char buf[STRLEN] = {0}; strncpy(buf, &credentials[6], sizeof(buf) - 1); char uname[STRLEN] = {0}; if (decode_base64((unsigned char *)uname, buf) <= 0) { LogError("HttpRequest: access denied -- client [%s]: invalid Authorization header\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } if (! *uname) { LogError("HttpRequest: access denied -- client [%s]: empty username\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } char *password = password = strchr(uname, ':'); if (! password || ! *password) { LogError("HttpRequest: access denied -- client [%s]: empty password\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } *password++ = 0; /* Check if user exist */ if (! Util_getUserCredentials(uname)) { LogError("HttpRequest: access denied -- client [%s]: unknown user '%s'\n", NVLSTR(Socket_getRemoteHost(req->S)), uname); return false; } /* Check if user has supplied the right password */ if (! Util_checkCredentials(uname, password)) { LogError("HttpRequest: access denied -- client [%s]: wrong password for user '%s'\n", NVLSTR(Socket_getRemoteHost(req->S)), uname); return false; } req->remote_user = Str_dup(uname); return true; } /* --------------------------------------------------------------- Utilities */ /** * Send an error message to the client. This is a helper function, * used internal if the service function fails to setup the framework * properly; i.e. with a valid HttpRequest and a valid HttpResponse. */ static void internal_error(Socket_T S, int status, char *msg) { char date[STRLEN]; char server[STRLEN]; const char *status_msg = get_status_string(status); get_date(date, STRLEN); get_server(server, STRLEN); Socket_print(S, "%s %d %s\r\n" "Date: %s\r\n" "Server: %s\r\n" "Content-Type: text/html\r\n" "Connection: close\r\n" "\r\n" "<html><head><title>%s</title></head>" "<body bgcolor=#FFFFFF><h2>%s</h2>%s<p>" "<hr><a href='%s'><font size=-1>%s</font></a>" "</body></html>\r\n", SERVER_PROTOCOL, status, status_msg, date, server, status_msg, status_msg, msg, SERVER_URL, server); DEBUG("HttpRequest: error -- client [%s]: %s %d %s\n", NVLSTR(Socket_getRemoteHost(S)), SERVER_PROTOCOL, status, msg ? msg : status_msg); } /** * Parse request parameters from the given query string and return a * linked list of HttpParameters */ static HttpParameter parse_parameters(char *query_string) { #define KEY 1 #define VALUE 2 int token; int cursor = 0; char *key = NULL; char *value = NULL; HttpParameter head = NULL; while ((token = get_next_token(query_string, &cursor, &value))) { if (token == KEY) key = value; else if (token == VALUE) { HttpParameter p = NULL; if (! 
key) goto error; NEW(p); p->name = key; p->value = value; p->next = head; head = p; key = NULL; } } return head; error: FREE(key); FREE(value); if ( head != NULL ) destroy_entry(head); return NULL; } /** * A mini-scanner for tokenizing a query string */ static int get_next_token(char *s, int *cursor, char **r) { int i = *cursor; while (s[*cursor]) { if (s[*cursor+1] == '=') { *cursor += 1; *r = Str_ndup(&s[i], (*cursor-i)); return KEY; } if (s[*cursor] == '=') { while (s[*cursor] && s[*cursor] != '&') *cursor += 1; if (s[*cursor] == '&') { *r = Str_ndup(&s[i+1], (*cursor-i)-1); *cursor += 1; } else { *r = Str_ndup(&s[i+1], (*cursor-i)); } return VALUE; } *cursor += 1; } return 0; }
void set_content_type(HttpResponse res, const char *mime) { set_header(res, "Content-Type", mime); }
void set_content_type(HttpResponse res, const char *mime) { set_header(res, "Content-Type", "%s", mime); }
{'added': [(244, 'void set_header(HttpResponse res, const char *name, const char *value, ...) {'), (252, ' va_list ap;'), (253, ' va_start(ap, value);'), (254, ' h->value = Str_vcat(value, ap);'), (255, ' va_end(ap);'), (291, ' set_header(res, "Content-Type", "%s", mime);'), (448, ' set_header(res, "Set-Cookie", "securitytoken=%s; Max-Age=600; HttpOnly; SameSite=strict%s", res->token, Run.httpd.flags & Httpd_Ssl ? "; Secure" : "");'), (581, ' Util_getToken(res->token);'), (735, ' if (IS(req->method, METHOD_POST)) {'), (736, ' // Check CSRF double-submit cookie (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet#Double_Submit_Cookie)'), (737, ' const char *cookie = get_header(req, "Cookie");'), (738, ' const char *token = get_parameter(req, "securitytoken");'), (739, ' if (! cookie) {'), (740, ' LogError("HttpRequest: access denied -- client [%s]: missing CSRF token cookie\\n", NVLSTR(Socket_getRemoteHost(req->S)));'), (741, ' send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token");'), (742, ' return false;'), (743, ' }'), (744, ' if (! token) {'), (745, ' LogError("HttpRequest: access denied -- client [%s]: missing CSRF token in HTTP parameter\\n", NVLSTR(Socket_getRemoteHost(req->S)));'), (746, ' send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token");'), (747, ' return false;'), (748, ' }'), (749, ' if (! Str_startsWith(cookie, "securitytoken=")) {'), (750, ' LogError("HttpRequest: access denied -- client [%s]: no CSRF token in cookie\\n", NVLSTR(Socket_getRemoteHost(req->S)));'), (751, ' send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token");'), (752, ' return false;'), (753, ' }'), (754, ' if (Str_compareConstantTime(cookie + 14, token)) {'), (755, ' LogError("HttpRequest: access denied -- client [%s]: CSRF token mismatch\\n", NVLSTR(Socket_getRemoteHost(req->S)));'), (756, ' send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token");'), (757, ' return false;'), (758, ' }'), (759, ' }')], 'deleted': [(244, 'void set_header(HttpResponse res, const char *name, const char *value) {'), (252, ' h->value = Str_dup(value);'), (288, ' set_header(res, "Content-Type", mime);')]}
33
3
581
3625
https://bitbucket.org/tildeslash/monit
CVE-2016-7067
['CWE-352']
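The record above (CVE-2016-7067, CWE-352) closes a cross-site request forgery hole in monit's embedded HTTP server: the patched `create_HttpResponse` issues a per-response `securitytoken` cookie, and `is_authenticated` rejects any POST whose `securitytoken` request parameter does not match that cookie (a double-submit cookie check). The standalone sketch below illustrates only that comparison step; `constant_time_equal` is a hypothetical helper standing in for monit's `Str_compareConstantTime`, and the cookie is simplified to a single `securitytoken=<value>` string rather than a full `Cookie` header.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for monit's Str_compareConstantTime():
 * compares two strings without an early exit, so mismatch position
 * is not leaked through timing. */
static bool constant_time_equal(const char *a, const char *b) {
    size_t la = strlen(a), lb = strlen(b);
    unsigned char diff = (unsigned char)(la ^ lb);
    for (size_t i = 0; i < la && i < lb; i++)
        diff |= (unsigned char)(a[i] ^ b[i]);
    return diff == 0;
}

/* Double-submit cookie check (simplified): accept the POST only if the
 * cookie carries a "securitytoken=" value and that value matches the
 * token echoed back as a request parameter. */
static bool csrf_token_ok(const char *cookie, const char *param_token) {
    const char prefix[] = "securitytoken=";
    if (!cookie || !param_token)
        return false;                               /* missing cookie or parameter */
    if (strncmp(cookie, prefix, sizeof(prefix) - 1) != 0)
        return false;                               /* no token present in the cookie */
    return constant_time_equal(cookie + sizeof(prefix) - 1, param_token);
}

int main(void) {
    printf("%d\n", csrf_token_ok("securitytoken=abc123", "abc123")); /* 1: tokens match */
    printf("%d\n", csrf_token_ok("securitytoken=abc123", "evil"));   /* 0: forged request */
    return 0;
}
```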
processor.c
set_header
/* * Copyright (C) Tildeslash Ltd. All rights reserved. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License version 3. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library under certain conditions as described in each * individual source file, and distribute linked combinations * including the two. * * You must obey the GNU Affero General Public License in all respects * for all of the code used other than OpenSSL. */ #include "config.h" #ifdef HAVE_STDIO_H #include <stdio.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_STDARG_H #include <stdarg.h> #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_SETJMP_H #include <setjmp.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_STRINGS_H #include <strings.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_LIMITS_H #include <limits.h> #endif #include "monit.h" #include "processor.h" #include "base64.h" // libmonit #include "util/Str.h" #include "system/Net.h" /** * A naive quasi HTTP Processor module that can handle HTTP requests * received from a client, and return responses based on those * requests. * * This Processor delegates the actual handling of the request and * reponse to so called cervlets, which must implement two methods; * doGet and doPost. * * NOTES * This Processor is command oriented and if a second slash '/' is * found in the URL it's asumed to be the PATHINFO. In other words * this processor perceive an URL as: * * /COMMAND?QUERYSTRING/PATHINFO * * The doGet/doPost routines act's on the COMMAND. See the * cervlet.c code in this dir. for an example. * * @file */ static int _httpPostLimit; /* -------------------------------------------------------------- Prototypes */ static void do_service(Socket_T); static void destroy_entry(void *); static char *get_date(char *, int); static char *get_server(char *, int); static void create_headers(HttpRequest); static void send_response(HttpRequest, HttpResponse); static boolean_t basic_authenticate(HttpRequest); static void done(HttpRequest, HttpResponse); static void destroy_HttpRequest(HttpRequest); static void reset_response(HttpResponse res); static HttpParameter parse_parameters(char *); static boolean_t create_parameters(HttpRequest req); static void destroy_HttpResponse(HttpResponse); static HttpRequest create_HttpRequest(Socket_T); static void internal_error(Socket_T, int, char *); static HttpResponse create_HttpResponse(Socket_T); static boolean_t is_authenticated(HttpRequest, HttpResponse); static int get_next_token(char *s, int *cursor, char **r); /* * An object for implementors of the service functions; doGet and * doPost. Implementing modules i.e. CERVLETS, must implement the * doGet and doPost functions and the engine will call the add_Impl * function to setup the callback to these functions. 
*/ struct ServiceImpl { void(*doGet)(HttpRequest, HttpResponse); void(*doPost)(HttpRequest, HttpResponse); } Impl; /* ------------------------------------------------------------------ Public */ /** * Process a HTTP request. This is done by dispatching to the service * function. * @param s A Socket_T representing the client connection */ void *http_processor(Socket_T s) { if (! Net_canRead(Socket_getSocket(s), REQUEST_TIMEOUT * 1000)) internal_error(s, SC_REQUEST_TIMEOUT, "Time out when handling the Request"); else do_service(s); Socket_free(&s); return NULL; } /** * Callback for implementors of cervlet functions. * @param doGetFunc doGet function * @param doPostFunc doPost function */ void add_Impl(void(*doGet)(HttpRequest, HttpResponse), void(*doPost)(HttpRequest, HttpResponse)) { Impl.doGet = doGet; Impl.doPost = doPost; } void Processor_setHttpPostLimit() { // Base buffer size (space for e.g. "action=<name>") _httpPostLimit = STRLEN; // Add space for each service for (Service_T s = servicelist; s; s = s->next) _httpPostLimit += strlen("&service=") + strlen(s->name); } void escapeHTML(StringBuffer_T sb, const char *s) { for (int i = 0; s[i]; i++) { if (s[i] == '<') StringBuffer_append(sb, "&lt;"); else if (s[i] == '>') StringBuffer_append(sb, "&gt;"); else if (s[i] == '&') StringBuffer_append(sb, "&amp;"); else StringBuffer_append(sb, "%c", s[i]); } } /** * Send an error message * @param res HttpResponse object * @param code Error Code to lookup and send * @param msg Optional error message (may be NULL) */ void send_error(HttpRequest req, HttpResponse res, int code, const char *msg, ...) { ASSERT(msg); const char *err = get_status_string(code); reset_response(res); set_content_type(res, "text/html"); set_status(res, code); StringBuffer_append(res->outputbuffer, "<html>" "<head>" "<title>%d %s</title>" "</head>" "<body bgcolor=#FFFFFF>" "<h2>%s</h2>", code, err, err); char *message; va_list ap; va_start(ap, msg); message = Str_vcat(msg, ap); va_end(ap); escapeHTML(res->outputbuffer, message); if (code != SC_UNAUTHORIZED) // We log details in basic_authenticate() already, no need to log generic error sent to client here LogError("HttpRequest: error -- client [%s]: %s %d %s\n", NVLSTR(Socket_getRemoteHost(req->S)), SERVER_PROTOCOL, code, message); FREE(message); char server[STRLEN]; StringBuffer_append(res->outputbuffer, "<hr>" "<a href='%s'><font size=-1>%s</font></a>" "</body>" "</html>" "\r\n", SERVER_URL, get_server(server, STRLEN)); } /* -------------------------------------------------------------- Properties */ /** * Adds a response header with the given name and value. If the header * had already been set the new value overwrites the previous one. 
* @param res HttpResponse object * @param name Header key name * @param value Header key value */ void set_header(HttpResponse res, const char *name, const char *value) { HttpHeader h = NULL; ASSERT(res); ASSERT(name); NEW(h); h->name = Str_dup(name); h->value = Str_dup(value); if (res->headers) { HttpHeader n, p; for (n = p = res->headers; p; n = p, p = p->next) { if (IS(p->name, name)) { FREE(p->value); p->value = Str_dup(value); destroy_entry(h); return; } } n->next = h; } else { res->headers = h; } } /** * Sets the status code for the response * @param res HttpResponse object * @param code A HTTP status code <100-510> * @param msg The status code string message */ void set_status(HttpResponse res, int code) { res->status = code; res->status_msg = get_status_string(code); } /** * Set the response content-type * @param res HttpResponse object * @param mime Mime content type, e.g. text/html */ void set_content_type(HttpResponse res, const char *mime) { set_header(res, "Content-Type", mime); } /** * Returns the value of the specified header * @param req HttpRequest object * @param name Header name to lookup the value for * @return The value of the specified header, NULL if not found */ const char *get_header(HttpRequest req, const char *name) { for (HttpHeader p = req->headers; p; p = p->next) if (IS(p->name, name)) return (p->value); return NULL; } /** * Returns the value of the specified parameter * @param req HttpRequest object * @param name The request parameter key to lookup the value for * @return The value of the specified parameter, or NULL if not found */ const char *get_parameter(HttpRequest req, const char *name) { for (HttpParameter p = req->params; p; p = p->next) if (IS(p->name, name)) return (p->value); return NULL; } /** * Returns a string containing all (extra) headers found in the * response. The headers are newline separated in the returned * string. * @param res HttpResponse object * @return A String containing all headers set in the Response object */ char *get_headers(HttpResponse res) { char buf[RES_STRLEN]; char *b = buf; *buf = 0; for (HttpHeader p = res->headers; (((b - buf) + STRLEN) < RES_STRLEN) && p; p = p->next) b += snprintf(b, STRLEN,"%s: %s\r\n", p->name, p->value); return buf[0] ? Str_dup(buf) : NULL; } /** * Lookup the corresponding HTTP status string for the given status * code * @param status A HTTP status code * @return A default status message for the specified HTTP status * code. 
*/ const char *get_status_string(int status) { switch (status) { case SC_OK: return "OK"; case SC_ACCEPTED: return "Accepted"; case SC_BAD_GATEWAY: return "Bad Gateway"; case SC_BAD_REQUEST: return "Bad Request"; case SC_CONFLICT: return "Conflict"; case SC_CONTINUE: return "Continue"; case SC_CREATED: return "Created"; case SC_EXPECTATION_FAILED: return "Expectation Failed"; case SC_FORBIDDEN: return "Forbidden"; case SC_GATEWAY_TIMEOUT: return "Gateway Timeout"; case SC_GONE: return "Gone"; case SC_VERSION_NOT_SUPPORTED: return "HTTP Version Not Supported"; case SC_INTERNAL_SERVER_ERROR: return "Internal Server Error"; case SC_LENGTH_REQUIRED: return "Length Required"; case SC_METHOD_NOT_ALLOWED: return "Method Not Allowed"; case SC_MOVED_PERMANENTLY: return "Moved Permanently"; case SC_MOVED_TEMPORARILY: return "Moved Temporarily"; case SC_MULTIPLE_CHOICES: return "Multiple Choices"; case SC_NO_CONTENT: return "No Content"; case SC_NON_AUTHORITATIVE: return "Non-Authoritative Information"; case SC_NOT_ACCEPTABLE: return "Not Acceptable"; case SC_NOT_FOUND: return "Not Found"; case SC_NOT_IMPLEMENTED: return "Not Implemented"; case SC_NOT_MODIFIED: return "Not Modified"; case SC_PARTIAL_CONTENT: return "Partial Content"; case SC_PAYMENT_REQUIRED: return "Payment Required"; case SC_PRECONDITION_FAILED: return "Precondition Failed"; case SC_PROXY_AUTHENTICATION_REQUIRED: return "Proxy Authentication Required"; case SC_REQUEST_ENTITY_TOO_LARGE: return "Request Entity Too Large"; case SC_REQUEST_TIMEOUT: return "Request Timeout"; case SC_REQUEST_URI_TOO_LARGE: return "Request URI Too Large"; case SC_RANGE_NOT_SATISFIABLE: return "Requested Range Not Satisfiable"; case SC_RESET_CONTENT: return "Reset Content"; case SC_SEE_OTHER: return "See Other"; case SC_SERVICE_UNAVAILABLE: return "Service Unavailable"; case SC_SWITCHING_PROTOCOLS: return "Switching Protocols"; case SC_UNAUTHORIZED: return "Unauthorized"; case SC_UNSUPPORTED_MEDIA_TYPE: return "Unsupported Media Type"; case SC_USE_PROXY: return "Use Proxy"; default: { return "Unknown HTTP status"; } } } /* ----------------------------------------------------------------- Private */ /** * Receives standard HTTP requests from a client socket and dispatches * them to the doXXX methods defined in a cervlet module. */ static void do_service(Socket_T s) { volatile HttpResponse res = create_HttpResponse(s); volatile HttpRequest req = create_HttpRequest(s); if (res && req) { if (Run.httpd.flags & Httpd_Ssl) set_header(res, "Strict-Transport-Security", "max-age=63072000; includeSubdomains; preload"); if (is_authenticated(req, res)) { if (IS(req->method, METHOD_GET)) Impl.doGet(req, res); else if (IS(req->method, METHOD_POST)) Impl.doPost(req, res); else send_error(req, res, SC_NOT_IMPLEMENTED, "Method not implemented"); } send_response(req, res); } done(req, res); } /** * Return a (RFC1123) Date string */ static char *get_date(char *result, int size) { time_t now; time(&now); if (strftime(result, size, DATEFMT, gmtime(&now)) <= 0) *result = 0; return result; } /** * Return this server name + version */ static char *get_server(char *result, int size) { snprintf(result, size, "%s %s", SERVER_NAME, Run.httpd.flags & Httpd_Signature ? SERVER_VERSION : ""); return result; } /** * Send the response to the client. If the response has already been * commited, this function does nothing. */ static void send_response(HttpRequest req, HttpResponse res) { Socket_T S = res->S; if (! 
res->is_committed) { char date[STRLEN]; char server[STRLEN]; #ifdef HAVE_LIBZ const char *acceptEncoding = get_header(req, "Accept-Encoding"); boolean_t canCompress = acceptEncoding && Str_sub(acceptEncoding, "gzip") ? true : false; #else boolean_t canCompress = false; #endif const void *body = NULL; size_t bodyLength = 0; if (canCompress) { body = StringBuffer_toCompressed(res->outputbuffer, 6, &bodyLength); set_header(res, "Content-Encoding", "gzip"); } else { body = StringBuffer_toString(res->outputbuffer); bodyLength = StringBuffer_length(res->outputbuffer); } char *headers = get_headers(res); res->is_committed = true; get_date(date, STRLEN); get_server(server, STRLEN); Socket_print(S, "%s %d %s\r\n", res->protocol, res->status, res->status_msg); Socket_print(S, "Date: %s\r\n", date); Socket_print(S, "Server: %s\r\n", server); Socket_print(S, "Content-Length: %zu\r\n", bodyLength); Socket_print(S, "Connection: close\r\n"); if (headers) Socket_print(S, "%s", headers); Socket_print(S, "\r\n"); if (bodyLength) Socket_write(S, (unsigned char *)body, bodyLength); FREE(headers); } } /* --------------------------------------------------------------- Factories */ /** * Returns a new HttpRequest object wrapping the client request */ static HttpRequest create_HttpRequest(Socket_T S) { char line[REQ_STRLEN]; if (Socket_readLine(S, line, sizeof(line)) == NULL) { internal_error(S, SC_BAD_REQUEST, "No request found"); return NULL; } Str_chomp(line); char method[STRLEN]; char url[REQ_STRLEN]; char protocol[STRLEN]; if (sscanf(line, "%255s %1023s HTTP/%3[1.0]", method, url, protocol) != 3) { internal_error(S, SC_BAD_REQUEST, "Cannot parse request"); return NULL; } if (strlen(url) >= MAX_URL_LENGTH) { internal_error(S, SC_BAD_REQUEST, "[error] URL too long"); return NULL; } HttpRequest req = NULL; NEW(req); req->S = S; Util_urlDecode(url); req->url = Str_dup(url); req->method = Str_dup(method); req->protocol = Str_dup(protocol); create_headers(req); if (! create_parameters(req)) { destroy_HttpRequest(req); internal_error(S, SC_BAD_REQUEST, "Cannot parse Request parameters"); return NULL; } return req; } /** * Returns a new HttpResponse object wrapping a default response. Use * the set_XXX methods to change the object. */ static HttpResponse create_HttpResponse(Socket_T S) { HttpResponse res = NULL; NEW(res); res->S = S; res->status = SC_OK; res->outputbuffer = StringBuffer_create(256); res->is_committed = false; res->protocol = SERVER_PROTOCOL; res->status_msg = get_status_string(SC_OK); return res; } /** * Create HTTP headers for the given request */ static void create_headers(HttpRequest req) { char line[REQ_STRLEN] = {0}; while (Socket_readLine(req->S, line, sizeof(line)) && ! (Str_isEqual(line, "\r\n") || Str_isEqual(line, "\n"))) { char *value = strchr(line, ':'); if (value) { HttpHeader header = NULL; NEW(header); *value++ = 0; Str_trim(line); Str_trim(value); Str_chomp(value); header->name = Str_dup(line); header->value = Str_dup(value); header->next = req->headers; req->headers = header; } } } /** * Create parameters for the given request. Returns false if an error * occurs. */ static boolean_t create_parameters(HttpRequest req) { char *query_string = NULL; if (IS(req->method, METHOD_POST)) { int len; const char *content_length = get_header(req, "Content-Length"); if (! 
content_length || sscanf(content_length, "%d", &len) != 1 || len < 0 || len > _httpPostLimit) return false; if (len != 0) { query_string = CALLOC(1, _httpPostLimit + 1); int n = Socket_read(req->S, query_string, len); if (n != len) { FREE(query_string); return false; } } } else if (IS(req->method, METHOD_GET)) { char *p = strchr(req->url, '?'); if (p) { *p++ = 0; query_string = Str_dup(p); } } if (query_string) { if (*query_string) { char *p = strchr(query_string, '/'); if (p) { *p++ = 0; req->pathinfo = Str_dup(p); } req->params = parse_parameters(query_string); } FREE(query_string); } return true; } /* ----------------------------------------------------------------- Cleanup */ /** * Clear the response output buffer and headers */ static void reset_response(HttpResponse res) { if (res->headers) { destroy_entry(res->headers); res->headers = NULL; /* Release Pragma */ } StringBuffer_clear(res->outputbuffer); } /** * Finalize the request and response object. */ static void done(HttpRequest req, HttpResponse res) { destroy_HttpRequest(req); destroy_HttpResponse(res); } /** * Free a HttpRequest object */ static void destroy_HttpRequest(HttpRequest req) { if (req) { FREE(req->method); FREE(req->url); FREE(req->pathinfo); FREE(req->protocol); FREE(req->remote_user); if (req->headers) destroy_entry(req->headers); if (req->params) destroy_entry(req->params); FREE(req); } } /** * Free a HttpResponse object */ static void destroy_HttpResponse(HttpResponse res) { if (res) { StringBuffer_free(&(res->outputbuffer)); if (res->headers) destroy_entry(res->headers); FREE(res); } } /** * Free a (linked list of) http entry object(s). Both HttpHeader and * HttpParameter are of this type. */ static void destroy_entry(void *p) { struct entry *h = p; if (h->next) destroy_entry(h->next); FREE(h->name); FREE(h->value); FREE(h); } /* ----------------------------------------------------- Checkers/Validators */ /** * Do Basic Authentication if this auth. style is allowed. */ static boolean_t is_authenticated(HttpRequest req, HttpResponse res) { if (Run.httpd.credentials) { if (! basic_authenticate(req)) { // Send just generic error message to the client to not disclose e.g. username existence in case of credentials harvesting attack send_error(req, res, SC_UNAUTHORIZED, "You are not authorized to access monit. Either you supplied the wrong credentials (e.g. bad password), or your browser doesn't understand how to supply the credentials required"); set_header(res, "WWW-Authenticate", "Basic realm=\"monit\""); return false; } } return true; } /** * Authenticate the basic-credentials (uname/password) submitted by * the user. */ static boolean_t basic_authenticate(HttpRequest req) { const char *credentials = get_header(req, "Authorization"); if (! (credentials && Str_startsWith(credentials, "Basic "))) { LogError("HttpRequest: access denied -- client [%s]: missing or invalid Authorization header\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } char buf[STRLEN] = {0}; strncpy(buf, &credentials[6], sizeof(buf) - 1); char uname[STRLEN] = {0}; if (decode_base64((unsigned char *)uname, buf) <= 0) { LogError("HttpRequest: access denied -- client [%s]: invalid Authorization header\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } if (! *uname) { LogError("HttpRequest: access denied -- client [%s]: empty username\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } char *password = password = strchr(uname, ':'); if (! password || ! 
*password) { LogError("HttpRequest: access denied -- client [%s]: empty password\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } *password++ = 0; /* Check if user exist */ if (! Util_getUserCredentials(uname)) { LogError("HttpRequest: access denied -- client [%s]: unknown user '%s'\n", NVLSTR(Socket_getRemoteHost(req->S)), uname); return false; } /* Check if user has supplied the right password */ if (! Util_checkCredentials(uname, password)) { LogError("HttpRequest: access denied -- client [%s]: wrong password for user '%s'\n", NVLSTR(Socket_getRemoteHost(req->S)), uname); return false; } req->remote_user = Str_dup(uname); return true; } /* --------------------------------------------------------------- Utilities */ /** * Send an error message to the client. This is a helper function, * used internal if the service function fails to setup the framework * properly; i.e. with a valid HttpRequest and a valid HttpResponse. */ static void internal_error(Socket_T S, int status, char *msg) { char date[STRLEN]; char server[STRLEN]; const char *status_msg = get_status_string(status); get_date(date, STRLEN); get_server(server, STRLEN); Socket_print(S, "%s %d %s\r\n" "Date: %s\r\n" "Server: %s\r\n" "Content-Type: text/html\r\n" "Connection: close\r\n" "\r\n" "<html><head><title>%s</title></head>" "<body bgcolor=#FFFFFF><h2>%s</h2>%s<p>" "<hr><a href='%s'><font size=-1>%s</font></a>" "</body></html>\r\n", SERVER_PROTOCOL, status, status_msg, date, server, status_msg, status_msg, msg, SERVER_URL, server); DEBUG("HttpRequest: error -- client [%s]: %s %d %s\n", NVLSTR(Socket_getRemoteHost(S)), SERVER_PROTOCOL, status, msg ? msg : status_msg); } /** * Parse request parameters from the given query string and return a * linked list of HttpParameters */ static HttpParameter parse_parameters(char *query_string) { #define KEY 1 #define VALUE 2 int token; int cursor = 0; char *key = NULL; char *value = NULL; HttpParameter head = NULL; while ((token = get_next_token(query_string, &cursor, &value))) { if (token == KEY) key = value; else if (token == VALUE) { HttpParameter p = NULL; if (! key) goto error; NEW(p); p->name = key; p->value = value; p->next = head; head = p; key = NULL; } } return head; error: FREE(key); FREE(value); if ( head != NULL ) destroy_entry(head); return NULL; } /** * A mini-scanner for tokenizing a query string */ static int get_next_token(char *s, int *cursor, char **r) { int i = *cursor; while (s[*cursor]) { if (s[*cursor+1] == '=') { *cursor += 1; *r = Str_ndup(&s[i], (*cursor-i)); return KEY; } if (s[*cursor] == '=') { while (s[*cursor] && s[*cursor] != '&') *cursor += 1; if (s[*cursor] == '&') { *r = Str_ndup(&s[i+1], (*cursor-i)-1); *cursor += 1; } else { *r = Str_ndup(&s[i+1], (*cursor-i)); } return VALUE; } *cursor += 1; } return 0; }
/* * Copyright (C) Tildeslash Ltd. All rights reserved. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License version 3. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library under certain conditions as described in each * individual source file, and distribute linked combinations * including the two. * * You must obey the GNU Affero General Public License in all respects * for all of the code used other than OpenSSL. */ #include "config.h" #ifdef HAVE_STDIO_H #include <stdio.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_STDARG_H #include <stdarg.h> #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef HAVE_SETJMP_H #include <setjmp.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_STRINGS_H #include <strings.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_LIMITS_H #include <limits.h> #endif #include "monit.h" #include "processor.h" #include "base64.h" // libmonit #include "util/Str.h" #include "system/Net.h" /** * A naive quasi HTTP Processor module that can handle HTTP requests * received from a client, and return responses based on those * requests. * * This Processor delegates the actual handling of the request and * reponse to so called cervlets, which must implement two methods; * doGet and doPost. * * NOTES * This Processor is command oriented and if a second slash '/' is * found in the URL it's asumed to be the PATHINFO. In other words * this processor perceive an URL as: * * /COMMAND?QUERYSTRING/PATHINFO * * The doGet/doPost routines act's on the COMMAND. See the * cervlet.c code in this dir. for an example. * * @file */ static int _httpPostLimit; /* -------------------------------------------------------------- Prototypes */ static void do_service(Socket_T); static void destroy_entry(void *); static char *get_date(char *, int); static char *get_server(char *, int); static void create_headers(HttpRequest); static void send_response(HttpRequest, HttpResponse); static boolean_t basic_authenticate(HttpRequest); static void done(HttpRequest, HttpResponse); static void destroy_HttpRequest(HttpRequest); static void reset_response(HttpResponse res); static HttpParameter parse_parameters(char *); static boolean_t create_parameters(HttpRequest req); static void destroy_HttpResponse(HttpResponse); static HttpRequest create_HttpRequest(Socket_T); static void internal_error(Socket_T, int, char *); static HttpResponse create_HttpResponse(Socket_T); static boolean_t is_authenticated(HttpRequest, HttpResponse); static int get_next_token(char *s, int *cursor, char **r); /* * An object for implementors of the service functions; doGet and * doPost. Implementing modules i.e. CERVLETS, must implement the * doGet and doPost functions and the engine will call the add_Impl * function to setup the callback to these functions. 
*/ struct ServiceImpl { void(*doGet)(HttpRequest, HttpResponse); void(*doPost)(HttpRequest, HttpResponse); } Impl; /* ------------------------------------------------------------------ Public */ /** * Process a HTTP request. This is done by dispatching to the service * function. * @param s A Socket_T representing the client connection */ void *http_processor(Socket_T s) { if (! Net_canRead(Socket_getSocket(s), REQUEST_TIMEOUT * 1000)) internal_error(s, SC_REQUEST_TIMEOUT, "Time out when handling the Request"); else do_service(s); Socket_free(&s); return NULL; } /** * Callback for implementors of cervlet functions. * @param doGetFunc doGet function * @param doPostFunc doPost function */ void add_Impl(void(*doGet)(HttpRequest, HttpResponse), void(*doPost)(HttpRequest, HttpResponse)) { Impl.doGet = doGet; Impl.doPost = doPost; } void Processor_setHttpPostLimit() { // Base buffer size (space for e.g. "action=<name>") _httpPostLimit = STRLEN; // Add space for each service for (Service_T s = servicelist; s; s = s->next) _httpPostLimit += strlen("&service=") + strlen(s->name); } void escapeHTML(StringBuffer_T sb, const char *s) { for (int i = 0; s[i]; i++) { if (s[i] == '<') StringBuffer_append(sb, "&lt;"); else if (s[i] == '>') StringBuffer_append(sb, "&gt;"); else if (s[i] == '&') StringBuffer_append(sb, "&amp;"); else StringBuffer_append(sb, "%c", s[i]); } } /** * Send an error message * @param res HttpResponse object * @param code Error Code to lookup and send * @param msg Optional error message (may be NULL) */ void send_error(HttpRequest req, HttpResponse res, int code, const char *msg, ...) { ASSERT(msg); const char *err = get_status_string(code); reset_response(res); set_content_type(res, "text/html"); set_status(res, code); StringBuffer_append(res->outputbuffer, "<html>" "<head>" "<title>%d %s</title>" "</head>" "<body bgcolor=#FFFFFF>" "<h2>%s</h2>", code, err, err); char *message; va_list ap; va_start(ap, msg); message = Str_vcat(msg, ap); va_end(ap); escapeHTML(res->outputbuffer, message); if (code != SC_UNAUTHORIZED) // We log details in basic_authenticate() already, no need to log generic error sent to client here LogError("HttpRequest: error -- client [%s]: %s %d %s\n", NVLSTR(Socket_getRemoteHost(req->S)), SERVER_PROTOCOL, code, message); FREE(message); char server[STRLEN]; StringBuffer_append(res->outputbuffer, "<hr>" "<a href='%s'><font size=-1>%s</font></a>" "</body>" "</html>" "\r\n", SERVER_URL, get_server(server, STRLEN)); } /* -------------------------------------------------------------- Properties */ /** * Adds a response header with the given name and value. If the header * had already been set the new value overwrites the previous one. * @param res HttpResponse object * @param name Header key name * @param value Header key value */ void set_header(HttpResponse res, const char *name, const char *value, ...) 
{ HttpHeader h = NULL; ASSERT(res); ASSERT(name); NEW(h); h->name = Str_dup(name); va_list ap; va_start(ap, value); h->value = Str_vcat(value, ap); va_end(ap); if (res->headers) { HttpHeader n, p; for (n = p = res->headers; p; n = p, p = p->next) { if (IS(p->name, name)) { FREE(p->value); p->value = Str_dup(value); destroy_entry(h); return; } } n->next = h; } else { res->headers = h; } } /** * Sets the status code for the response * @param res HttpResponse object * @param code A HTTP status code <100-510> * @param msg The status code string message */ void set_status(HttpResponse res, int code) { res->status = code; res->status_msg = get_status_string(code); } /** * Set the response content-type * @param res HttpResponse object * @param mime Mime content type, e.g. text/html */ void set_content_type(HttpResponse res, const char *mime) { set_header(res, "Content-Type", "%s", mime); } /** * Returns the value of the specified header * @param req HttpRequest object * @param name Header name to lookup the value for * @return The value of the specified header, NULL if not found */ const char *get_header(HttpRequest req, const char *name) { for (HttpHeader p = req->headers; p; p = p->next) if (IS(p->name, name)) return (p->value); return NULL; } /** * Returns the value of the specified parameter * @param req HttpRequest object * @param name The request parameter key to lookup the value for * @return The value of the specified parameter, or NULL if not found */ const char *get_parameter(HttpRequest req, const char *name) { for (HttpParameter p = req->params; p; p = p->next) if (IS(p->name, name)) return (p->value); return NULL; } /** * Returns a string containing all (extra) headers found in the * response. The headers are newline separated in the returned * string. * @param res HttpResponse object * @return A String containing all headers set in the Response object */ char *get_headers(HttpResponse res) { char buf[RES_STRLEN]; char *b = buf; *buf = 0; for (HttpHeader p = res->headers; (((b - buf) + STRLEN) < RES_STRLEN) && p; p = p->next) b += snprintf(b, STRLEN,"%s: %s\r\n", p->name, p->value); return buf[0] ? Str_dup(buf) : NULL; } /** * Lookup the corresponding HTTP status string for the given status * code * @param status A HTTP status code * @return A default status message for the specified HTTP status * code. 
*/ const char *get_status_string(int status) { switch (status) { case SC_OK: return "OK"; case SC_ACCEPTED: return "Accepted"; case SC_BAD_GATEWAY: return "Bad Gateway"; case SC_BAD_REQUEST: return "Bad Request"; case SC_CONFLICT: return "Conflict"; case SC_CONTINUE: return "Continue"; case SC_CREATED: return "Created"; case SC_EXPECTATION_FAILED: return "Expectation Failed"; case SC_FORBIDDEN: return "Forbidden"; case SC_GATEWAY_TIMEOUT: return "Gateway Timeout"; case SC_GONE: return "Gone"; case SC_VERSION_NOT_SUPPORTED: return "HTTP Version Not Supported"; case SC_INTERNAL_SERVER_ERROR: return "Internal Server Error"; case SC_LENGTH_REQUIRED: return "Length Required"; case SC_METHOD_NOT_ALLOWED: return "Method Not Allowed"; case SC_MOVED_PERMANENTLY: return "Moved Permanently"; case SC_MOVED_TEMPORARILY: return "Moved Temporarily"; case SC_MULTIPLE_CHOICES: return "Multiple Choices"; case SC_NO_CONTENT: return "No Content"; case SC_NON_AUTHORITATIVE: return "Non-Authoritative Information"; case SC_NOT_ACCEPTABLE: return "Not Acceptable"; case SC_NOT_FOUND: return "Not Found"; case SC_NOT_IMPLEMENTED: return "Not Implemented"; case SC_NOT_MODIFIED: return "Not Modified"; case SC_PARTIAL_CONTENT: return "Partial Content"; case SC_PAYMENT_REQUIRED: return "Payment Required"; case SC_PRECONDITION_FAILED: return "Precondition Failed"; case SC_PROXY_AUTHENTICATION_REQUIRED: return "Proxy Authentication Required"; case SC_REQUEST_ENTITY_TOO_LARGE: return "Request Entity Too Large"; case SC_REQUEST_TIMEOUT: return "Request Timeout"; case SC_REQUEST_URI_TOO_LARGE: return "Request URI Too Large"; case SC_RANGE_NOT_SATISFIABLE: return "Requested Range Not Satisfiable"; case SC_RESET_CONTENT: return "Reset Content"; case SC_SEE_OTHER: return "See Other"; case SC_SERVICE_UNAVAILABLE: return "Service Unavailable"; case SC_SWITCHING_PROTOCOLS: return "Switching Protocols"; case SC_UNAUTHORIZED: return "Unauthorized"; case SC_UNSUPPORTED_MEDIA_TYPE: return "Unsupported Media Type"; case SC_USE_PROXY: return "Use Proxy"; default: { return "Unknown HTTP status"; } } } /* ----------------------------------------------------------------- Private */ /** * Receives standard HTTP requests from a client socket and dispatches * them to the doXXX methods defined in a cervlet module. */ static void do_service(Socket_T s) { volatile HttpResponse res = create_HttpResponse(s); volatile HttpRequest req = create_HttpRequest(s); if (res && req) { if (Run.httpd.flags & Httpd_Ssl) set_header(res, "Strict-Transport-Security", "max-age=63072000; includeSubdomains; preload"); if (is_authenticated(req, res)) { set_header(res, "Set-Cookie", "securitytoken=%s; Max-Age=600; HttpOnly; SameSite=strict%s", res->token, Run.httpd.flags & Httpd_Ssl ? "; Secure" : ""); if (IS(req->method, METHOD_GET)) Impl.doGet(req, res); else if (IS(req->method, METHOD_POST)) Impl.doPost(req, res); else send_error(req, res, SC_NOT_IMPLEMENTED, "Method not implemented"); } send_response(req, res); } done(req, res); } /** * Return a (RFC1123) Date string */ static char *get_date(char *result, int size) { time_t now; time(&now); if (strftime(result, size, DATEFMT, gmtime(&now)) <= 0) *result = 0; return result; } /** * Return this server name + version */ static char *get_server(char *result, int size) { snprintf(result, size, "%s %s", SERVER_NAME, Run.httpd.flags & Httpd_Signature ? SERVER_VERSION : ""); return result; } /** * Send the response to the client. If the response has already been * commited, this function does nothing. 
*/ static void send_response(HttpRequest req, HttpResponse res) { Socket_T S = res->S; if (! res->is_committed) { char date[STRLEN]; char server[STRLEN]; #ifdef HAVE_LIBZ const char *acceptEncoding = get_header(req, "Accept-Encoding"); boolean_t canCompress = acceptEncoding && Str_sub(acceptEncoding, "gzip") ? true : false; #else boolean_t canCompress = false; #endif const void *body = NULL; size_t bodyLength = 0; if (canCompress) { body = StringBuffer_toCompressed(res->outputbuffer, 6, &bodyLength); set_header(res, "Content-Encoding", "gzip"); } else { body = StringBuffer_toString(res->outputbuffer); bodyLength = StringBuffer_length(res->outputbuffer); } char *headers = get_headers(res); res->is_committed = true; get_date(date, STRLEN); get_server(server, STRLEN); Socket_print(S, "%s %d %s\r\n", res->protocol, res->status, res->status_msg); Socket_print(S, "Date: %s\r\n", date); Socket_print(S, "Server: %s\r\n", server); Socket_print(S, "Content-Length: %zu\r\n", bodyLength); Socket_print(S, "Connection: close\r\n"); if (headers) Socket_print(S, "%s", headers); Socket_print(S, "\r\n"); if (bodyLength) Socket_write(S, (unsigned char *)body, bodyLength); FREE(headers); } } /* --------------------------------------------------------------- Factories */ /** * Returns a new HttpRequest object wrapping the client request */ static HttpRequest create_HttpRequest(Socket_T S) { char line[REQ_STRLEN]; if (Socket_readLine(S, line, sizeof(line)) == NULL) { internal_error(S, SC_BAD_REQUEST, "No request found"); return NULL; } Str_chomp(line); char method[STRLEN]; char url[REQ_STRLEN]; char protocol[STRLEN]; if (sscanf(line, "%255s %1023s HTTP/%3[1.0]", method, url, protocol) != 3) { internal_error(S, SC_BAD_REQUEST, "Cannot parse request"); return NULL; } if (strlen(url) >= MAX_URL_LENGTH) { internal_error(S, SC_BAD_REQUEST, "[error] URL too long"); return NULL; } HttpRequest req = NULL; NEW(req); req->S = S; Util_urlDecode(url); req->url = Str_dup(url); req->method = Str_dup(method); req->protocol = Str_dup(protocol); create_headers(req); if (! create_parameters(req)) { destroy_HttpRequest(req); internal_error(S, SC_BAD_REQUEST, "Cannot parse Request parameters"); return NULL; } return req; } /** * Returns a new HttpResponse object wrapping a default response. Use * the set_XXX methods to change the object. */ static HttpResponse create_HttpResponse(Socket_T S) { HttpResponse res = NULL; NEW(res); res->S = S; res->status = SC_OK; res->outputbuffer = StringBuffer_create(256); res->is_committed = false; res->protocol = SERVER_PROTOCOL; res->status_msg = get_status_string(SC_OK); Util_getToken(res->token); return res; } /** * Create HTTP headers for the given request */ static void create_headers(HttpRequest req) { char line[REQ_STRLEN] = {0}; while (Socket_readLine(req->S, line, sizeof(line)) && ! (Str_isEqual(line, "\r\n") || Str_isEqual(line, "\n"))) { char *value = strchr(line, ':'); if (value) { HttpHeader header = NULL; NEW(header); *value++ = 0; Str_trim(line); Str_trim(value); Str_chomp(value); header->name = Str_dup(line); header->value = Str_dup(value); header->next = req->headers; req->headers = header; } } } /** * Create parameters for the given request. Returns false if an error * occurs. */ static boolean_t create_parameters(HttpRequest req) { char *query_string = NULL; if (IS(req->method, METHOD_POST)) { int len; const char *content_length = get_header(req, "Content-Length"); if (! 
content_length || sscanf(content_length, "%d", &len) != 1 || len < 0 || len > _httpPostLimit) return false; if (len != 0) { query_string = CALLOC(1, _httpPostLimit + 1); int n = Socket_read(req->S, query_string, len); if (n != len) { FREE(query_string); return false; } } } else if (IS(req->method, METHOD_GET)) { char *p = strchr(req->url, '?'); if (p) { *p++ = 0; query_string = Str_dup(p); } } if (query_string) { if (*query_string) { char *p = strchr(query_string, '/'); if (p) { *p++ = 0; req->pathinfo = Str_dup(p); } req->params = parse_parameters(query_string); } FREE(query_string); } return true; } /* ----------------------------------------------------------------- Cleanup */ /** * Clear the response output buffer and headers */ static void reset_response(HttpResponse res) { if (res->headers) { destroy_entry(res->headers); res->headers = NULL; /* Release Pragma */ } StringBuffer_clear(res->outputbuffer); } /** * Finalize the request and response object. */ static void done(HttpRequest req, HttpResponse res) { destroy_HttpRequest(req); destroy_HttpResponse(res); } /** * Free a HttpRequest object */ static void destroy_HttpRequest(HttpRequest req) { if (req) { FREE(req->method); FREE(req->url); FREE(req->pathinfo); FREE(req->protocol); FREE(req->remote_user); if (req->headers) destroy_entry(req->headers); if (req->params) destroy_entry(req->params); FREE(req); } } /** * Free a HttpResponse object */ static void destroy_HttpResponse(HttpResponse res) { if (res) { StringBuffer_free(&(res->outputbuffer)); if (res->headers) destroy_entry(res->headers); FREE(res); } } /** * Free a (linked list of) http entry object(s). Both HttpHeader and * HttpParameter are of this type. */ static void destroy_entry(void *p) { struct entry *h = p; if (h->next) destroy_entry(h->next); FREE(h->name); FREE(h->value); FREE(h); } /* ----------------------------------------------------- Checkers/Validators */ /** * Do Basic Authentication if this auth. style is allowed. */ static boolean_t is_authenticated(HttpRequest req, HttpResponse res) { if (Run.httpd.credentials) { if (! basic_authenticate(req)) { // Send just generic error message to the client to not disclose e.g. username existence in case of credentials harvesting attack send_error(req, res, SC_UNAUTHORIZED, "You are not authorized to access monit. Either you supplied the wrong credentials (e.g. bad password), or your browser doesn't understand how to supply the credentials required"); set_header(res, "WWW-Authenticate", "Basic realm=\"monit\""); return false; } } if (IS(req->method, METHOD_POST)) { // Check CSRF double-submit cookie (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet#Double_Submit_Cookie) const char *cookie = get_header(req, "Cookie"); const char *token = get_parameter(req, "securitytoken"); if (! cookie) { LogError("HttpRequest: access denied -- client [%s]: missing CSRF token cookie\n", NVLSTR(Socket_getRemoteHost(req->S))); send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token"); return false; } if (! token) { LogError("HttpRequest: access denied -- client [%s]: missing CSRF token in HTTP parameter\n", NVLSTR(Socket_getRemoteHost(req->S))); send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token"); return false; } if (! 
Str_startsWith(cookie, "securitytoken=")) { LogError("HttpRequest: access denied -- client [%s]: no CSRF token in cookie\n", NVLSTR(Socket_getRemoteHost(req->S))); send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token"); return false; } if (Str_compareConstantTime(cookie + 14, token)) { LogError("HttpRequest: access denied -- client [%s]: CSRF token mismatch\n", NVLSTR(Socket_getRemoteHost(req->S))); send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token"); return false; } } return true; } /** * Authenticate the basic-credentials (uname/password) submitted by * the user. */ static boolean_t basic_authenticate(HttpRequest req) { const char *credentials = get_header(req, "Authorization"); if (! (credentials && Str_startsWith(credentials, "Basic "))) { LogError("HttpRequest: access denied -- client [%s]: missing or invalid Authorization header\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } char buf[STRLEN] = {0}; strncpy(buf, &credentials[6], sizeof(buf) - 1); char uname[STRLEN] = {0}; if (decode_base64((unsigned char *)uname, buf) <= 0) { LogError("HttpRequest: access denied -- client [%s]: invalid Authorization header\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } if (! *uname) { LogError("HttpRequest: access denied -- client [%s]: empty username\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } char *password = password = strchr(uname, ':'); if (! password || ! *password) { LogError("HttpRequest: access denied -- client [%s]: empty password\n", NVLSTR(Socket_getRemoteHost(req->S))); return false; } *password++ = 0; /* Check if user exist */ if (! Util_getUserCredentials(uname)) { LogError("HttpRequest: access denied -- client [%s]: unknown user '%s'\n", NVLSTR(Socket_getRemoteHost(req->S)), uname); return false; } /* Check if user has supplied the right password */ if (! Util_checkCredentials(uname, password)) { LogError("HttpRequest: access denied -- client [%s]: wrong password for user '%s'\n", NVLSTR(Socket_getRemoteHost(req->S)), uname); return false; } req->remote_user = Str_dup(uname); return true; } /* --------------------------------------------------------------- Utilities */ /** * Send an error message to the client. This is a helper function, * used internal if the service function fails to setup the framework * properly; i.e. with a valid HttpRequest and a valid HttpResponse. */ static void internal_error(Socket_T S, int status, char *msg) { char date[STRLEN]; char server[STRLEN]; const char *status_msg = get_status_string(status); get_date(date, STRLEN); get_server(server, STRLEN); Socket_print(S, "%s %d %s\r\n" "Date: %s\r\n" "Server: %s\r\n" "Content-Type: text/html\r\n" "Connection: close\r\n" "\r\n" "<html><head><title>%s</title></head>" "<body bgcolor=#FFFFFF><h2>%s</h2>%s<p>" "<hr><a href='%s'><font size=-1>%s</font></a>" "</body></html>\r\n", SERVER_PROTOCOL, status, status_msg, date, server, status_msg, status_msg, msg, SERVER_URL, server); DEBUG("HttpRequest: error -- client [%s]: %s %d %s\n", NVLSTR(Socket_getRemoteHost(S)), SERVER_PROTOCOL, status, msg ? msg : status_msg); } /** * Parse request parameters from the given query string and return a * linked list of HttpParameters */ static HttpParameter parse_parameters(char *query_string) { #define KEY 1 #define VALUE 2 int token; int cursor = 0; char *key = NULL; char *value = NULL; HttpParameter head = NULL; while ((token = get_next_token(query_string, &cursor, &value))) { if (token == KEY) key = value; else if (token == VALUE) { HttpParameter p = NULL; if (! 
key) goto error; NEW(p); p->name = key; p->value = value; p->next = head; head = p; key = NULL; } } return head; error: FREE(key); FREE(value); if ( head != NULL ) destroy_entry(head); return NULL; } /** * A mini-scanner for tokenizing a query string */ static int get_next_token(char *s, int *cursor, char **r) { int i = *cursor; while (s[*cursor]) { if (s[*cursor+1] == '=') { *cursor += 1; *r = Str_ndup(&s[i], (*cursor-i)); return KEY; } if (s[*cursor] == '=') { while (s[*cursor] && s[*cursor] != '&') *cursor += 1; if (s[*cursor] == '&') { *r = Str_ndup(&s[i+1], (*cursor-i)-1); *cursor += 1; } else { *r = Str_ndup(&s[i+1], (*cursor-i)); } return VALUE; } *cursor += 1; } return 0; }
void set_header(HttpResponse res, const char *name, const char *value) { HttpHeader h = NULL; ASSERT(res); ASSERT(name); NEW(h); h->name = Str_dup(name); h->value = Str_dup(value); if (res->headers) { HttpHeader n, p; for (n = p = res->headers; p; n = p, p = p->next) { if (IS(p->name, name)) { FREE(p->value); p->value = Str_dup(value); destroy_entry(h); return; } } n->next = h; } else { res->headers = h; } }
void set_header(HttpResponse res, const char *name, const char *value, ...) { HttpHeader h = NULL; ASSERT(res); ASSERT(name); NEW(h); h->name = Str_dup(name); va_list ap; va_start(ap, value); h->value = Str_vcat(value, ap); va_end(ap); if (res->headers) { HttpHeader n, p; for (n = p = res->headers; p; n = p, p = p->next) { if (IS(p->name, name)) { FREE(p->value); p->value = Str_dup(value); destroy_entry(h); return; } } n->next = h; } else { res->headers = h; } }
{'added': [(244, 'void set_header(HttpResponse res, const char *name, const char *value, ...) {'), (252, ' va_list ap;'), (253, ' va_start(ap, value);'), (254, ' h->value = Str_vcat(value, ap);'), (255, ' va_end(ap);'), (291, ' set_header(res, "Content-Type", "%s", mime);'), (448, ' set_header(res, "Set-Cookie", "securitytoken=%s; Max-Age=600; HttpOnly; SameSite=strict%s", res->token, Run.httpd.flags & Httpd_Ssl ? "; Secure" : "");'), (581, ' Util_getToken(res->token);'), (735, ' if (IS(req->method, METHOD_POST)) {'), (736, ' // Check CSRF double-submit cookie (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet#Double_Submit_Cookie)'), (737, ' const char *cookie = get_header(req, "Cookie");'), (738, ' const char *token = get_parameter(req, "securitytoken");'), (739, ' if (! cookie) {'), (740, ' LogError("HttpRequest: access denied -- client [%s]: missing CSRF token cookie\\n", NVLSTR(Socket_getRemoteHost(req->S)));'), (741, ' send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token");'), (742, ' return false;'), (743, ' }'), (744, ' if (! token) {'), (745, ' LogError("HttpRequest: access denied -- client [%s]: missing CSRF token in HTTP parameter\\n", NVLSTR(Socket_getRemoteHost(req->S)));'), (746, ' send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token");'), (747, ' return false;'), (748, ' }'), (749, ' if (! Str_startsWith(cookie, "securitytoken=")) {'), (750, ' LogError("HttpRequest: access denied -- client [%s]: no CSRF token in cookie\\n", NVLSTR(Socket_getRemoteHost(req->S)));'), (751, ' send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token");'), (752, ' return false;'), (753, ' }'), (754, ' if (Str_compareConstantTime(cookie + 14, token)) {'), (755, ' LogError("HttpRequest: access denied -- client [%s]: CSRF token mismatch\\n", NVLSTR(Socket_getRemoteHost(req->S)));'), (756, ' send_error(req, res, SC_FORBIDDEN, "Invalid CSRF Token");'), (757, ' return false;'), (758, ' }'), (759, ' }')], 'deleted': [(244, 'void set_header(HttpResponse res, const char *name, const char *value) {'), (252, ' h->value = Str_dup(value);'), (288, ' set_header(res, "Content-Type", mime);')]}
33
3
581
3,625
https://bitbucket.org/tildeslash/monit
CVE-2016-7067
['CWE-352']
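The record above (monit's processor.c, CVE-2016-7067, CWE-352) shows the double-submit-cookie defence the diff adds: every response sets a securitytoken cookie, and a POST is only honoured when the same token comes back as a request parameter. The sketch below is a minimal, self-contained restatement of that check; the names csrf_check and tokens_equal are illustrative stand-ins and are not monit's API (the real code uses Str_startsWith and Str_compareConstantTime, as visible in the diff).

/* Sketch of the double-submit-cookie check added by the fix above.
 * Assumes the server previously issued "securitytoken=<token>" as a cookie
 * and that state-changing (POST) requests must echo the same token as a
 * request parameter. All names here are placeholders, not monit's API. */
#include <stddef.h>
#include <string.h>

/* Compare without bailing out at the first mismatching byte, so the check
 * does not reveal how many leading characters of the token were correct. */
static int tokens_equal(const char *a, const char *b)
{
        size_t la = strlen(a), lb = strlen(b);
        unsigned char diff = (unsigned char)(la != lb);
        for (size_t i = 0; i < la && i < lb; i++)
                diff |= (unsigned char)(a[i] ^ b[i]);
        return diff == 0;
}

/* Returns 1 if the POST may proceed, 0 if it must be rejected (403). */
static int csrf_check(const char *cookie_header, const char *token_param)
{
        static const char prefix[] = "securitytoken=";
        if (!cookie_header || !token_param)
                return 0;                         /* missing cookie or parameter */
        if (strncmp(cookie_header, prefix, sizeof(prefix) - 1) != 0)
                return 0;                         /* cookie carries no token */
        return tokens_equal(cookie_header + sizeof(prefix) - 1, token_param);
}

int main(void)
{
        /* Matching cookie and parameter tokens pass; anything else is refused. */
        return csrf_check("securitytoken=abc123", "abc123") ? 0 : 1;
}

In the actual diff the cookie is additionally marked HttpOnly and SameSite=strict, plus Secure over TLS, which limits how the token itself can leak in the first place.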
heap_overflow.c
main
/** * Test that the crafted TGA file doesn't trigger OOB reads. */ #include "gd.h" #include "gdtest.h" static size_t read_test_file(char **buffer, char *basename); int main() { gdImagePtr im; char *buffer; size_t size; size = read_test_file(&buffer, "heap_overflow.tga"); im = gdImageCreateFromTgaPtr(size, (void *) buffer); gdTestAssert(im == NULL); free(buffer); return gdNumFailures(); } static size_t read_test_file(char **buffer, char *basename) { char *filename; FILE *fp; size_t exp_size, act_size; filename = gdTestFilePath2("tga", basename); fp = fopen(filename, "rb"); gdTestAssert(fp != NULL); fseek(fp, 0, SEEK_END); exp_size = ftell(fp); fseek(fp, 0, SEEK_SET); *buffer = malloc(exp_size); gdTestAssert(*buffer != NULL); act_size = fread(*buffer, sizeof(**buffer), exp_size, fp); gdTestAssert(act_size == exp_size); fclose(fp); free(filename); return act_size; }
/** * Test that crafted TGA files don't trigger OOB reads. */ #include "gd.h" #include "gdtest.h" static void check_file(char *basename); static size_t read_test_file(char **buffer, char *basename); int main() { check_file("heap_overflow_1.tga"); check_file("heap_overflow_2.tga"); return gdNumFailures(); } static void check_file(char *basename) { gdImagePtr im; char *buffer; size_t size; size = read_test_file(&buffer, basename); im = gdImageCreateFromTgaPtr(size, (void *) buffer); gdTestAssert(im == NULL); free(buffer); } static size_t read_test_file(char **buffer, char *basename) { char *filename; FILE *fp; size_t exp_size, act_size; filename = gdTestFilePath2("tga", basename); fp = fopen(filename, "rb"); gdTestAssert(fp != NULL); fseek(fp, 0, SEEK_END); exp_size = ftell(fp); fseek(fp, 0, SEEK_SET); *buffer = malloc(exp_size); gdTestAssert(*buffer != NULL); act_size = fread(*buffer, sizeof(**buffer), exp_size, fp); gdTestAssert(act_size == exp_size); fclose(fp); free(filename); return act_size; }
int main() { gdImagePtr im; char *buffer; size_t size; size = read_test_file(&buffer, "heap_overflow.tga"); im = gdImageCreateFromTgaPtr(size, (void *) buffer); gdTestAssert(im == NULL); free(buffer); return gdNumFailures(); }
int main() { check_file("heap_overflow_1.tga"); check_file("heap_overflow_2.tga"); return gdNumFailures(); }
{'added': [(2, " * Test that crafted TGA files don't trigger OOB reads."), (10, 'static void check_file(char *basename);'), (15, '{'), (16, ' check_file("heap_overflow_1.tga");'), (17, ' check_file("heap_overflow_2.tga");'), (18, ''), (19, ' return gdNumFailures();'), (20, '}'), (21, ''), (22, ''), (23, 'static void check_file(char *basename)'), (29, ' size = read_test_file(&buffer, basename);')], 'deleted': [(2, " * Test that the crafted TGA file doesn't trigger OOB reads."), (19, ' size = read_test_file(&buffer, "heap_overflow.tga");'), (23, ''), (24, ' return gdNumFailures();')]}
12
4
39
233
https://github.com/libgd/libgd
CVE-2016-6906
['CWE-125']
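The libgd record above only extends the regression test so it covers two crafted files (heap_overflow_1.tga and heap_overflow_2.tga); the decoder-side change for CVE-2016-6906 is not part of this record. Purely as an illustration of the bug class the test exercises (CWE-125, out-of-bounds read on crafted or truncated input), the sketch below shows the generic pattern of validating a header-declared size against the bytes actually available before copying. It is not libgd's gd_tga.c code, and every name in it is made up.

/* Generic bounds-checked read over an in-memory file, illustrative only. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct tga_view {
        const uint8_t *buf;   /* whole file, as loaded by read_test_file() above */
        size_t         len;   /* number of bytes in buf */
        size_t         pos;   /* current read offset, always <= len */
};

/* Copy n bytes out of the view, failing cleanly if fewer than n remain. */
static int tga_read(struct tga_view *v, void *dst, size_t n)
{
        if (n > v->len - v->pos)      /* would read past the end of the buffer */
                return -1;
        memcpy(dst, v->buf + v->pos, n);
        v->pos += n;
        return 0;
}

int main(void)
{
        const uint8_t file[4] = { 0x01, 0x02, 0x03, 0x04 };
        struct tga_view v = { file, sizeof(file), 0 };
        uint8_t hdr[8];
        /* A header that claims more bytes than the file holds must fail,
         * not be read past the end of the allocation. */
        return tga_read(&v, hdr, sizeof(hdr)) == -1 ? 0 : 1;
}

A decoder built on such a helper fails cleanly on the crafted inputs, which is exactly what the test asserts with gdTestAssert(im == NULL).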
big_key.c
big_key_init
/* Large capacity key type * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/init.h> #include <linux/seq_file.h> #include <linux/file.h> #include <linux/shmem_fs.h> #include <linux/err.h> #include <linux/scatterlist.h> #include <keys/user-type.h> #include <keys/big_key-type.h> #include <crypto/rng.h> #include <crypto/skcipher.h> /* * Layout of key payload words. */ enum { big_key_data, big_key_path, big_key_path_2nd_part, big_key_len, }; /* * Crypto operation with big_key data */ enum big_key_op { BIG_KEY_ENC, BIG_KEY_DEC, }; /* * If the data is under this limit, there's no point creating a shm file to * hold it as the permanently resident metadata for the shmem fs will be at * least as large as the data. */ #define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry)) /* * Key size for big_key data encryption */ #define ENC_KEY_SIZE 16 /* * big_key defined keys take an arbitrary string as the description and an * arbitrary blob of data as the payload */ struct key_type key_type_big_key = { .name = "big_key", .preparse = big_key_preparse, .free_preparse = big_key_free_preparse, .instantiate = generic_key_instantiate, .revoke = big_key_revoke, .destroy = big_key_destroy, .describe = big_key_describe, .read = big_key_read, }; /* * Crypto names for big_key data encryption */ static const char big_key_rng_name[] = "stdrng"; static const char big_key_alg_name[] = "ecb(aes)"; /* * Crypto algorithms for big_key data encryption */ static struct crypto_rng *big_key_rng; static struct crypto_skcipher *big_key_skcipher; /* * Generate random key to encrypt big_key data */ static inline int big_key_gen_enckey(u8 *key) { return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE); } /* * Encrypt/decrypt big_key data */ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) { int ret = -EINVAL; struct scatterlist sgio; SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher); if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) { ret = -EAGAIN; goto error; } skcipher_request_set_tfm(req, big_key_skcipher); skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); sg_init_one(&sgio, data, datalen); skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL); if (op == BIG_KEY_ENC) ret = crypto_skcipher_encrypt(req); else ret = crypto_skcipher_decrypt(req); skcipher_request_zero(req); error: return ret; } /* * Preparse a big key */ int big_key_preparse(struct key_preparsed_payload *prep) { struct path *path = (struct path *)&prep->payload.data[big_key_path]; struct file *file; u8 *enckey; u8 *data = NULL; ssize_t written; size_t datalen = prep->datalen; int ret; ret = -EINVAL; if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) goto error; /* Set an arbitrary quota */ prep->quotalen = 16; prep->payload.data[big_key_len] = (void *)(unsigned long)datalen; if (datalen > BIG_KEY_FILE_THRESHOLD) { /* Create a shmem file to store the data in. This will permit the data * to be swapped out if needed. * * File content is stored encrypted with randomly generated key. 
*/ size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher)); /* prepare aligned data to encrypt */ data = kmalloc(enclen, GFP_KERNEL); if (!data) return -ENOMEM; memcpy(data, prep->data, datalen); memset(data + datalen, 0x00, enclen - datalen); /* generate random key */ enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); if (!enckey) { ret = -ENOMEM; goto error; } ret = big_key_gen_enckey(enckey); if (ret) goto err_enckey; /* encrypt aligned data */ ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey); if (ret) goto err_enckey; /* save aligned data to file */ file = shmem_kernel_file_setup("", enclen, 0); if (IS_ERR(file)) { ret = PTR_ERR(file); goto err_enckey; } written = kernel_write(file, data, enclen, 0); if (written != enclen) { ret = written; if (written >= 0) ret = -ENOMEM; goto err_fput; } /* Pin the mount and dentry to the key so that we can open it again * later */ prep->payload.data[big_key_data] = enckey; *path = file->f_path; path_get(path); fput(file); kfree(data); } else { /* Just store the data in a buffer */ void *data = kmalloc(datalen, GFP_KERNEL); if (!data) return -ENOMEM; prep->payload.data[big_key_data] = data; memcpy(data, prep->data, prep->datalen); } return 0; err_fput: fput(file); err_enckey: kfree(enckey); error: kfree(data); return ret; } /* * Clear preparsement. */ void big_key_free_preparse(struct key_preparsed_payload *prep) { if (prep->datalen > BIG_KEY_FILE_THRESHOLD) { struct path *path = (struct path *)&prep->payload.data[big_key_path]; path_put(path); } kfree(prep->payload.data[big_key_data]); } /* * dispose of the links from a revoked keyring * - called with the key sem write-locked */ void big_key_revoke(struct key *key) { struct path *path = (struct path *)&key->payload.data[big_key_path]; /* clear the quota */ key_payload_reserve(key, 0); if (key_is_instantiated(key) && (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD) vfs_truncate(path, 0); } /* * dispose of the data dangling from the corpse of a big_key key */ void big_key_destroy(struct key *key) { size_t datalen = (size_t)key->payload.data[big_key_len]; if (datalen > BIG_KEY_FILE_THRESHOLD) { struct path *path = (struct path *)&key->payload.data[big_key_path]; path_put(path); path->mnt = NULL; path->dentry = NULL; } kfree(key->payload.data[big_key_data]); key->payload.data[big_key_data] = NULL; } /* * describe the big_key key */ void big_key_describe(const struct key *key, struct seq_file *m) { size_t datalen = (size_t)key->payload.data[big_key_len]; seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, ": %zu [%s]", datalen, datalen > BIG_KEY_FILE_THRESHOLD ? 
"file" : "buff"); } /* * read the key data * - the key's semaphore is read-locked */ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) { size_t datalen = (size_t)key->payload.data[big_key_len]; long ret; if (!buffer || buflen < datalen) return datalen; if (datalen > BIG_KEY_FILE_THRESHOLD) { struct path *path = (struct path *)&key->payload.data[big_key_path]; struct file *file; u8 *data; u8 *enckey = (u8 *)key->payload.data[big_key_data]; size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher)); data = kmalloc(enclen, GFP_KERNEL); if (!data) return -ENOMEM; file = dentry_open(path, O_RDONLY, current_cred()); if (IS_ERR(file)) { ret = PTR_ERR(file); goto error; } /* read file to kernel and decrypt */ ret = kernel_read(file, 0, data, enclen); if (ret >= 0 && ret != enclen) { ret = -EIO; goto err_fput; } ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey); if (ret) goto err_fput; ret = datalen; /* copy decrypted data to user */ if (copy_to_user(buffer, data, datalen) != 0) ret = -EFAULT; err_fput: fput(file); error: kfree(data); } else { ret = datalen; if (copy_to_user(buffer, key->payload.data[big_key_data], datalen) != 0) ret = -EFAULT; } return ret; } /* * Register key type */ static int __init big_key_init(void) { return register_key_type(&key_type_big_key); } /* * Initialize big_key crypto and RNG algorithms */ static int __init big_key_crypto_init(void) { int ret = -EINVAL; /* init RNG */ big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0); if (IS_ERR(big_key_rng)) { big_key_rng = NULL; return -EFAULT; } /* seed RNG */ ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng)); if (ret) goto error; /* init block cipher */ big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(big_key_skcipher)) { big_key_skcipher = NULL; ret = -EFAULT; goto error; } return 0; error: crypto_free_rng(big_key_rng); big_key_rng = NULL; return ret; } device_initcall(big_key_init); late_initcall(big_key_crypto_init);
/* Large capacity key type * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) "big_key: "fmt #include <linux/init.h> #include <linux/seq_file.h> #include <linux/file.h> #include <linux/shmem_fs.h> #include <linux/err.h> #include <linux/scatterlist.h> #include <keys/user-type.h> #include <keys/big_key-type.h> #include <crypto/rng.h> #include <crypto/skcipher.h> /* * Layout of key payload words. */ enum { big_key_data, big_key_path, big_key_path_2nd_part, big_key_len, }; /* * Crypto operation with big_key data */ enum big_key_op { BIG_KEY_ENC, BIG_KEY_DEC, }; /* * If the data is under this limit, there's no point creating a shm file to * hold it as the permanently resident metadata for the shmem fs will be at * least as large as the data. */ #define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry)) /* * Key size for big_key data encryption */ #define ENC_KEY_SIZE 16 /* * big_key defined keys take an arbitrary string as the description and an * arbitrary blob of data as the payload */ struct key_type key_type_big_key = { .name = "big_key", .preparse = big_key_preparse, .free_preparse = big_key_free_preparse, .instantiate = generic_key_instantiate, .revoke = big_key_revoke, .destroy = big_key_destroy, .describe = big_key_describe, .read = big_key_read, }; /* * Crypto names for big_key data encryption */ static const char big_key_rng_name[] = "stdrng"; static const char big_key_alg_name[] = "ecb(aes)"; /* * Crypto algorithms for big_key data encryption */ static struct crypto_rng *big_key_rng; static struct crypto_skcipher *big_key_skcipher; /* * Generate random key to encrypt big_key data */ static inline int big_key_gen_enckey(u8 *key) { return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE); } /* * Encrypt/decrypt big_key data */ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) { int ret = -EINVAL; struct scatterlist sgio; SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher); if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) { ret = -EAGAIN; goto error; } skcipher_request_set_tfm(req, big_key_skcipher); skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); sg_init_one(&sgio, data, datalen); skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL); if (op == BIG_KEY_ENC) ret = crypto_skcipher_encrypt(req); else ret = crypto_skcipher_decrypt(req); skcipher_request_zero(req); error: return ret; } /* * Preparse a big key */ int big_key_preparse(struct key_preparsed_payload *prep) { struct path *path = (struct path *)&prep->payload.data[big_key_path]; struct file *file; u8 *enckey; u8 *data = NULL; ssize_t written; size_t datalen = prep->datalen; int ret; ret = -EINVAL; if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) goto error; /* Set an arbitrary quota */ prep->quotalen = 16; prep->payload.data[big_key_len] = (void *)(unsigned long)datalen; if (datalen > BIG_KEY_FILE_THRESHOLD) { /* Create a shmem file to store the data in. This will permit the data * to be swapped out if needed. * * File content is stored encrypted with randomly generated key. 
*/ size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher)); /* prepare aligned data to encrypt */ data = kmalloc(enclen, GFP_KERNEL); if (!data) return -ENOMEM; memcpy(data, prep->data, datalen); memset(data + datalen, 0x00, enclen - datalen); /* generate random key */ enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); if (!enckey) { ret = -ENOMEM; goto error; } ret = big_key_gen_enckey(enckey); if (ret) goto err_enckey; /* encrypt aligned data */ ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey); if (ret) goto err_enckey; /* save aligned data to file */ file = shmem_kernel_file_setup("", enclen, 0); if (IS_ERR(file)) { ret = PTR_ERR(file); goto err_enckey; } written = kernel_write(file, data, enclen, 0); if (written != enclen) { ret = written; if (written >= 0) ret = -ENOMEM; goto err_fput; } /* Pin the mount and dentry to the key so that we can open it again * later */ prep->payload.data[big_key_data] = enckey; *path = file->f_path; path_get(path); fput(file); kfree(data); } else { /* Just store the data in a buffer */ void *data = kmalloc(datalen, GFP_KERNEL); if (!data) return -ENOMEM; prep->payload.data[big_key_data] = data; memcpy(data, prep->data, prep->datalen); } return 0; err_fput: fput(file); err_enckey: kfree(enckey); error: kfree(data); return ret; } /* * Clear preparsement. */ void big_key_free_preparse(struct key_preparsed_payload *prep) { if (prep->datalen > BIG_KEY_FILE_THRESHOLD) { struct path *path = (struct path *)&prep->payload.data[big_key_path]; path_put(path); } kfree(prep->payload.data[big_key_data]); } /* * dispose of the links from a revoked keyring * - called with the key sem write-locked */ void big_key_revoke(struct key *key) { struct path *path = (struct path *)&key->payload.data[big_key_path]; /* clear the quota */ key_payload_reserve(key, 0); if (key_is_instantiated(key) && (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD) vfs_truncate(path, 0); } /* * dispose of the data dangling from the corpse of a big_key key */ void big_key_destroy(struct key *key) { size_t datalen = (size_t)key->payload.data[big_key_len]; if (datalen > BIG_KEY_FILE_THRESHOLD) { struct path *path = (struct path *)&key->payload.data[big_key_path]; path_put(path); path->mnt = NULL; path->dentry = NULL; } kfree(key->payload.data[big_key_data]); key->payload.data[big_key_data] = NULL; } /* * describe the big_key key */ void big_key_describe(const struct key *key, struct seq_file *m) { size_t datalen = (size_t)key->payload.data[big_key_len]; seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, ": %zu [%s]", datalen, datalen > BIG_KEY_FILE_THRESHOLD ? 
"file" : "buff"); } /* * read the key data * - the key's semaphore is read-locked */ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) { size_t datalen = (size_t)key->payload.data[big_key_len]; long ret; if (!buffer || buflen < datalen) return datalen; if (datalen > BIG_KEY_FILE_THRESHOLD) { struct path *path = (struct path *)&key->payload.data[big_key_path]; struct file *file; u8 *data; u8 *enckey = (u8 *)key->payload.data[big_key_data]; size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher)); data = kmalloc(enclen, GFP_KERNEL); if (!data) return -ENOMEM; file = dentry_open(path, O_RDONLY, current_cred()); if (IS_ERR(file)) { ret = PTR_ERR(file); goto error; } /* read file to kernel and decrypt */ ret = kernel_read(file, 0, data, enclen); if (ret >= 0 && ret != enclen) { ret = -EIO; goto err_fput; } ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey); if (ret) goto err_fput; ret = datalen; /* copy decrypted data to user */ if (copy_to_user(buffer, data, datalen) != 0) ret = -EFAULT; err_fput: fput(file); error: kfree(data); } else { ret = datalen; if (copy_to_user(buffer, key->payload.data[big_key_data], datalen) != 0) ret = -EFAULT; } return ret; } /* * Register key type */ static int __init big_key_init(void) { struct crypto_skcipher *cipher; struct crypto_rng *rng; int ret; rng = crypto_alloc_rng(big_key_rng_name, 0, 0); if (IS_ERR(rng)) { pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng)); return PTR_ERR(rng); } big_key_rng = rng; /* seed RNG */ ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng)); if (ret) { pr_err("Can't reset rng: %d\n", ret); goto error_rng; } /* init block cipher */ cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(cipher)) { ret = PTR_ERR(cipher); pr_err("Can't alloc crypto: %d\n", ret); goto error_rng; } big_key_skcipher = cipher; ret = register_key_type(&key_type_big_key); if (ret < 0) { pr_err("Can't register type: %d\n", ret); goto error_cipher; } return 0; error_cipher: crypto_free_skcipher(big_key_skcipher); error_rng: crypto_free_rng(big_key_rng); return ret; } late_initcall(big_key_init);
static int __init big_key_init(void) { return register_key_type(&key_type_big_key); }
static int __init big_key_init(void) { struct crypto_skcipher *cipher; struct crypto_rng *rng; int ret; rng = crypto_alloc_rng(big_key_rng_name, 0, 0); if (IS_ERR(rng)) { pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng)); return PTR_ERR(rng); } big_key_rng = rng; /* seed RNG */ ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng)); if (ret) { pr_err("Can't reset rng: %d\n", ret); goto error_rng; } /* init block cipher */ cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(cipher)) { ret = PTR_ERR(cipher); pr_err("Can't alloc crypto: %d\n", ret); goto error_rng; } big_key_skcipher = cipher; ret = register_key_type(&key_type_big_key); if (ret < 0) { pr_err("Can't register type: %d\n", ret); goto error_cipher; } return 0; error_cipher: crypto_free_skcipher(big_key_skcipher); error_rng: crypto_free_rng(big_key_rng); return ret; }
{'added': [(12, '#define pr_fmt(fmt) "big_key: "fmt'), (345, '\tstruct crypto_skcipher *cipher;'), (346, '\tstruct crypto_rng *rng;'), (347, '\tint ret;'), (349, '\trng = crypto_alloc_rng(big_key_rng_name, 0, 0);'), (350, '\tif (IS_ERR(rng)) {'), (351, '\t\tpr_err("Can\'t alloc rng: %ld\\n", PTR_ERR(rng));'), (352, '\t\treturn PTR_ERR(rng);'), (355, '\tbig_key_rng = rng;'), (356, ''), (358, '\tret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));'), (359, '\tif (ret) {'), (360, '\t\tpr_err("Can\'t reset rng: %d\\n", ret);'), (361, '\t\tgoto error_rng;'), (362, '\t}'), (365, '\tcipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);'), (366, '\tif (IS_ERR(cipher)) {'), (367, '\t\tret = PTR_ERR(cipher);'), (368, '\t\tpr_err("Can\'t alloc crypto: %d\\n", ret);'), (369, '\t\tgoto error_rng;'), (370, '\t}'), (371, ''), (372, '\tbig_key_skcipher = cipher;'), (373, ''), (374, '\tret = register_key_type(&key_type_big_key);'), (375, '\tif (ret < 0) {'), (376, '\t\tpr_err("Can\'t register type: %d\\n", ret);'), (377, '\t\tgoto error_cipher;'), (382, 'error_cipher:'), (383, '\tcrypto_free_skcipher(big_key_skcipher);'), (384, 'error_rng:'), (389, 'late_initcall(big_key_init);')], 'deleted': [(344, '\treturn register_key_type(&key_type_big_key);'), (345, '}'), (346, ''), (347, '/*'), (348, ' * Initialize big_key crypto and RNG algorithms'), (349, ' */'), (350, 'static int __init big_key_crypto_init(void)'), (351, '{'), (352, '\tint ret = -EINVAL;'), (354, '\t/* init RNG */'), (355, '\tbig_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);'), (356, '\tif (IS_ERR(big_key_rng)) {'), (357, '\t\tbig_key_rng = NULL;'), (358, '\t\treturn -EFAULT;'), (362, '\tret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));'), (363, '\tif (ret)'), (364, '\t\tgoto error;'), (367, '\tbig_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,'), (368, '\t\t\t\t\t\t 0, CRYPTO_ALG_ASYNC);'), (369, '\tif (IS_ERR(big_key_skcipher)) {'), (370, '\t\tbig_key_skcipher = NULL;'), (371, '\t\tret = -EFAULT;'), (372, '\t\tgoto error;'), (377, 'error:'), (379, '\tbig_key_rng = NULL;'), (383, 'device_initcall(big_key_init);'), (384, 'late_initcall(big_key_crypto_init);')]}
32
27
241
1,430
https://github.com/torvalds/linux
CVE-2016-9313
['CWE-476']
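The kernel record above (security/keys/big_key.c, CVE-2016-9313, CWE-476) merges big_key's crypto setup and key-type registration into a single late_initcall: the "big_key" type is only registered after the RNG and the skcipher have been allocated and seeded, and every failure path unwinds what was already acquired. In the old layout, a failed crypto init still left the key type registered with NULL big_key_rng/big_key_skcipher, so the first use dereferenced a NULL pointer. The sketch below restates that ordering-and-unwind pattern in isolation with compile-time stubs; acquire_rng, acquire_cipher, register_type and the release_* functions are placeholders, not kernel APIs.

/* Self-contained sketch of the init ordering the fix enforces: acquire every
 * dependency first, make the facility externally visible last, and unwind in
 * reverse on any failure so nothing observable is left half-initialised.
 * The stubs stand in for crypto_alloc_rng()/crypto_alloc_skcipher()/
 * register_key_type() and are not the real kernel functions. */
#include <stdio.h>

static int  acquire_rng(void)    { return 0; }   /* 0 = success, like the kernel */
static int  acquire_cipher(void) { return 0; }
static int  register_type(void)  { return 0; }
static void release_rng(void)    { }
static void release_cipher(void) { }

static int module_init_sketch(void)
{
        int ret;

        ret = acquire_rng();              /* dependency 1: RNG */
        if (ret)
                return ret;

        ret = acquire_cipher();           /* dependency 2: block cipher */
        if (ret)
                goto err_rng;

        ret = register_type();            /* last: only now usable from outside */
        if (ret)
                goto err_cipher;

        return 0;

err_cipher:
        release_cipher();
        /* fall through */
err_rng:
        release_rng();
        return ret;
}

int main(void)
{
        printf("init: %d\n", module_init_sketch());
        return 0;
}

The key point is the order: dependencies first, the externally visible registration last, and teardown in strict reverse on any error.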
media.c
Media_CheckDataEntry
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2019 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #include <gpac/constants.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_ISOM GF_Err Media_GetSampleDesc(GF_MediaBox *mdia, u32 SampleDescIndex, GF_SampleEntryBox **out_entry, u32 *dataRefIndex) { GF_SampleDescriptionBox *stsd; GF_SampleEntryBox *entry = NULL; if (!mdia) return GF_ISOM_INVALID_FILE; stsd = mdia->information->sampleTable->SampleDescription; if (!stsd) return GF_ISOM_INVALID_FILE; if (!SampleDescIndex || (SampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM; entry = (GF_SampleEntryBox*)gf_list_get(stsd->child_boxes, SampleDescIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (out_entry) *out_entry = entry; if (dataRefIndex) *dataRefIndex = entry->dataReferenceIndex; return GF_OK; } GF_Err Media_GetSampleDescIndex(GF_MediaBox *mdia, u64 DTS, u32 *sampleDescIndex) { GF_Err e; u32 sampleNumber, prevSampleNumber, num; u64 offset; if (sampleDescIndex == NULL) return GF_BAD_PARAM; //find the sample for this time e = stbl_findEntryForTime(mdia->information->sampleTable, (u32) DTS, 0, &sampleNumber, &prevSampleNumber); if (e) return e; if (!sampleNumber && !prevSampleNumber) { //we have to assume the track was created to be used... If we have a sampleDesc, OK if (gf_list_count(mdia->information->sampleTable->SampleDescription->child_boxes)) { (*sampleDescIndex) = 1; return GF_OK; } return GF_BAD_PARAM; } return stbl_GetSampleInfos(mdia->information->sampleTable, ( sampleNumber ? 
sampleNumber : prevSampleNumber), &offset, &num, sampleDescIndex, NULL); } static GF_Err gf_isom_get_3gpp_audio_esd(GF_SampleTableBox *stbl, u32 type, GF_GenericAudioSampleEntryBox *entry, GF_ESD **out_esd) { (*out_esd) = gf_odf_desc_esd_new(2); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; /*official mapping to MPEG-4*/ switch (type) { case GF_ISOM_SUBTYPE_3GP_EVRC: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_EVRC; return GF_OK; case GF_ISOM_SUBTYPE_3GP_QCELP: { u32 block_size, sample_rate, sample_size, i; GF_SttsEntry *ent; GF_BitStream *bs; char szName[80]; /*only map CBR*/ sample_size = stbl->SampleSize->sampleSize; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_QCELP; bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_data(bs, "QLCMfmt ", 8); gf_bs_write_u32_le(bs, 150);/*fmt chunk size*/ gf_bs_write_u8(bs, 1); gf_bs_write_u8(bs, 0); /*QCELP GUID*/ gf_bs_write_data(bs, "\x41\x6D\x7F\x5E\x15\xB1\xD0\x11\xBA\x91\x00\x80\x5F\xB4\xB9\x7E", 16); gf_bs_write_u16_le(bs, 1); memset(szName, 0, 80); strcpy(szName, "QCELP-13K(GPAC-emulated)"); gf_bs_write_data(bs, szName, 80); ent = &stbl->TimeToSample->entries[0]; sample_rate = entry->samplerate_hi; block_size = ent ? ent->sampleDelta : 160; gf_bs_write_u16_le(bs, 8*sample_size*sample_rate/block_size); gf_bs_write_u16_le(bs, sample_size); gf_bs_write_u16_le(bs, block_size); gf_bs_write_u16_le(bs, sample_rate); gf_bs_write_u16_le(bs, entry->bitspersample); gf_bs_write_u32_le(bs, sample_size ? 0 : 7); /**/ for (i=0; i<7; i++) { static const u32 qcelp_r2s [] = {0, 1, 1, 4, 2, 8, 3, 17, 4, 35, 5, 8, 14, 1}; if (sample_size) { gf_bs_write_u16(bs, 0); } else { gf_bs_write_u8(bs, qcelp_r2s[2*i+1]); gf_bs_write_u8(bs, qcelp_r2s[2*i]); } } gf_bs_write_u16(bs, 0); memset(szName, 0, 80); gf_bs_write_data(bs, szName, 20);/*reserved*/ gf_bs_get_content(bs, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs); } return GF_OK; case GF_ISOM_SUBTYPE_3GP_SMV: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_SMV; return GF_OK; case GF_ISOM_SUBTYPE_3GP_AMR: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AMR; return GF_OK; case GF_ISOM_SUBTYPE_3GP_AMR_WB: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AMR_WB; return GF_OK; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] unsupported sample description type %s\n", gf_4cc_to_str(entry->type))); break; } return GF_OK; } GF_Err Media_GetESD(GF_MediaBox *mdia, u32 sampleDescIndex, GF_ESD **out_esd, Bool true_desc_only) { u32 type; GF_ESD *esd; GF_MPEGSampleEntryBox *entry = NULL; GF_ESDBox *ESDa; GF_ProtectionSchemeInfoBox *sinf; GF_SampleDescriptionBox *stsd = mdia->information->sampleTable->SampleDescription; *out_esd = NULL; if (!stsd || !stsd->child_boxes || !sampleDescIndex || (sampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM; esd = NULL; entry = (GF_MPEGSampleEntryBox*)gf_list_get(stsd->child_boxes, sampleDescIndex - 1); if (! 
entry) return GF_ISOM_INVALID_MEDIA; *out_esd = NULL; ESDa = NULL; type = entry->type; switch (type) { case GF_ISOM_BOX_TYPE_ENCV: case GF_ISOM_BOX_TYPE_ENCA: case GF_ISOM_BOX_TYPE_ENCS: case GF_ISOM_BOX_TYPE_ENCF: case GF_ISOM_BOX_TYPE_ENCM: case GF_ISOM_BOX_TYPE_ENCT: sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { type = sinf->original_format->data_format; } break; case GF_ISOM_BOX_TYPE_RESV: sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_RINF); if (sinf && sinf->original_format) { type = sinf->original_format->data_format; } break; } switch (type) { case GF_ISOM_BOX_TYPE_MP4V: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; ESDa = ((GF_MPEGVisualSampleEntryBox*)entry)->esd; if (ESDa) esd = (GF_ESD *) ESDa->desc; /*avc1 encrypted*/ else esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVT1: case GF_ISOM_BOX_TYPE_264B: case GF_ISOM_BOX_TYPE_265B: case GF_ISOM_BOX_TYPE_DVHE: case GF_ISOM_BOX_TYPE_VVC1: case GF_ISOM_BOX_TYPE_VVI1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT) AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia); else AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL); esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_LHV1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT) HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia); else HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL); esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_AV01: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; AV1_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia); esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_VP08: case GF_ISOM_BOX_TYPE_VP09: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; VP9_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia); esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_MP4A: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; { GF_MPEGAudioSampleEntryBox *ase = (GF_MPEGAudioSampleEntryBox*)entry; ESDa = ase->esd; if (ESDa) { esd = (GF_ESD *) ESDa->desc; } else if (!true_desc_only) { Bool make_mp4a = GF_FALSE; sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { if (sinf->original_format->data_format==GF_ISOM_BOX_TYPE_MP4A) { make_mp4a = GF_TRUE; } } else { // Assuming that if no ESD is provided the stream is Basic MPEG-4 AAC LC 
make_mp4a = GF_TRUE; } if (make_mp4a) { GF_M4ADecSpecInfo aacinfo; memset(&aacinfo, 0, sizeof(GF_M4ADecSpecInfo)); aacinfo.nb_chan = ase->channel_count; aacinfo.base_object_type = GF_M4A_AAC_LC; aacinfo.base_sr = ase->samplerate_hi; *out_esd = gf_odf_desc_esd_new(0); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AAC_MPEG4; gf_m4a_write_config(&aacinfo, &(*out_esd)->decoderConfig->decoderSpecificInfo->data, &(*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); } } } break; case GF_ISOM_BOX_TYPE_MP4S: if (entry->internal_type==GF_ISOM_SAMPLE_ENTRY_MP4S) { ESDa = entry->esd; if (ESDa) esd = (GF_ESD *) ESDa->desc; } break; #ifndef GPAC_DISABLE_TTXT case GF_ISOM_BOX_TYPE_TX3G: case GF_ISOM_BOX_TYPE_TEXT: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; if (!true_desc_only && mdia->mediaTrack->moov->mov->convert_streaming_text) { GF_Err e = gf_isom_get_ttxt_esd(mdia, out_esd); if (e) return e; break; } else return GF_ISOM_INVALID_MEDIA; #endif #ifndef GPAC_DISABLE_VTT case GF_ISOM_BOX_TYPE_WVTT: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; { GF_WebVTTSampleEntryBox*vtte = (GF_WebVTTSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_TEXT; esd->decoderConfig->objectTypeIndication = GF_CODECID_WEBVTT; if (vtte->config) { esd->decoderConfig->decoderSpecificInfo->dataLength = (u32) strlen(vtte->config->string); esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength); memcpy(esd->decoderConfig->decoderSpecificInfo->data, vtte->config->string, esd->decoderConfig->decoderSpecificInfo->dataLength); } } break; case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: case GF_ISOM_BOX_TYPE_STXT: break; #endif case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (!true_desc_only) { GF_Err e = gf_isom_get_3gpp_audio_esd(mdia->information->sampleTable, type, (GF_GenericAudioSampleEntryBox*)entry, out_esd); if (e) return e; break; } else return GF_ISOM_INVALID_MEDIA; case GF_ISOM_SUBTYPE_OPUS: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; { GF_OpusSpecificBox *e = ((GF_MPEGAudioSampleEntryBox*)entry)->cfg_opus; GF_BitStream *bs_out; if (!e) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("ESD not found for Opus\n)")); break; } *out_esd = gf_odf_desc_esd_new(2); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_OPUS; //serialize box with header - compatibility with ffmpeg bs_out = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_isom_box_size((GF_Box *) e); gf_isom_box_write((GF_Box *) e, bs_out); gf_bs_get_content(bs_out, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs_out); break; } case GF_ISOM_SUBTYPE_3GP_H263: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_VISUAL; esd->decoderConfig->objectTypeIndication = GF_CODECID_H263; break; } case GF_ISOM_SUBTYPE_MP3: if 
(entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_AUDIO; esd->decoderConfig->objectTypeIndication = GF_CODECID_MPEG_AUDIO; break; } case GF_ISOM_SUBTYPE_LSR1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { GF_LASeRSampleEntryBox*ptr = (GF_LASeRSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_SCENE; esd->decoderConfig->objectTypeIndication = GF_CODECID_LASER; esd->decoderConfig->decoderSpecificInfo->dataLength = ptr->lsr_config->hdr_size; esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*ptr->lsr_config->hdr_size); if (!esd->decoderConfig->decoderSpecificInfo->data) return GF_OUT_OF_MEM; memcpy(esd->decoderConfig->decoderSpecificInfo->data, ptr->lsr_config->hdr, sizeof(char)*ptr->lsr_config->hdr_size); break; } case GF_ISOM_SUBTYPE_MH3D_MHA1: case GF_ISOM_SUBTYPE_MH3D_MHA2: case GF_ISOM_SUBTYPE_MH3D_MHM1: case GF_ISOM_SUBTYPE_MH3D_MHM2: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { GF_MPEGAudioSampleEntryBox*ptr = (GF_MPEGAudioSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_AUDIO; if ((type==GF_ISOM_SUBTYPE_MH3D_MHA1) || (type==GF_ISOM_SUBTYPE_MH3D_MHA2)) esd->decoderConfig->objectTypeIndication = GF_CODECID_MPHA; else esd->decoderConfig->objectTypeIndication = GF_CODECID_MHAS; if (ptr->cfg_mha) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_u8(bs, ptr->cfg_mha->configuration_version); gf_bs_write_u8(bs, ptr->cfg_mha->mha_pl_indication); gf_bs_write_u8(bs, ptr->cfg_mha->reference_channel_layout); gf_bs_write_u16(bs, ptr->cfg_mha->mha_config ? ptr->cfg_mha->mha_config_size : 0); if (ptr->cfg_mha->mha_config && ptr->cfg_mha->mha_config_size) gf_bs_write_data(bs, ptr->cfg_mha->mha_config, ptr->cfg_mha->mha_config_size); gf_bs_get_content(bs, &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs); } } break; default: return GF_ISOM_INVALID_MEDIA; } if (true_desc_only) { if (!esd) return GF_ISOM_INVALID_MEDIA; *out_esd = esd; return GF_OK; } else { if (!esd && !*out_esd) return GF_ISOM_INVALID_MEDIA; if (*out_esd == NULL) return gf_odf_desc_copy((GF_Descriptor *)esd, (GF_Descriptor **)out_esd); } return GF_OK; } Bool Media_IsSampleSyncShadow(GF_ShadowSyncBox *stsh, u32 sampleNumber) { u32 i; GF_StshEntry *ent; if (!stsh) return 0; i=0; while ((ent = (GF_StshEntry*)gf_list_enum(stsh->entries, &i))) { if ((u32) ent->syncSampleNumber == sampleNumber) return 1; else if ((u32) ent->syncSampleNumber > sampleNumber) return 0; } return 0; } GF_Err Media_GetSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample **samp, u32 *sIDX, Bool no_data, u64 *out_offset) { GF_Err e; u32 bytesRead; u32 dataRefIndex, chunkNumber; u64 offset, new_size; u32 sdesc_idx; GF_SampleEntryBox *entry; GF_StscEntry *stsc_entry; if (!mdia || !mdia->information->sampleTable) return GF_BAD_PARAM; if (!mdia->information->sampleTable->SampleSize) return GF_ISOM_INVALID_FILE; //OK, here we go.... 
if (sampleNumber > mdia->information->sampleTable->SampleSize->sampleCount) return GF_BAD_PARAM; //the data info if (!sIDX && !no_data) return GF_BAD_PARAM; e = stbl_GetSampleInfos(mdia->information->sampleTable, sampleNumber, &offset, &chunkNumber, &sdesc_idx, &stsc_entry); if (e) return e; if (sIDX) (*sIDX) = sdesc_idx; if (out_offset) *out_offset = offset; if (!samp ) return GF_OK; if (mdia->information->sampleTable->TimeToSample) { //get the DTS e = stbl_GetSampleDTS(mdia->information->sampleTable->TimeToSample, sampleNumber, &(*samp)->DTS); if (e) return e; } else { (*samp)->DTS=0; } //the CTS offset if (mdia->information->sampleTable->CompositionOffset) { e = stbl_GetSampleCTS(mdia->information->sampleTable->CompositionOffset , sampleNumber, &(*samp)->CTS_Offset); if (e) return e; } else { (*samp)->CTS_Offset = 0; } //the size e = stbl_GetSampleSize(mdia->information->sampleTable->SampleSize, sampleNumber, &(*samp)->dataLength); if (e) return e; //the RAP if (mdia->information->sampleTable->SyncSample) { e = stbl_GetSampleRAP(mdia->information->sampleTable->SyncSample, sampleNumber, &(*samp)->IsRAP, NULL, NULL); if (e) return e; } else { //if no SyncSample, all samples are sync (cf spec) (*samp)->IsRAP = RAP; } if (mdia->information->sampleTable->SampleDep) { u32 isLeading, dependsOn, dependedOn, redundant; e = stbl_GetSampleDepType(mdia->information->sampleTable->SampleDep, sampleNumber, &isLeading, &dependsOn, &dependedOn, &redundant); if (!e) { if (dependsOn==1) (*samp)->IsRAP = RAP_NO; //commenting following code since it is wrong - an I frame is not always a SAP1, it can be a SAP2 or SAP3. //Keeping this code breaks AVC / HEVC openGOP import when writing sample dependencies //else if (dependsOn==2) (*samp)->IsRAP = RAP; /*if not depended upon and redundant, mark as carousel sample*/ if ((dependedOn==2) && (redundant==1)) (*samp)->IsRAP = RAP_REDUNDANT; /*TODO FIXME - we must enhance the IsRAP semantics to carry disposable info ... */ } } /*get sync shadow*/ if (Media_IsSampleSyncShadow(mdia->information->sampleTable->ShadowSync, sampleNumber)) (*samp)->IsRAP = RAP_REDUNDANT; //the data info if (!sIDX && !no_data) return GF_BAD_PARAM; if (!sIDX && !out_offset) return GF_OK; if (!sIDX) return GF_OK; (*sIDX) = sdesc_idx; // e = stbl_GetSampleInfos(mdia->information->sampleTable, sampleNumber, &offset, &chunkNumber, sIDX, &stsc_entry); // if (e) return e; //then get the DataRef e = Media_GetSampleDesc(mdia, sdesc_idx, &entry, &dataRefIndex); if (e) return e; //if moov is compressed, remove offset if sample is after moov in this file if (mdia->mediaTrack->moov->compressed_diff) { GF_DataEntryBox *ent = (GF_DataEntryBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataRefIndex - 1); if (ent && (ent->flags&1) && (offset>=mdia->mediaTrack->moov->file_offset)) { offset -= mdia->mediaTrack->moov->compressed_diff; } } if (no_data) { if ( ((*samp)->dataLength != 0) && mdia->mediaTrack->pack_num_samples) { u32 idx_in_chunk = sampleNumber - mdia->information->sampleTable->SampleToChunk->firstSampleInCurrentChunk; u32 left_in_chunk = stsc_entry->samplesPerChunk - idx_in_chunk; if (left_in_chunk > mdia->mediaTrack->pack_num_samples) left_in_chunk = mdia->mediaTrack->pack_num_samples; (*samp)->dataLength *= left_in_chunk; (*samp)->nb_pack = left_in_chunk; } return GF_OK; } // Open the data handler - check our mode, don't reopen in read only if this is //the same entry. 
In other modes we have no choice because the main data map is //divided into the original and the edition files if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_READ) { //same as last call in read mode if (!mdia->information->dataHandler) { e = gf_isom_datamap_open(mdia, dataRefIndex, stsc_entry->isEdited); if (e) return e; } mdia->information->dataEntryIndex = dataRefIndex; } else { e = gf_isom_datamap_open(mdia, dataRefIndex, stsc_entry->isEdited); if (e) return e; } if ( mdia->mediaTrack->moov->mov->read_byte_offset || mdia->mediaTrack->moov->mov->bytes_removed) { GF_DataEntryBox *ent = (GF_DataEntryBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataRefIndex - 1); if (ent && (ent->flags&1)) { u64 real_offset = mdia->mediaTrack->moov->mov->read_byte_offset + mdia->mediaTrack->moov->mov->bytes_removed; if (offset < real_offset) return GF_IO_ERR; if (mdia->information->dataHandler->last_read_offset != mdia->mediaTrack->moov->mov->read_byte_offset) { mdia->information->dataHandler->last_read_offset = mdia->mediaTrack->moov->mov->read_byte_offset; gf_bs_get_refreshed_size(mdia->information->dataHandler->bs); } offset -= real_offset; } } if ((*samp)->dataLength != 0) { if (mdia->mediaTrack->pack_num_samples) { u32 idx_in_chunk = sampleNumber - mdia->information->sampleTable->SampleToChunk->firstSampleInCurrentChunk; u32 left_in_chunk = stsc_entry->samplesPerChunk - idx_in_chunk; if (left_in_chunk > mdia->mediaTrack->pack_num_samples) left_in_chunk = mdia->mediaTrack->pack_num_samples; (*samp)->dataLength *= left_in_chunk; (*samp)->nb_pack = left_in_chunk; } /*and finally get the data, include padding if needed*/ if ((*samp)->alloc_size) { if ((*samp)->alloc_size < (*samp)->dataLength + mdia->mediaTrack->padding_bytes) { (*samp)->data = (char *) gf_realloc((*samp)->data, sizeof(char) * ( (*samp)->dataLength + mdia->mediaTrack->padding_bytes) ); if (! (*samp)->data) return GF_OUT_OF_MEM; (*samp)->alloc_size = (*samp)->dataLength + mdia->mediaTrack->padding_bytes; } } else { (*samp)->data = (char *) gf_malloc(sizeof(char) * ( (*samp)->dataLength + mdia->mediaTrack->padding_bytes) ); if (! (*samp)->data) return GF_OUT_OF_MEM; } if (mdia->mediaTrack->padding_bytes) memset((*samp)->data + (*samp)->dataLength, 0, sizeof(char) * mdia->mediaTrack->padding_bytes); //check if we can get the sample (make sure we have enougth data...) 
new_size = gf_bs_get_size(mdia->information->dataHandler->bs); if (offset + (*samp)->dataLength > new_size) { //always refresh the size to avoid wrong info on http/ftp new_size = gf_bs_get_refreshed_size(mdia->information->dataHandler->bs); if (offset + (*samp)->dataLength > new_size) { mdia->BytesMissing = offset + (*samp)->dataLength - new_size; return GF_ISOM_INCOMPLETE_FILE; } } bytesRead = gf_isom_datamap_get_data(mdia->information->dataHandler, (*samp)->data, (*samp)->dataLength, offset); //if bytesRead != sampleSize, we have an IO err if (bytesRead < (*samp)->dataLength) { return GF_IO_ERR; } mdia->BytesMissing = 0; } //finally rewrite the sample if this is an OD Access Unit or NAL-based one //we do this even if sample size is zero because of sample implicit reconstruction rules (especially tile tracks) if (mdia->handler->handlerType == GF_ISOM_MEDIA_OD) { if (!mdia->mediaTrack->moov->mov->disable_odf_translate) { e = Media_RewriteODFrame(mdia, *samp); if (e) return e; } } else if (gf_isom_is_nalu_based_entry(mdia, entry) && !gf_isom_is_encrypted_entry(entry->type) ) { e = gf_isom_nalu_sample_rewrite(mdia, *samp, sampleNumber, (GF_MPEGVisualSampleEntryBox *)entry); if (e) return e; } else if (mdia->mediaTrack->moov->mov->convert_streaming_text && ((mdia->handler->handlerType == GF_ISOM_MEDIA_TEXT) || (mdia->handler->handlerType == GF_ISOM_MEDIA_SCENE) || (mdia->handler->handlerType == GF_ISOM_MEDIA_SUBT)) && (entry->type == GF_ISOM_BOX_TYPE_TX3G || entry->type == GF_ISOM_BOX_TYPE_TEXT) ) { u64 dur; if (sampleNumber == mdia->information->sampleTable->SampleSize->sampleCount) { dur = mdia->mediaHeader->duration - (*samp)->DTS; } else { stbl_GetSampleDTS(mdia->information->sampleTable->TimeToSample, sampleNumber+1, &dur); dur -= (*samp)->DTS; } e = gf_isom_rewrite_text_sample(*samp, sdesc_idx, (u32) dur); if (e) return e; } return GF_OK; } GF_Err Media_CheckDataEntry(GF_MediaBox *mdia, u32 dataEntryIndex) { GF_DataEntryURLBox *entry; GF_DataMap *map; GF_Err e; if (!mdia || !dataEntryIndex || dataEntryIndex > gf_list_count(mdia->information->dataInformation->dref->child_boxes)) return GF_BAD_PARAM; entry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataEntryIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (entry->flags == 1) return GF_OK; //ok, not self contained, let's go for it... 
//we don't know what's a URN yet if (entry->type == GF_ISOM_BOX_TYPE_URN) return GF_NOT_SUPPORTED; if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_WRITE) { e = gf_isom_datamap_new(entry->location, NULL, GF_ISOM_DATA_MAP_READ, &map); } else { e = gf_isom_datamap_new(entry->location, mdia->mediaTrack->moov->mov->fileName, GF_ISOM_DATA_MAP_READ, &map); } if (e) return e; gf_isom_datamap_del(map); return GF_OK; } Bool Media_IsSelfContained(GF_MediaBox *mdia, u32 StreamDescIndex) { u32 drefIndex=0; GF_FullBox *a=NULL; GF_SampleEntryBox *se = NULL; Media_GetSampleDesc(mdia, StreamDescIndex, &se, &drefIndex); if (!drefIndex) return 0; if (mdia && mdia->information && mdia->information->dataInformation && mdia->information->dataInformation->dref ) { a = (GF_FullBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); } if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] broken file: Data reference index set to %d but no data reference entry found\n", drefIndex)); return 1; } if (a->flags & 1) return 1; /*QT specific*/ if (a->type == GF_QT_BOX_TYPE_ALIS) return 1; return 0; } GF_ISOMDataRefAllType Media_SelfContainedType(GF_MediaBox *mdia) { u32 nb_ext, nb_self; u32 i, count; nb_ext = nb_self = 0; count = mdia->information->sampleTable->SampleDescription ? gf_list_count(mdia->information->sampleTable->SampleDescription->child_boxes) : 0; for (i=0; i<count; i++) { if (Media_IsSelfContained(mdia, i+1)) nb_self++; else nb_ext++; } if (nb_ext==count) return ISOM_DREF_EXT; if (nb_self==count) return ISOM_DREF_SELF; return ISOM_DREF_MIXED; } //look for a sync sample from a given point in media time GF_Err Media_FindSyncSample(GF_SampleTableBox *stbl, u32 searchFromSample, u32 *sampleNumber, u8 mode) { GF_ISOSAPType isRAP; u32 next, prev, next_in_sap, prev_in_sap; if (!stbl || !stbl->SyncSample) return GF_BAD_PARAM; //set to current sample if we don't find a RAP *sampleNumber = searchFromSample; //this is not the exact sample, but the prev move to next sample if enough samples.... if ( (mode == GF_ISOM_SEARCH_SYNC_FORWARD) && (searchFromSample == stbl->SampleSize->sampleCount) ) { return GF_OK; } if ( (mode == GF_ISOM_SEARCH_SYNC_BACKWARD) && !searchFromSample) { *sampleNumber = 1; return GF_OK; } //get the entry stbl_GetSampleRAP(stbl->SyncSample, searchFromSample, &isRAP, &prev, &next); if (isRAP) { (*sampleNumber) = searchFromSample; return GF_OK; } /*check sample groups - prev & next are overwritten if RAP group is found, but are not re-initialized otherwise*/ stbl_SearchSAPs(stbl, searchFromSample, &isRAP, &prev_in_sap, &next_in_sap); if (isRAP) { (*sampleNumber) = searchFromSample; return GF_OK; } if (prev_in_sap > prev) prev = prev_in_sap; if (next_in_sap && next_in_sap < next) next = next_in_sap; //nothing yet, go for next time... 
if (mode == GF_ISOM_SEARCH_SYNC_FORWARD) { if (next) *sampleNumber = next; } else { if (prev) *sampleNumber = prev; } return GF_OK; } //create a DataReference if not existing (only for WRITE-edit mode) GF_Err Media_FindDataRef(GF_DataReferenceBox *dref, char *URLname, char *URNname, u32 *dataRefIndex) { u32 i; GF_DataEntryURLBox *entry; if (!dref) return GF_BAD_PARAM; *dataRefIndex = 0; i=0; while ((entry = (GF_DataEntryURLBox*)gf_list_enum(dref->child_boxes, &i))) { if (entry->type == GF_ISOM_BOX_TYPE_URL) { //self-contained case if (entry->flags == 1) { //if nothing specified, get the dataRef if (!URLname && !URNname) { *dataRefIndex = i; return GF_OK; } } else { //OK, check if we have URL if (URLname && !strcmp(URLname, entry->location)) { *dataRefIndex = i; return GF_OK; } } } else { //this is a URN one, only check the URN name (URL optional) if (URNname && !strcmp(URNname, ((GF_DataEntryURNBox *)entry)->nameURN)) { *dataRefIndex = i; return GF_OK; } } } return GF_OK; } //Get the total media duration based on the TimeToSample table GF_Err Media_SetDuration(GF_TrackBox *trak) { GF_Err e; GF_ESD *esd; u64 DTS; GF_SttsEntry *ent; u32 nbSamp; if (!trak || !trak->Media || !trak->Media->information || !trak->Media->information->sampleTable) return GF_ISOM_INVALID_FILE; if (!trak->Media->information->sampleTable->SampleSize || !trak->Media->information->sampleTable->TimeToSample) return GF_ISOM_INVALID_FILE; nbSamp = trak->Media->information->sampleTable->SampleSize->sampleCount; //we need to check how many samples we have. // == 1 -> last sample duration == default duration // > 1 -> last sample duration == prev sample duration switch (nbSamp) { case 0: trak->Media->mediaHeader->duration = 0; if (Track_IsMPEG4Stream(trak->Media->handler->handlerType)) { Media_GetESD(trak->Media, 1, &esd, 1); if (esd && esd->URLString) trak->Media->mediaHeader->duration = (u64) -1; } return GF_OK; // case 1: // trak->Media->mediaHeader->duration = trak->Media->mediaHeader->timeScale; // return GF_OK; default: //we assume a constant frame rate for the media and assume the last sample //will be hold the same time as the prev one e = stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp, &DTS); if (e < 0) { return e; } if (trak->Media->information->sampleTable->TimeToSample->nb_entries > 0) { ent = &trak->Media->information->sampleTable->TimeToSample->entries[trak->Media->information->sampleTable->TimeToSample->nb_entries-1]; } else { ent = NULL; } trak->Media->mediaHeader->duration = DTS; #if 1 if (ent) trak->Media->mediaHeader->duration += ent->sampleDelta; #else if (!ent) { u64 DTSprev; stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp-1, &DTSprev); trak->Media->mediaHeader->duration += (DTS - DTSprev); } else { #ifndef GPAC_DISABLE_ISOM_WRITE if (trak->moov->mov->editFileMap && trak->Media->information->sampleTable->CompositionOffset) { u32 count, i; u64 max_ts; GF_DttsEntry *cts_ent; GF_CompositionOffsetBox *ctts = trak->Media->information->sampleTable->CompositionOffset; if (ctts->w_LastSampleNumber==nbSamp) { count = gf_list_count(ctts->entryList); max_ts = trak->Media->mediaHeader->duration; while (count) { count -= 1; cts_ent = gf_list_get(ctts->entryList, count); if (nbSamp<cts_ent->sampleCount) break; for (i=0; i<cts_ent->sampleCount; i++) { stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp-i, &DTS); if ((s32) cts_ent->decodingOffset < 0) max_ts = DTS; else max_ts = DTS + cts_ent->decodingOffset; if 
(max_ts>=trak->Media->mediaHeader->duration) { trak->Media->mediaHeader->duration = max_ts; } else { break; } } if (max_ts<trak->Media->mediaHeader->duration) { break; } nbSamp-=cts_ent->sampleCount; } } } #endif /*GPAC_DISABLE_ISOM_WRITE*/ trak->Media->mediaHeader->duration += ent->sampleDelta; } #endif return GF_OK; } } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err Media_SetDrefURL(GF_DataEntryURLBox *dref_entry, const char *origName, const char *finalName) { //for now we only support dref created in same folder for relative URLs if (strstr(origName, "://") || ((origName[1]==':') && (origName[2]=='\\')) || (origName[0]=='/') || (origName[0]=='\\') ) { dref_entry->location = gf_strdup(origName); } else { char *fname = strrchr(origName, '/'); if (!fname) fname = strrchr(origName, '\\'); if (fname) fname++; if (!fname) { dref_entry->location = gf_strdup(origName); } else { u32 len = (u32) (fname - origName); if (!finalName || strncmp(origName, finalName, len)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Concatenation of relative path %s with relative path %s not supported, use absolute URLs\n", origName, finalName)); return GF_NOT_SUPPORTED; } else { dref_entry->location = gf_strdup(fname); } } } return GF_OK; } GF_Err Media_CreateDataRef(GF_ISOFile *movie, GF_DataReferenceBox *dref, char *URLname, char *URNname, u32 *dataRefIndex) { GF_Err e; Bool use_alis=GF_FALSE; GF_DataEntryURLBox *entry; if (URLname && !strcmp(URLname, "alis")) { URLname = NULL; use_alis=GF_TRUE; } if (!URLname && !URNname) { //THIS IS SELF CONTAIN, create a regular entry if needed entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, use_alis ? GF_QT_BOX_TYPE_ALIS : GF_ISOM_BOX_TYPE_URL); if (!entry) return GF_OUT_OF_MEM; entry->flags = 1; *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } else if (!URNname && URLname) { //THIS IS URL entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, GF_ISOM_BOX_TYPE_URL); if (!entry) return GF_OUT_OF_MEM; entry->flags = 0; e = Media_SetDrefURL(entry, URLname, movie->fileName ? movie->fileName : movie->finalName); if (! entry->location) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return e ? e : GF_OUT_OF_MEM; } *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } else { //THIS IS URN entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, GF_ISOM_BOX_TYPE_URN); if (!entry) return GF_OUT_OF_MEM; ((GF_DataEntryURNBox *)entry)->flags = 0; ((GF_DataEntryURNBox *)entry)->nameURN = (char*)gf_malloc(strlen(URNname)+1); if (! ((GF_DataEntryURNBox *)entry)->nameURN) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return GF_OUT_OF_MEM; } strcpy(((GF_DataEntryURNBox *)entry)->nameURN, URNname); //check for URL if (URLname) { ((GF_DataEntryURNBox *)entry)->location = (char*)gf_malloc(strlen(URLname)+1); if (! 
((GF_DataEntryURNBox *)entry)->location) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return GF_OUT_OF_MEM; } strcpy(((GF_DataEntryURNBox *)entry)->location, URLname); } *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } return GF_OK; } GF_Err Media_AddSample(GF_MediaBox *mdia, u64 data_offset, const GF_ISOSample *sample, u32 StreamDescIndex, u32 syncShadowNumber) { GF_Err e; GF_SampleTableBox *stbl; u32 sampleNumber, i; if (!mdia || !sample) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; //get a valid sampleNumber for this new guy e = stbl_AddDTS(stbl, sample->DTS, &sampleNumber, mdia->mediaHeader->timeScale, sample->nb_pack); if (e) return e; //add size e = stbl_AddSize(stbl->SampleSize, sampleNumber, sample->dataLength, sample->nb_pack); if (e) return e; //adds CTS offset if (sample->CTS_Offset) { //if we don't have a CTS table, add it... if (!stbl->CompositionOffset) { stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS); if (!stbl->CompositionOffset) return GF_OUT_OF_MEM; } //then add our CTS (the prev samples with no CTS offset will be automatically added... e = stbl_AddCTS(stbl, sampleNumber, sample->CTS_Offset); if (e) return e; } else if (stbl->CompositionOffset) { e = stbl_AddCTS(stbl, sampleNumber, sample->CTS_Offset); if (e) return e; } //The first non sync sample we see must create a syncTable if (sample->IsRAP) { //insert it only if we have a sync table and if we have an IDR slice if (stbl->SyncSample && (sample->IsRAP == RAP)) { e = stbl_AddRAP(stbl->SyncSample, sampleNumber); if (e) return e; } } else { //non-sync sample. Create a SyncSample table if needed if (!stbl->SyncSample) { stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS); if (!stbl->SyncSample) return GF_OUT_OF_MEM; //all the prev samples are sync for (i=0; i<stbl->SampleSize->sampleCount; i++) { if (i+1 != sampleNumber) { e = stbl_AddRAP(stbl->SyncSample, i+1); if (e) return e; } } } } if (sample->IsRAP==RAP_REDUNDANT) { e = stbl_AddRedundant(stbl, sampleNumber); if (e) return e; } if (!mdia->mediaTrack->chunk_cache) { //and update the chunks e = stbl_AddChunkOffset(mdia, sampleNumber, StreamDescIndex, data_offset, sample->nb_pack); if (e) return e; } if (!syncShadowNumber) return GF_OK; if (!stbl->ShadowSync) { stbl->ShadowSync = (GF_ShadowSyncBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSH); if (!stbl->ShadowSync) return GF_OUT_OF_MEM; } return stbl_AddShadow(mdia->information->sampleTable->ShadowSync, sampleNumber, syncShadowNumber); } static GF_Err UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, u32 size, s32 CTS, u64 offset, u8 isRap) { u32 i; GF_SampleTableBox *stbl = mdia->information->sampleTable; //set size, offset, RAP, CTS ... stbl_SetSampleSize(stbl->SampleSize, sampleNumber, size); stbl_SetChunkOffset(mdia, sampleNumber, offset); //do we have a CTS? if (stbl->CompositionOffset) { stbl_SetSampleCTS(stbl, sampleNumber, CTS); } else { //do we need one ?? if (CTS) { stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS); if (!stbl->CompositionOffset) return GF_OUT_OF_MEM; stbl_AddCTS(stbl, sampleNumber, CTS); } } //do we have a sync ??? if (stbl->SyncSample) { stbl_SetSampleRAP(stbl->SyncSample, sampleNumber, isRap); } else { //do we need one if (! 
isRap) { stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS); if (!stbl->SyncSample) return GF_OUT_OF_MEM; //what a pain: all the sample we had have to be sync ... for (i=0; i<stbl->SampleSize->sampleCount; i++) { if (i+1 != sampleNumber) stbl_AddRAP(stbl->SyncSample, i+1); } } } if (isRap==2) { stbl_SetRedundant(stbl, sampleNumber); } return GF_OK; } GF_Err Media_UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, Bool data_only) { GF_Err e; u32 drefIndex, chunkNum, descIndex; u64 newOffset, DTS; GF_DataEntryURLBox *Dentry; GF_SampleTableBox *stbl; if (!mdia || !sample || !sampleNumber || !mdia->mediaTrack->moov->mov->editFileMap) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; if (!data_only) { //check we have the sampe dts e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS); if (e) return e; if (DTS != sample->DTS) return GF_BAD_PARAM; } //get our infos stbl_GetSampleInfos(stbl, sampleNumber, &newOffset, &chunkNum, &descIndex, NULL); //then check the data ref e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex); if (e) return e; Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); if (!Dentry) return GF_ISOM_INVALID_FILE; if (Dentry->flags != 1) return GF_BAD_PARAM; //MEDIA DATA EDIT: write this new sample to the edit temp file newOffset = gf_isom_datamap_get_offset(mdia->mediaTrack->moov->mov->editFileMap); if (sample->dataLength) { e = gf_isom_datamap_add_data(mdia->mediaTrack->moov->mov->editFileMap, sample->data, sample->dataLength); if (e) return e; } if (data_only) { stbl_SetSampleSize(stbl->SampleSize, sampleNumber, sample->dataLength); return stbl_SetChunkOffset(mdia, sampleNumber, newOffset); } return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, newOffset, sample->IsRAP); } GF_Err Media_UpdateSampleReference(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, u64 data_offset) { GF_Err e; u32 drefIndex, chunkNum, descIndex; u64 off, DTS; GF_DataEntryURLBox *Dentry; GF_SampleTableBox *stbl; if (!mdia) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; //check we have the sampe dts e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS); if (e) return e; if (DTS != sample->DTS) return GF_BAD_PARAM; //get our infos stbl_GetSampleInfos(stbl, sampleNumber, &off, &chunkNum, &descIndex, NULL); //then check the data ref e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex); if (e) return e; Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); if (!Dentry) return GF_ISOM_INVALID_FILE; //we only modify self-contained data if (Dentry->flags == 1) return GF_ISOM_INVALID_MODE; //and we don't modify the media data return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, data_offset, sample->IsRAP); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM*/
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2019 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #include <gpac/constants.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_ISOM GF_Err Media_GetSampleDesc(GF_MediaBox *mdia, u32 SampleDescIndex, GF_SampleEntryBox **out_entry, u32 *dataRefIndex) { GF_SampleDescriptionBox *stsd; GF_SampleEntryBox *entry = NULL; if (!mdia) return GF_ISOM_INVALID_FILE; stsd = mdia->information->sampleTable->SampleDescription; if (!stsd) return GF_ISOM_INVALID_FILE; if (!SampleDescIndex || (SampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM; entry = (GF_SampleEntryBox*)gf_list_get(stsd->child_boxes, SampleDescIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (out_entry) *out_entry = entry; if (dataRefIndex) *dataRefIndex = entry->dataReferenceIndex; return GF_OK; } GF_Err Media_GetSampleDescIndex(GF_MediaBox *mdia, u64 DTS, u32 *sampleDescIndex) { GF_Err e; u32 sampleNumber, prevSampleNumber, num; u64 offset; if (sampleDescIndex == NULL) return GF_BAD_PARAM; //find the sample for this time e = stbl_findEntryForTime(mdia->information->sampleTable, (u32) DTS, 0, &sampleNumber, &prevSampleNumber); if (e) return e; if (!sampleNumber && !prevSampleNumber) { //we have to assume the track was created to be used... If we have a sampleDesc, OK if (gf_list_count(mdia->information->sampleTable->SampleDescription->child_boxes)) { (*sampleDescIndex) = 1; return GF_OK; } return GF_BAD_PARAM; } return stbl_GetSampleInfos(mdia->information->sampleTable, ( sampleNumber ? 
sampleNumber : prevSampleNumber), &offset, &num, sampleDescIndex, NULL); } static GF_Err gf_isom_get_3gpp_audio_esd(GF_SampleTableBox *stbl, u32 type, GF_GenericAudioSampleEntryBox *entry, GF_ESD **out_esd) { (*out_esd) = gf_odf_desc_esd_new(2); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; /*official mapping to MPEG-4*/ switch (type) { case GF_ISOM_SUBTYPE_3GP_EVRC: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_EVRC; return GF_OK; case GF_ISOM_SUBTYPE_3GP_QCELP: { u32 block_size, sample_rate, sample_size, i; GF_SttsEntry *ent; GF_BitStream *bs; char szName[80]; /*only map CBR*/ sample_size = stbl->SampleSize->sampleSize; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_QCELP; bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_data(bs, "QLCMfmt ", 8); gf_bs_write_u32_le(bs, 150);/*fmt chunk size*/ gf_bs_write_u8(bs, 1); gf_bs_write_u8(bs, 0); /*QCELP GUID*/ gf_bs_write_data(bs, "\x41\x6D\x7F\x5E\x15\xB1\xD0\x11\xBA\x91\x00\x80\x5F\xB4\xB9\x7E", 16); gf_bs_write_u16_le(bs, 1); memset(szName, 0, 80); strcpy(szName, "QCELP-13K(GPAC-emulated)"); gf_bs_write_data(bs, szName, 80); ent = &stbl->TimeToSample->entries[0]; sample_rate = entry->samplerate_hi; block_size = ent ? ent->sampleDelta : 160; gf_bs_write_u16_le(bs, 8*sample_size*sample_rate/block_size); gf_bs_write_u16_le(bs, sample_size); gf_bs_write_u16_le(bs, block_size); gf_bs_write_u16_le(bs, sample_rate); gf_bs_write_u16_le(bs, entry->bitspersample); gf_bs_write_u32_le(bs, sample_size ? 0 : 7); /**/ for (i=0; i<7; i++) { static const u32 qcelp_r2s [] = {0, 1, 1, 4, 2, 8, 3, 17, 4, 35, 5, 8, 14, 1}; if (sample_size) { gf_bs_write_u16(bs, 0); } else { gf_bs_write_u8(bs, qcelp_r2s[2*i+1]); gf_bs_write_u8(bs, qcelp_r2s[2*i]); } } gf_bs_write_u16(bs, 0); memset(szName, 0, 80); gf_bs_write_data(bs, szName, 20);/*reserved*/ gf_bs_get_content(bs, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs); } return GF_OK; case GF_ISOM_SUBTYPE_3GP_SMV: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_SMV; return GF_OK; case GF_ISOM_SUBTYPE_3GP_AMR: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AMR; return GF_OK; case GF_ISOM_SUBTYPE_3GP_AMR_WB: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AMR_WB; return GF_OK; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] unsupported sample description type %s\n", gf_4cc_to_str(entry->type))); break; } return GF_OK; } GF_Err Media_GetESD(GF_MediaBox *mdia, u32 sampleDescIndex, GF_ESD **out_esd, Bool true_desc_only) { u32 type; GF_ESD *esd; GF_MPEGSampleEntryBox *entry = NULL; GF_ESDBox *ESDa; GF_ProtectionSchemeInfoBox *sinf; GF_SampleDescriptionBox *stsd = mdia->information->sampleTable->SampleDescription; *out_esd = NULL; if (!stsd || !stsd->child_boxes || !sampleDescIndex || (sampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM; esd = NULL; entry = (GF_MPEGSampleEntryBox*)gf_list_get(stsd->child_boxes, sampleDescIndex - 1); if (! 
entry) return GF_ISOM_INVALID_MEDIA; *out_esd = NULL; ESDa = NULL; type = entry->type; switch (type) { case GF_ISOM_BOX_TYPE_ENCV: case GF_ISOM_BOX_TYPE_ENCA: case GF_ISOM_BOX_TYPE_ENCS: case GF_ISOM_BOX_TYPE_ENCF: case GF_ISOM_BOX_TYPE_ENCM: case GF_ISOM_BOX_TYPE_ENCT: sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { type = sinf->original_format->data_format; } break; case GF_ISOM_BOX_TYPE_RESV: sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_RINF); if (sinf && sinf->original_format) { type = sinf->original_format->data_format; } break; } switch (type) { case GF_ISOM_BOX_TYPE_MP4V: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; ESDa = ((GF_MPEGVisualSampleEntryBox*)entry)->esd; if (ESDa) esd = (GF_ESD *) ESDa->desc; /*avc1 encrypted*/ else esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVT1: case GF_ISOM_BOX_TYPE_264B: case GF_ISOM_BOX_TYPE_265B: case GF_ISOM_BOX_TYPE_DVHE: case GF_ISOM_BOX_TYPE_VVC1: case GF_ISOM_BOX_TYPE_VVI1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT) AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia); else AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL); esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_LHV1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT) HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia); else HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL); esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_AV01: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; AV1_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia); esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_VP08: case GF_ISOM_BOX_TYPE_VP09: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; VP9_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia); esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_MP4A: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; { GF_MPEGAudioSampleEntryBox *ase = (GF_MPEGAudioSampleEntryBox*)entry; ESDa = ase->esd; if (ESDa) { esd = (GF_ESD *) ESDa->desc; } else if (!true_desc_only) { Bool make_mp4a = GF_FALSE; sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { if (sinf->original_format->data_format==GF_ISOM_BOX_TYPE_MP4A) { make_mp4a = GF_TRUE; } } else { // Assuming that if no ESD is provided the stream is Basic MPEG-4 AAC LC 
make_mp4a = GF_TRUE; } if (make_mp4a) { GF_M4ADecSpecInfo aacinfo; memset(&aacinfo, 0, sizeof(GF_M4ADecSpecInfo)); aacinfo.nb_chan = ase->channel_count; aacinfo.base_object_type = GF_M4A_AAC_LC; aacinfo.base_sr = ase->samplerate_hi; *out_esd = gf_odf_desc_esd_new(0); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AAC_MPEG4; gf_m4a_write_config(&aacinfo, &(*out_esd)->decoderConfig->decoderSpecificInfo->data, &(*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); } } } break; case GF_ISOM_BOX_TYPE_MP4S: if (entry->internal_type==GF_ISOM_SAMPLE_ENTRY_MP4S) { ESDa = entry->esd; if (ESDa) esd = (GF_ESD *) ESDa->desc; } break; #ifndef GPAC_DISABLE_TTXT case GF_ISOM_BOX_TYPE_TX3G: case GF_ISOM_BOX_TYPE_TEXT: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; if (!true_desc_only && mdia->mediaTrack->moov->mov->convert_streaming_text) { GF_Err e = gf_isom_get_ttxt_esd(mdia, out_esd); if (e) return e; break; } else return GF_ISOM_INVALID_MEDIA; #endif #ifndef GPAC_DISABLE_VTT case GF_ISOM_BOX_TYPE_WVTT: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; { GF_WebVTTSampleEntryBox*vtte = (GF_WebVTTSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_TEXT; esd->decoderConfig->objectTypeIndication = GF_CODECID_WEBVTT; if (vtte->config) { esd->decoderConfig->decoderSpecificInfo->dataLength = (u32) strlen(vtte->config->string); esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength); memcpy(esd->decoderConfig->decoderSpecificInfo->data, vtte->config->string, esd->decoderConfig->decoderSpecificInfo->dataLength); } } break; case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: case GF_ISOM_BOX_TYPE_STXT: break; #endif case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (!true_desc_only) { GF_Err e = gf_isom_get_3gpp_audio_esd(mdia->information->sampleTable, type, (GF_GenericAudioSampleEntryBox*)entry, out_esd); if (e) return e; break; } else return GF_ISOM_INVALID_MEDIA; case GF_ISOM_SUBTYPE_OPUS: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; { GF_OpusSpecificBox *e = ((GF_MPEGAudioSampleEntryBox*)entry)->cfg_opus; GF_BitStream *bs_out; if (!e) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("ESD not found for Opus\n)")); break; } *out_esd = gf_odf_desc_esd_new(2); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_OPUS; //serialize box with header - compatibility with ffmpeg bs_out = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_isom_box_size((GF_Box *) e); gf_isom_box_write((GF_Box *) e, bs_out); gf_bs_get_content(bs_out, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs_out); break; } case GF_ISOM_SUBTYPE_3GP_H263: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_VISUAL; esd->decoderConfig->objectTypeIndication = GF_CODECID_H263; break; } case GF_ISOM_SUBTYPE_MP3: if 
(entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_AUDIO; esd->decoderConfig->objectTypeIndication = GF_CODECID_MPEG_AUDIO; break; } case GF_ISOM_SUBTYPE_LSR1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { GF_LASeRSampleEntryBox*ptr = (GF_LASeRSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_SCENE; esd->decoderConfig->objectTypeIndication = GF_CODECID_LASER; esd->decoderConfig->decoderSpecificInfo->dataLength = ptr->lsr_config->hdr_size; esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*ptr->lsr_config->hdr_size); if (!esd->decoderConfig->decoderSpecificInfo->data) return GF_OUT_OF_MEM; memcpy(esd->decoderConfig->decoderSpecificInfo->data, ptr->lsr_config->hdr, sizeof(char)*ptr->lsr_config->hdr_size); break; } case GF_ISOM_SUBTYPE_MH3D_MHA1: case GF_ISOM_SUBTYPE_MH3D_MHA2: case GF_ISOM_SUBTYPE_MH3D_MHM1: case GF_ISOM_SUBTYPE_MH3D_MHM2: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { GF_MPEGAudioSampleEntryBox*ptr = (GF_MPEGAudioSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_AUDIO; if ((type==GF_ISOM_SUBTYPE_MH3D_MHA1) || (type==GF_ISOM_SUBTYPE_MH3D_MHA2)) esd->decoderConfig->objectTypeIndication = GF_CODECID_MPHA; else esd->decoderConfig->objectTypeIndication = GF_CODECID_MHAS; if (ptr->cfg_mha) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_u8(bs, ptr->cfg_mha->configuration_version); gf_bs_write_u8(bs, ptr->cfg_mha->mha_pl_indication); gf_bs_write_u8(bs, ptr->cfg_mha->reference_channel_layout); gf_bs_write_u16(bs, ptr->cfg_mha->mha_config ? ptr->cfg_mha->mha_config_size : 0); if (ptr->cfg_mha->mha_config && ptr->cfg_mha->mha_config_size) gf_bs_write_data(bs, ptr->cfg_mha->mha_config, ptr->cfg_mha->mha_config_size); gf_bs_get_content(bs, &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs); } } break; default: return GF_ISOM_INVALID_MEDIA; } if (true_desc_only) { if (!esd) return GF_ISOM_INVALID_MEDIA; *out_esd = esd; return GF_OK; } else { if (!esd && !*out_esd) return GF_ISOM_INVALID_MEDIA; if (*out_esd == NULL) return gf_odf_desc_copy((GF_Descriptor *)esd, (GF_Descriptor **)out_esd); } return GF_OK; } Bool Media_IsSampleSyncShadow(GF_ShadowSyncBox *stsh, u32 sampleNumber) { u32 i; GF_StshEntry *ent; if (!stsh) return 0; i=0; while ((ent = (GF_StshEntry*)gf_list_enum(stsh->entries, &i))) { if ((u32) ent->syncSampleNumber == sampleNumber) return 1; else if ((u32) ent->syncSampleNumber > sampleNumber) return 0; } return 0; } GF_Err Media_GetSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample **samp, u32 *sIDX, Bool no_data, u64 *out_offset) { GF_Err e; u32 bytesRead; u32 dataRefIndex, chunkNumber; u64 offset, new_size; u32 sdesc_idx; GF_SampleEntryBox *entry; GF_StscEntry *stsc_entry; if (!mdia || !mdia->information->sampleTable) return GF_BAD_PARAM; if (!mdia->information->sampleTable->SampleSize) return GF_ISOM_INVALID_FILE; //OK, here we go.... 
if (sampleNumber > mdia->information->sampleTable->SampleSize->sampleCount) return GF_BAD_PARAM; //the data info if (!sIDX && !no_data) return GF_BAD_PARAM; e = stbl_GetSampleInfos(mdia->information->sampleTable, sampleNumber, &offset, &chunkNumber, &sdesc_idx, &stsc_entry); if (e) return e; if (sIDX) (*sIDX) = sdesc_idx; if (out_offset) *out_offset = offset; if (!samp ) return GF_OK; if (mdia->information->sampleTable->TimeToSample) { //get the DTS e = stbl_GetSampleDTS(mdia->information->sampleTable->TimeToSample, sampleNumber, &(*samp)->DTS); if (e) return e; } else { (*samp)->DTS=0; } //the CTS offset if (mdia->information->sampleTable->CompositionOffset) { e = stbl_GetSampleCTS(mdia->information->sampleTable->CompositionOffset , sampleNumber, &(*samp)->CTS_Offset); if (e) return e; } else { (*samp)->CTS_Offset = 0; } //the size e = stbl_GetSampleSize(mdia->information->sampleTable->SampleSize, sampleNumber, &(*samp)->dataLength); if (e) return e; //the RAP if (mdia->information->sampleTable->SyncSample) { e = stbl_GetSampleRAP(mdia->information->sampleTable->SyncSample, sampleNumber, &(*samp)->IsRAP, NULL, NULL); if (e) return e; } else { //if no SyncSample, all samples are sync (cf spec) (*samp)->IsRAP = RAP; } if (mdia->information->sampleTable->SampleDep) { u32 isLeading, dependsOn, dependedOn, redundant; e = stbl_GetSampleDepType(mdia->information->sampleTable->SampleDep, sampleNumber, &isLeading, &dependsOn, &dependedOn, &redundant); if (!e) { if (dependsOn==1) (*samp)->IsRAP = RAP_NO; //commenting following code since it is wrong - an I frame is not always a SAP1, it can be a SAP2 or SAP3. //Keeping this code breaks AVC / HEVC openGOP import when writing sample dependencies //else if (dependsOn==2) (*samp)->IsRAP = RAP; /*if not depended upon and redundant, mark as carousel sample*/ if ((dependedOn==2) && (redundant==1)) (*samp)->IsRAP = RAP_REDUNDANT; /*TODO FIXME - we must enhance the IsRAP semantics to carry disposable info ... */ } } /*get sync shadow*/ if (Media_IsSampleSyncShadow(mdia->information->sampleTable->ShadowSync, sampleNumber)) (*samp)->IsRAP = RAP_REDUNDANT; //the data info if (!sIDX && !no_data) return GF_BAD_PARAM; if (!sIDX && !out_offset) return GF_OK; if (!sIDX) return GF_OK; (*sIDX) = sdesc_idx; // e = stbl_GetSampleInfos(mdia->information->sampleTable, sampleNumber, &offset, &chunkNumber, sIDX, &stsc_entry); // if (e) return e; //then get the DataRef e = Media_GetSampleDesc(mdia, sdesc_idx, &entry, &dataRefIndex); if (e) return e; //if moov is compressed, remove offset if sample is after moov in this file if (mdia->mediaTrack->moov->compressed_diff) { GF_DataEntryBox *ent = (GF_DataEntryBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataRefIndex - 1); if (ent && (ent->flags&1) && (offset>=mdia->mediaTrack->moov->file_offset)) { offset -= mdia->mediaTrack->moov->compressed_diff; } } if (no_data) { if ( ((*samp)->dataLength != 0) && mdia->mediaTrack->pack_num_samples) { u32 idx_in_chunk = sampleNumber - mdia->information->sampleTable->SampleToChunk->firstSampleInCurrentChunk; u32 left_in_chunk = stsc_entry->samplesPerChunk - idx_in_chunk; if (left_in_chunk > mdia->mediaTrack->pack_num_samples) left_in_chunk = mdia->mediaTrack->pack_num_samples; (*samp)->dataLength *= left_in_chunk; (*samp)->nb_pack = left_in_chunk; } return GF_OK; } // Open the data handler - check our mode, don't reopen in read only if this is //the same entry. 
In other modes we have no choice because the main data map is //divided into the original and the edition files if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_READ) { //same as last call in read mode if (!mdia->information->dataHandler) { e = gf_isom_datamap_open(mdia, dataRefIndex, stsc_entry->isEdited); if (e) return e; } mdia->information->dataEntryIndex = dataRefIndex; } else { e = gf_isom_datamap_open(mdia, dataRefIndex, stsc_entry->isEdited); if (e) return e; } if ( mdia->mediaTrack->moov->mov->read_byte_offset || mdia->mediaTrack->moov->mov->bytes_removed) { GF_DataEntryBox *ent = (GF_DataEntryBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataRefIndex - 1); if (ent && (ent->flags&1)) { u64 real_offset = mdia->mediaTrack->moov->mov->read_byte_offset + mdia->mediaTrack->moov->mov->bytes_removed; if (offset < real_offset) return GF_IO_ERR; if (mdia->information->dataHandler->last_read_offset != mdia->mediaTrack->moov->mov->read_byte_offset) { mdia->information->dataHandler->last_read_offset = mdia->mediaTrack->moov->mov->read_byte_offset; gf_bs_get_refreshed_size(mdia->information->dataHandler->bs); } offset -= real_offset; } } if ((*samp)->dataLength != 0) { if (mdia->mediaTrack->pack_num_samples) { u32 idx_in_chunk = sampleNumber - mdia->information->sampleTable->SampleToChunk->firstSampleInCurrentChunk; u32 left_in_chunk = stsc_entry->samplesPerChunk - idx_in_chunk; if (left_in_chunk > mdia->mediaTrack->pack_num_samples) left_in_chunk = mdia->mediaTrack->pack_num_samples; (*samp)->dataLength *= left_in_chunk; (*samp)->nb_pack = left_in_chunk; } /*and finally get the data, include padding if needed*/ if ((*samp)->alloc_size) { if ((*samp)->alloc_size < (*samp)->dataLength + mdia->mediaTrack->padding_bytes) { (*samp)->data = (char *) gf_realloc((*samp)->data, sizeof(char) * ( (*samp)->dataLength + mdia->mediaTrack->padding_bytes) ); if (! (*samp)->data) return GF_OUT_OF_MEM; (*samp)->alloc_size = (*samp)->dataLength + mdia->mediaTrack->padding_bytes; } } else { (*samp)->data = (char *) gf_malloc(sizeof(char) * ( (*samp)->dataLength + mdia->mediaTrack->padding_bytes) ); if (! (*samp)->data) return GF_OUT_OF_MEM; } if (mdia->mediaTrack->padding_bytes) memset((*samp)->data + (*samp)->dataLength, 0, sizeof(char) * mdia->mediaTrack->padding_bytes); //check if we can get the sample (make sure we have enougth data...) 
new_size = gf_bs_get_size(mdia->information->dataHandler->bs); if (offset + (*samp)->dataLength > new_size) { //always refresh the size to avoid wrong info on http/ftp new_size = gf_bs_get_refreshed_size(mdia->information->dataHandler->bs); if (offset + (*samp)->dataLength > new_size) { mdia->BytesMissing = offset + (*samp)->dataLength - new_size; return GF_ISOM_INCOMPLETE_FILE; } } bytesRead = gf_isom_datamap_get_data(mdia->information->dataHandler, (*samp)->data, (*samp)->dataLength, offset); //if bytesRead != sampleSize, we have an IO err if (bytesRead < (*samp)->dataLength) { return GF_IO_ERR; } mdia->BytesMissing = 0; } //finally rewrite the sample if this is an OD Access Unit or NAL-based one //we do this even if sample size is zero because of sample implicit reconstruction rules (especially tile tracks) if (mdia->handler->handlerType == GF_ISOM_MEDIA_OD) { if (!mdia->mediaTrack->moov->mov->disable_odf_translate) { e = Media_RewriteODFrame(mdia, *samp); if (e) return e; } } else if (gf_isom_is_nalu_based_entry(mdia, entry) && !gf_isom_is_encrypted_entry(entry->type) ) { e = gf_isom_nalu_sample_rewrite(mdia, *samp, sampleNumber, (GF_MPEGVisualSampleEntryBox *)entry); if (e) return e; } else if (mdia->mediaTrack->moov->mov->convert_streaming_text && ((mdia->handler->handlerType == GF_ISOM_MEDIA_TEXT) || (mdia->handler->handlerType == GF_ISOM_MEDIA_SCENE) || (mdia->handler->handlerType == GF_ISOM_MEDIA_SUBT)) && (entry->type == GF_ISOM_BOX_TYPE_TX3G || entry->type == GF_ISOM_BOX_TYPE_TEXT) ) { u64 dur; if (sampleNumber == mdia->information->sampleTable->SampleSize->sampleCount) { dur = mdia->mediaHeader->duration - (*samp)->DTS; } else { stbl_GetSampleDTS(mdia->information->sampleTable->TimeToSample, sampleNumber+1, &dur); dur -= (*samp)->DTS; } e = gf_isom_rewrite_text_sample(*samp, sdesc_idx, (u32) dur); if (e) return e; } return GF_OK; } GF_Err Media_CheckDataEntry(GF_MediaBox *mdia, u32 dataEntryIndex) { GF_DataEntryURLBox *entry; GF_DataMap *map; GF_Err e; if (!mdia || !dataEntryIndex || dataEntryIndex > gf_list_count(mdia->information->dataInformation->dref->child_boxes)) return GF_BAD_PARAM; entry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataEntryIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (entry->flags == 1) return GF_OK; //ok, not self contained, let's go for it... 
//we only support alias and URL boxes if ((entry->type != GF_ISOM_BOX_TYPE_URL) && (entry->type != GF_QT_BOX_TYPE_ALIS) ) return GF_NOT_SUPPORTED; if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_WRITE) { e = gf_isom_datamap_new(entry->location, NULL, GF_ISOM_DATA_MAP_READ, &map); } else { e = gf_isom_datamap_new(entry->location, mdia->mediaTrack->moov->mov->fileName, GF_ISOM_DATA_MAP_READ, &map); } if (e) return e; gf_isom_datamap_del(map); return GF_OK; } Bool Media_IsSelfContained(GF_MediaBox *mdia, u32 StreamDescIndex) { u32 drefIndex=0; GF_FullBox *a=NULL; GF_SampleEntryBox *se = NULL; Media_GetSampleDesc(mdia, StreamDescIndex, &se, &drefIndex); if (!drefIndex) return 0; if (mdia && mdia->information && mdia->information->dataInformation && mdia->information->dataInformation->dref ) { a = (GF_FullBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); } if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] broken file: Data reference index set to %d but no data reference entry found\n", drefIndex)); return 1; } if (a->flags & 1) return 1; /*QT specific*/ if (a->type == GF_QT_BOX_TYPE_ALIS) return 1; return 0; } GF_ISOMDataRefAllType Media_SelfContainedType(GF_MediaBox *mdia) { u32 nb_ext, nb_self; u32 i, count; nb_ext = nb_self = 0; count = mdia->information->sampleTable->SampleDescription ? gf_list_count(mdia->information->sampleTable->SampleDescription->child_boxes) : 0; for (i=0; i<count; i++) { if (Media_IsSelfContained(mdia, i+1)) nb_self++; else nb_ext++; } if (nb_ext==count) return ISOM_DREF_EXT; if (nb_self==count) return ISOM_DREF_SELF; return ISOM_DREF_MIXED; } //look for a sync sample from a given point in media time GF_Err Media_FindSyncSample(GF_SampleTableBox *stbl, u32 searchFromSample, u32 *sampleNumber, u8 mode) { GF_ISOSAPType isRAP; u32 next, prev, next_in_sap, prev_in_sap; if (!stbl || !stbl->SyncSample) return GF_BAD_PARAM; //set to current sample if we don't find a RAP *sampleNumber = searchFromSample; //this is not the exact sample, but the prev move to next sample if enough samples.... if ( (mode == GF_ISOM_SEARCH_SYNC_FORWARD) && (searchFromSample == stbl->SampleSize->sampleCount) ) { return GF_OK; } if ( (mode == GF_ISOM_SEARCH_SYNC_BACKWARD) && !searchFromSample) { *sampleNumber = 1; return GF_OK; } //get the entry stbl_GetSampleRAP(stbl->SyncSample, searchFromSample, &isRAP, &prev, &next); if (isRAP) { (*sampleNumber) = searchFromSample; return GF_OK; } /*check sample groups - prev & next are overwritten if RAP group is found, but are not re-initialized otherwise*/ stbl_SearchSAPs(stbl, searchFromSample, &isRAP, &prev_in_sap, &next_in_sap); if (isRAP) { (*sampleNumber) = searchFromSample; return GF_OK; } if (prev_in_sap > prev) prev = prev_in_sap; if (next_in_sap && next_in_sap < next) next = next_in_sap; //nothing yet, go for next time... 
if (mode == GF_ISOM_SEARCH_SYNC_FORWARD) { if (next) *sampleNumber = next; } else { if (prev) *sampleNumber = prev; } return GF_OK; } //create a DataReference if not existing (only for WRITE-edit mode) GF_Err Media_FindDataRef(GF_DataReferenceBox *dref, char *URLname, char *URNname, u32 *dataRefIndex) { u32 i; GF_DataEntryURLBox *entry; if (!dref) return GF_BAD_PARAM; *dataRefIndex = 0; i=0; while ((entry = (GF_DataEntryURLBox*)gf_list_enum(dref->child_boxes, &i))) { if (entry->type == GF_ISOM_BOX_TYPE_URL) { //self-contained case if (entry->flags == 1) { //if nothing specified, get the dataRef if (!URLname && !URNname) { *dataRefIndex = i; return GF_OK; } } else { //OK, check if we have URL if (URLname && !strcmp(URLname, entry->location)) { *dataRefIndex = i; return GF_OK; } } } else { //this is a URN one, only check the URN name (URL optional) if (URNname && !strcmp(URNname, ((GF_DataEntryURNBox *)entry)->nameURN)) { *dataRefIndex = i; return GF_OK; } } } return GF_OK; } //Get the total media duration based on the TimeToSample table GF_Err Media_SetDuration(GF_TrackBox *trak) { GF_Err e; GF_ESD *esd; u64 DTS; GF_SttsEntry *ent; u32 nbSamp; if (!trak || !trak->Media || !trak->Media->information || !trak->Media->information->sampleTable) return GF_ISOM_INVALID_FILE; if (!trak->Media->information->sampleTable->SampleSize || !trak->Media->information->sampleTable->TimeToSample) return GF_ISOM_INVALID_FILE; nbSamp = trak->Media->information->sampleTable->SampleSize->sampleCount; //we need to check how many samples we have. // == 1 -> last sample duration == default duration // > 1 -> last sample duration == prev sample duration switch (nbSamp) { case 0: trak->Media->mediaHeader->duration = 0; if (Track_IsMPEG4Stream(trak->Media->handler->handlerType)) { Media_GetESD(trak->Media, 1, &esd, 1); if (esd && esd->URLString) trak->Media->mediaHeader->duration = (u64) -1; } return GF_OK; // case 1: // trak->Media->mediaHeader->duration = trak->Media->mediaHeader->timeScale; // return GF_OK; default: //we assume a constant frame rate for the media and assume the last sample //will be hold the same time as the prev one e = stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp, &DTS); if (e < 0) { return e; } if (trak->Media->information->sampleTable->TimeToSample->nb_entries > 0) { ent = &trak->Media->information->sampleTable->TimeToSample->entries[trak->Media->information->sampleTable->TimeToSample->nb_entries-1]; } else { ent = NULL; } trak->Media->mediaHeader->duration = DTS; #if 1 if (ent) trak->Media->mediaHeader->duration += ent->sampleDelta; #else if (!ent) { u64 DTSprev; stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp-1, &DTSprev); trak->Media->mediaHeader->duration += (DTS - DTSprev); } else { #ifndef GPAC_DISABLE_ISOM_WRITE if (trak->moov->mov->editFileMap && trak->Media->information->sampleTable->CompositionOffset) { u32 count, i; u64 max_ts; GF_DttsEntry *cts_ent; GF_CompositionOffsetBox *ctts = trak->Media->information->sampleTable->CompositionOffset; if (ctts->w_LastSampleNumber==nbSamp) { count = gf_list_count(ctts->entryList); max_ts = trak->Media->mediaHeader->duration; while (count) { count -= 1; cts_ent = gf_list_get(ctts->entryList, count); if (nbSamp<cts_ent->sampleCount) break; for (i=0; i<cts_ent->sampleCount; i++) { stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp-i, &DTS); if ((s32) cts_ent->decodingOffset < 0) max_ts = DTS; else max_ts = DTS + cts_ent->decodingOffset; if 
(max_ts>=trak->Media->mediaHeader->duration) { trak->Media->mediaHeader->duration = max_ts; } else { break; } } if (max_ts<trak->Media->mediaHeader->duration) { break; } nbSamp-=cts_ent->sampleCount; } } } #endif /*GPAC_DISABLE_ISOM_WRITE*/ trak->Media->mediaHeader->duration += ent->sampleDelta; } #endif return GF_OK; } } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err Media_SetDrefURL(GF_DataEntryURLBox *dref_entry, const char *origName, const char *finalName) { //for now we only support dref created in same folder for relative URLs if (strstr(origName, "://") || ((origName[1]==':') && (origName[2]=='\\')) || (origName[0]=='/') || (origName[0]=='\\') ) { dref_entry->location = gf_strdup(origName); } else { char *fname = strrchr(origName, '/'); if (!fname) fname = strrchr(origName, '\\'); if (fname) fname++; if (!fname) { dref_entry->location = gf_strdup(origName); } else { u32 len = (u32) (fname - origName); if (!finalName || strncmp(origName, finalName, len)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Concatenation of relative path %s with relative path %s not supported, use absolute URLs\n", origName, finalName)); return GF_NOT_SUPPORTED; } else { dref_entry->location = gf_strdup(fname); } } } return GF_OK; } GF_Err Media_CreateDataRef(GF_ISOFile *movie, GF_DataReferenceBox *dref, char *URLname, char *URNname, u32 *dataRefIndex) { GF_Err e; Bool use_alis=GF_FALSE; GF_DataEntryURLBox *entry; if (URLname && !strcmp(URLname, "alis")) { URLname = NULL; use_alis=GF_TRUE; } if (!URLname && !URNname) { //THIS IS SELF CONTAIN, create a regular entry if needed entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, use_alis ? GF_QT_BOX_TYPE_ALIS : GF_ISOM_BOX_TYPE_URL); if (!entry) return GF_OUT_OF_MEM; entry->flags = 1; *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } else if (!URNname && URLname) { //THIS IS URL entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, GF_ISOM_BOX_TYPE_URL); if (!entry) return GF_OUT_OF_MEM; entry->flags = 0; e = Media_SetDrefURL(entry, URLname, movie->fileName ? movie->fileName : movie->finalName); if (! entry->location) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return e ? e : GF_OUT_OF_MEM; } *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } else { //THIS IS URN entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, GF_ISOM_BOX_TYPE_URN); if (!entry) return GF_OUT_OF_MEM; ((GF_DataEntryURNBox *)entry)->flags = 0; ((GF_DataEntryURNBox *)entry)->nameURN = (char*)gf_malloc(strlen(URNname)+1); if (! ((GF_DataEntryURNBox *)entry)->nameURN) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return GF_OUT_OF_MEM; } strcpy(((GF_DataEntryURNBox *)entry)->nameURN, URNname); //check for URL if (URLname) { ((GF_DataEntryURNBox *)entry)->location = (char*)gf_malloc(strlen(URLname)+1); if (! 
((GF_DataEntryURNBox *)entry)->location) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return GF_OUT_OF_MEM; } strcpy(((GF_DataEntryURNBox *)entry)->location, URLname); } *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } return GF_OK; } GF_Err Media_AddSample(GF_MediaBox *mdia, u64 data_offset, const GF_ISOSample *sample, u32 StreamDescIndex, u32 syncShadowNumber) { GF_Err e; GF_SampleTableBox *stbl; u32 sampleNumber, i; if (!mdia || !sample) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; //get a valid sampleNumber for this new guy e = stbl_AddDTS(stbl, sample->DTS, &sampleNumber, mdia->mediaHeader->timeScale, sample->nb_pack); if (e) return e; //add size e = stbl_AddSize(stbl->SampleSize, sampleNumber, sample->dataLength, sample->nb_pack); if (e) return e; //adds CTS offset if (sample->CTS_Offset) { //if we don't have a CTS table, add it... if (!stbl->CompositionOffset) { stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS); if (!stbl->CompositionOffset) return GF_OUT_OF_MEM; } //then add our CTS (the prev samples with no CTS offset will be automatically added... e = stbl_AddCTS(stbl, sampleNumber, sample->CTS_Offset); if (e) return e; } else if (stbl->CompositionOffset) { e = stbl_AddCTS(stbl, sampleNumber, sample->CTS_Offset); if (e) return e; } //The first non sync sample we see must create a syncTable if (sample->IsRAP) { //insert it only if we have a sync table and if we have an IDR slice if (stbl->SyncSample && (sample->IsRAP == RAP)) { e = stbl_AddRAP(stbl->SyncSample, sampleNumber); if (e) return e; } } else { //non-sync sample. Create a SyncSample table if needed if (!stbl->SyncSample) { stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS); if (!stbl->SyncSample) return GF_OUT_OF_MEM; //all the prev samples are sync for (i=0; i<stbl->SampleSize->sampleCount; i++) { if (i+1 != sampleNumber) { e = stbl_AddRAP(stbl->SyncSample, i+1); if (e) return e; } } } } if (sample->IsRAP==RAP_REDUNDANT) { e = stbl_AddRedundant(stbl, sampleNumber); if (e) return e; } if (!mdia->mediaTrack->chunk_cache) { //and update the chunks e = stbl_AddChunkOffset(mdia, sampleNumber, StreamDescIndex, data_offset, sample->nb_pack); if (e) return e; } if (!syncShadowNumber) return GF_OK; if (!stbl->ShadowSync) { stbl->ShadowSync = (GF_ShadowSyncBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSH); if (!stbl->ShadowSync) return GF_OUT_OF_MEM; } return stbl_AddShadow(mdia->information->sampleTable->ShadowSync, sampleNumber, syncShadowNumber); } static GF_Err UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, u32 size, s32 CTS, u64 offset, u8 isRap) { u32 i; GF_SampleTableBox *stbl = mdia->information->sampleTable; //set size, offset, RAP, CTS ... stbl_SetSampleSize(stbl->SampleSize, sampleNumber, size); stbl_SetChunkOffset(mdia, sampleNumber, offset); //do we have a CTS? if (stbl->CompositionOffset) { stbl_SetSampleCTS(stbl, sampleNumber, CTS); } else { //do we need one ?? if (CTS) { stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS); if (!stbl->CompositionOffset) return GF_OUT_OF_MEM; stbl_AddCTS(stbl, sampleNumber, CTS); } } //do we have a sync ??? if (stbl->SyncSample) { stbl_SetSampleRAP(stbl->SyncSample, sampleNumber, isRap); } else { //do we need one if (! 
isRap) { stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS); if (!stbl->SyncSample) return GF_OUT_OF_MEM; //what a pain: all the sample we had have to be sync ... for (i=0; i<stbl->SampleSize->sampleCount; i++) { if (i+1 != sampleNumber) stbl_AddRAP(stbl->SyncSample, i+1); } } } if (isRap==2) { stbl_SetRedundant(stbl, sampleNumber); } return GF_OK; } GF_Err Media_UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, Bool data_only) { GF_Err e; u32 drefIndex, chunkNum, descIndex; u64 newOffset, DTS; GF_DataEntryURLBox *Dentry; GF_SampleTableBox *stbl; if (!mdia || !sample || !sampleNumber || !mdia->mediaTrack->moov->mov->editFileMap) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; if (!data_only) { //check we have the sampe dts e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS); if (e) return e; if (DTS != sample->DTS) return GF_BAD_PARAM; } //get our infos stbl_GetSampleInfos(stbl, sampleNumber, &newOffset, &chunkNum, &descIndex, NULL); //then check the data ref e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex); if (e) return e; Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); if (!Dentry) return GF_ISOM_INVALID_FILE; if (Dentry->flags != 1) return GF_BAD_PARAM; //MEDIA DATA EDIT: write this new sample to the edit temp file newOffset = gf_isom_datamap_get_offset(mdia->mediaTrack->moov->mov->editFileMap); if (sample->dataLength) { e = gf_isom_datamap_add_data(mdia->mediaTrack->moov->mov->editFileMap, sample->data, sample->dataLength); if (e) return e; } if (data_only) { stbl_SetSampleSize(stbl->SampleSize, sampleNumber, sample->dataLength); return stbl_SetChunkOffset(mdia, sampleNumber, newOffset); } return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, newOffset, sample->IsRAP); } GF_Err Media_UpdateSampleReference(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, u64 data_offset) { GF_Err e; u32 drefIndex, chunkNum, descIndex; u64 off, DTS; GF_DataEntryURLBox *Dentry; GF_SampleTableBox *stbl; if (!mdia) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; //check we have the sampe dts e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS); if (e) return e; if (DTS != sample->DTS) return GF_BAD_PARAM; //get our infos stbl_GetSampleInfos(stbl, sampleNumber, &off, &chunkNum, &descIndex, NULL); //then check the data ref e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex); if (e) return e; Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); if (!Dentry) return GF_ISOM_INVALID_FILE; //we only modify self-contained data if (Dentry->flags == 1) return GF_ISOM_INVALID_MODE; //and we don't modify the media data return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, data_offset, sample->IsRAP); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM*/
GF_Err Media_CheckDataEntry(GF_MediaBox *mdia, u32 dataEntryIndex) { GF_DataEntryURLBox *entry; GF_DataMap *map; GF_Err e; if (!mdia || !dataEntryIndex || dataEntryIndex > gf_list_count(mdia->information->dataInformation->dref->child_boxes)) return GF_BAD_PARAM; entry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataEntryIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (entry->flags == 1) return GF_OK; //ok, not self contained, let's go for it... //we don't know what's a URN yet if (entry->type == GF_ISOM_BOX_TYPE_URN) return GF_NOT_SUPPORTED; if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_WRITE) { e = gf_isom_datamap_new(entry->location, NULL, GF_ISOM_DATA_MAP_READ, &map); } else { e = gf_isom_datamap_new(entry->location, mdia->mediaTrack->moov->mov->fileName, GF_ISOM_DATA_MAP_READ, &map); } if (e) return e; gf_isom_datamap_del(map); return GF_OK; }
GF_Err Media_CheckDataEntry(GF_MediaBox *mdia, u32 dataEntryIndex) { GF_DataEntryURLBox *entry; GF_DataMap *map; GF_Err e; if (!mdia || !dataEntryIndex || dataEntryIndex > gf_list_count(mdia->information->dataInformation->dref->child_boxes)) return GF_BAD_PARAM; entry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataEntryIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (entry->flags == 1) return GF_OK; //ok, not self contained, let's go for it... //we only support alias and URL boxes if ((entry->type != GF_ISOM_BOX_TYPE_URL) && (entry->type != GF_QT_BOX_TYPE_ALIS) ) return GF_NOT_SUPPORTED; if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_WRITE) { e = gf_isom_datamap_new(entry->location, NULL, GF_ISOM_DATA_MAP_READ, &map); } else { e = gf_isom_datamap_new(entry->location, mdia->mediaTrack->moov->mov->fileName, GF_ISOM_DATA_MAP_READ, &map); } if (e) return e; gf_isom_datamap_del(map); return GF_OK; }
{'added': [(687, '\t//we only support alias and URL boxes'), (688, '\tif ((entry->type != GF_ISOM_BOX_TYPE_URL) && (entry->type != GF_QT_BOX_TYPE_ALIS) )'), (689, '\t\treturn GF_NOT_SUPPORTED;'), (690, '')], 'deleted': [(677, ''), (688, "\t//we don't know what's a URN yet"), (689, '\tif (entry->type == GF_ISOM_BOX_TYPE_URN) return GF_NOT_SUPPORTED;')]}
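The diff above swaps the URN-only rejection for an allow-list of URL and alias data references. Below is a minimal, self-contained sketch of the two guards; the enum values and helper functions are stand-ins for illustration only (the real check operates on entry->type of a GF_DataEntryURLBox, comparing against gpac's four-character-code box types, as shown in the func_before/func_after fields above).

/* Stand-alone sketch of the guard change recorded in this diff. */
#include <stdio.h>

/* Stand-in values; in gpac these are four-character-code box types. */
typedef enum {
    BOX_TYPE_URL,   /* stands in for GF_ISOM_BOX_TYPE_URL  */
    BOX_TYPE_ALIS,  /* stands in for GF_QT_BOX_TYPE_ALIS   */
    BOX_TYPE_URN,   /* stands in for GF_ISOM_BOX_TYPE_URN  */
    BOX_TYPE_OTHER
} box_type;

/* Old guard: only URN data references were rejected; anything else fell through. */
static int old_guard_rejects(box_type t) {
    return (t == BOX_TYPE_URN);
}

/* New guard: only URL and alias boxes are accepted; everything else is
 * rejected up front (GF_NOT_SUPPORTED in the real function). */
static int new_guard_rejects(box_type t) {
    return (t != BOX_TYPE_URL) && (t != BOX_TYPE_ALIS);
}

int main(void) {
    box_type samples[] = { BOX_TYPE_URL, BOX_TYPE_ALIS, BOX_TYPE_URN, BOX_TYPE_OTHER };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("type %u: old rejects=%d, new rejects=%d\n",
               (unsigned)samples[i], old_guard_rejects(samples[i]), new_guard_rejects(samples[i]));
    return 0;
}

The practical effect is that any data-reference box other than a plain URL box or a QuickTime alias now fails fast with GF_NOT_SUPPORTED instead of reaching the data-map open path.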
4
3
949
7411
https://github.com/gpac/gpac
CVE-2021-32137
['CWE-787']
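Reading this record as a whole, the hardened guard lives in Media_CheckDataEntry, whose signature and return codes are visible in the func_before/func_after fields. The sketch below drives that function over every declared data reference of a media box; it is a hedged illustration, not gpac code: it assumes gpac's internal isomedia headers are available, reuses only names that already appear in the record (Media_CheckDataEntry, GF_MediaBox, GF_Err, GF_OK, gf_list_count, the dref child_boxes list), and the probe_all_data_entries helper itself is hypothetical.

/* Hypothetical helper: probe every data reference declared in a media box.
 * Assumes gpac's internal isomedia declarations are in scope. */
static void probe_all_data_entries(GF_MediaBox *mdia)
{
    u32 i, count;

    if (!mdia || !mdia->information || !mdia->information->dataInformation
        || !mdia->information->dataInformation->dref)
        return;

    count = gf_list_count(mdia->information->dataInformation->dref->child_boxes);
    for (i = 1; i <= count; i++) {
        GF_Err e = Media_CheckDataEntry(mdia, i);
        /* GF_OK: self-contained entry or a resolvable external reference.
         * GF_NOT_SUPPORTED: with the fix recorded above, any data-reference
         * box that is neither a URL box nor a QuickTime alias box.
         * Other codes (GF_BAD_PARAM, GF_ISOM_INVALID_FILE, datamap errors)
         * are propagated unchanged from the function shown above. */
        if (e != GF_OK) {
            /* handle or report the unusable data reference here */
        }
    }
}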
print-isoclns.c
esis_print
/* * Copyright (c) 1992, 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Original code by Matt Thomas, Digital Equipment Corporation * * Extensively modified by Hannes Gredler (hannes@gredler.at) for more * complete IS-IS & CLNP support. */ /* \summary: ISO CLNS, ESIS, and ISIS printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "ether.h" #include "nlpid.h" #include "extract.h" #include "gmpls.h" #include "oui.h" #include "signature.h" static const char tstr[] = " [|isis]"; /* * IS-IS is defined in ISO 10589. Look there for protocol definitions. */ #define SYSTEM_ID_LEN ETHER_ADDR_LEN #define NODE_ID_LEN SYSTEM_ID_LEN+1 #define LSP_ID_LEN SYSTEM_ID_LEN+2 #define ISIS_VERSION 1 #define ESIS_VERSION 1 #define CLNP_VERSION 1 #define ISIS_PDU_TYPE_MASK 0x1F #define ESIS_PDU_TYPE_MASK 0x1F #define CLNP_PDU_TYPE_MASK 0x1F #define CLNP_FLAG_MASK 0xE0 #define ISIS_LAN_PRIORITY_MASK 0x7F #define ISIS_PDU_L1_LAN_IIH 15 #define ISIS_PDU_L2_LAN_IIH 16 #define ISIS_PDU_PTP_IIH 17 #define ISIS_PDU_L1_LSP 18 #define ISIS_PDU_L2_LSP 20 #define ISIS_PDU_L1_CSNP 24 #define ISIS_PDU_L2_CSNP 25 #define ISIS_PDU_L1_PSNP 26 #define ISIS_PDU_L2_PSNP 27 static const struct tok isis_pdu_values[] = { { ISIS_PDU_L1_LAN_IIH, "L1 Lan IIH"}, { ISIS_PDU_L2_LAN_IIH, "L2 Lan IIH"}, { ISIS_PDU_PTP_IIH, "p2p IIH"}, { ISIS_PDU_L1_LSP, "L1 LSP"}, { ISIS_PDU_L2_LSP, "L2 LSP"}, { ISIS_PDU_L1_CSNP, "L1 CSNP"}, { ISIS_PDU_L2_CSNP, "L2 CSNP"}, { ISIS_PDU_L1_PSNP, "L1 PSNP"}, { ISIS_PDU_L2_PSNP, "L2 PSNP"}, { 0, NULL} }; /* * A TLV is a tuple of a type, length and a value and is normally used for * encoding information in all sorts of places. This is an enumeration of * the well known types. 
* * list taken from rfc3359 plus some memory from veterans ;-) */ #define ISIS_TLV_AREA_ADDR 1 /* iso10589 */ #define ISIS_TLV_IS_REACH 2 /* iso10589 */ #define ISIS_TLV_ESNEIGH 3 /* iso10589 */ #define ISIS_TLV_PART_DIS 4 /* iso10589 */ #define ISIS_TLV_PREFIX_NEIGH 5 /* iso10589 */ #define ISIS_TLV_ISNEIGH 6 /* iso10589 */ #define ISIS_TLV_ISNEIGH_VARLEN 7 /* iso10589 */ #define ISIS_TLV_PADDING 8 /* iso10589 */ #define ISIS_TLV_LSP 9 /* iso10589 */ #define ISIS_TLV_AUTH 10 /* iso10589, rfc3567 */ #define ISIS_TLV_CHECKSUM 12 /* rfc3358 */ #define ISIS_TLV_CHECKSUM_MINLEN 2 #define ISIS_TLV_POI 13 /* rfc6232 */ #define ISIS_TLV_LSP_BUFFERSIZE 14 /* iso10589 rev2 */ #define ISIS_TLV_LSP_BUFFERSIZE_MINLEN 2 #define ISIS_TLV_EXT_IS_REACH 22 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_IS_ALIAS_ID 24 /* draft-ietf-isis-ext-lsp-frags-02 */ #define ISIS_TLV_DECNET_PHASE4 42 #define ISIS_TLV_LUCENT_PRIVATE 66 #define ISIS_TLV_INT_IP_REACH 128 /* rfc1195, rfc2966 */ #define ISIS_TLV_PROTOCOLS 129 /* rfc1195 */ #define ISIS_TLV_EXT_IP_REACH 130 /* rfc1195, rfc2966 */ #define ISIS_TLV_IDRP_INFO 131 /* rfc1195 */ #define ISIS_TLV_IDRP_INFO_MINLEN 1 #define ISIS_TLV_IPADDR 132 /* rfc1195 */ #define ISIS_TLV_IPAUTH 133 /* rfc1195 */ #define ISIS_TLV_TE_ROUTER_ID 134 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_EXTD_IP_REACH 135 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_HOSTNAME 137 /* rfc2763 */ #define ISIS_TLV_SHARED_RISK_GROUP 138 /* draft-ietf-isis-gmpls-extensions */ #define ISIS_TLV_MT_PORT_CAP 143 /* rfc6165 */ #define ISIS_TLV_MT_CAPABILITY 144 /* rfc6329 */ #define ISIS_TLV_NORTEL_PRIVATE1 176 #define ISIS_TLV_NORTEL_PRIVATE2 177 #define ISIS_TLV_RESTART_SIGNALING 211 /* rfc3847 */ #define ISIS_TLV_RESTART_SIGNALING_FLAGLEN 1 #define ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN 2 #define ISIS_TLV_MT_IS_REACH 222 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED 229 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED_MINLEN 2 #define ISIS_TLV_IP6ADDR 232 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP_REACH 235 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_IP6_REACH 236 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP6_REACH 237 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_PTP_ADJ 240 /* rfc3373 */ #define ISIS_TLV_IIH_SEQNR 241 /* draft-shen-isis-iih-sequence-00 */ #define ISIS_TLV_IIH_SEQNR_MINLEN 4 #define ISIS_TLV_VENDOR_PRIVATE 250 /* draft-ietf-isis-experimental-tlv-01 */ #define ISIS_TLV_VENDOR_PRIVATE_MINLEN 3 static const struct tok isis_tlv_values[] = { { ISIS_TLV_AREA_ADDR, "Area address(es)"}, { ISIS_TLV_IS_REACH, "IS Reachability"}, { ISIS_TLV_ESNEIGH, "ES Neighbor(s)"}, { ISIS_TLV_PART_DIS, "Partition DIS"}, { ISIS_TLV_PREFIX_NEIGH, "Prefix Neighbors"}, { ISIS_TLV_ISNEIGH, "IS Neighbor(s)"}, { ISIS_TLV_ISNEIGH_VARLEN, "IS Neighbor(s) (variable length)"}, { ISIS_TLV_PADDING, "Padding"}, { ISIS_TLV_LSP, "LSP entries"}, { ISIS_TLV_AUTH, "Authentication"}, { ISIS_TLV_CHECKSUM, "Checksum"}, { ISIS_TLV_POI, "Purge Originator Identifier"}, { ISIS_TLV_LSP_BUFFERSIZE, "LSP Buffersize"}, { ISIS_TLV_EXT_IS_REACH, "Extended IS Reachability"}, { ISIS_TLV_IS_ALIAS_ID, "IS Alias ID"}, { ISIS_TLV_DECNET_PHASE4, "DECnet Phase IV"}, { ISIS_TLV_LUCENT_PRIVATE, "Lucent Proprietary"}, { ISIS_TLV_INT_IP_REACH, "IPv4 Internal Reachability"}, { ISIS_TLV_PROTOCOLS, "Protocols supported"}, { ISIS_TLV_EXT_IP_REACH, "IPv4 External Reachability"}, { ISIS_TLV_IDRP_INFO, "Inter-Domain Information Type"}, { 
ISIS_TLV_IPADDR, "IPv4 Interface address(es)"}, { ISIS_TLV_IPAUTH, "IPv4 authentication (deprecated)"}, { ISIS_TLV_TE_ROUTER_ID, "Traffic Engineering Router ID"}, { ISIS_TLV_EXTD_IP_REACH, "Extended IPv4 Reachability"}, { ISIS_TLV_SHARED_RISK_GROUP, "Shared Risk Link Group"}, { ISIS_TLV_MT_PORT_CAP, "Multi-Topology-Aware Port Capability"}, { ISIS_TLV_MT_CAPABILITY, "Multi-Topology Capability"}, { ISIS_TLV_NORTEL_PRIVATE1, "Nortel Proprietary"}, { ISIS_TLV_NORTEL_PRIVATE2, "Nortel Proprietary"}, { ISIS_TLV_HOSTNAME, "Hostname"}, { ISIS_TLV_RESTART_SIGNALING, "Restart Signaling"}, { ISIS_TLV_MT_IS_REACH, "Multi Topology IS Reachability"}, { ISIS_TLV_MT_SUPPORTED, "Multi Topology"}, { ISIS_TLV_IP6ADDR, "IPv6 Interface address(es)"}, { ISIS_TLV_MT_IP_REACH, "Multi-Topology IPv4 Reachability"}, { ISIS_TLV_IP6_REACH, "IPv6 reachability"}, { ISIS_TLV_MT_IP6_REACH, "Multi-Topology IP6 Reachability"}, { ISIS_TLV_PTP_ADJ, "Point-to-point Adjacency State"}, { ISIS_TLV_IIH_SEQNR, "Hello PDU Sequence Number"}, { ISIS_TLV_VENDOR_PRIVATE, "Vendor Private"}, { 0, NULL } }; #define ESIS_OPTION_PROTOCOLS 129 #define ESIS_OPTION_QOS_MAINTENANCE 195 /* iso9542 */ #define ESIS_OPTION_SECURITY 197 /* iso9542 */ #define ESIS_OPTION_ES_CONF_TIME 198 /* iso9542 */ #define ESIS_OPTION_PRIORITY 205 /* iso9542 */ #define ESIS_OPTION_ADDRESS_MASK 225 /* iso9542 */ #define ESIS_OPTION_SNPA_MASK 226 /* iso9542 */ static const struct tok esis_option_values[] = { { ESIS_OPTION_PROTOCOLS, "Protocols supported"}, { ESIS_OPTION_QOS_MAINTENANCE, "QoS Maintenance" }, { ESIS_OPTION_SECURITY, "Security" }, { ESIS_OPTION_ES_CONF_TIME, "ES Configuration Time" }, { ESIS_OPTION_PRIORITY, "Priority" }, { ESIS_OPTION_ADDRESS_MASK, "Addressk Mask" }, { ESIS_OPTION_SNPA_MASK, "SNPA Mask" }, { 0, NULL } }; #define CLNP_OPTION_DISCARD_REASON 193 #define CLNP_OPTION_QOS_MAINTENANCE 195 /* iso8473 */ #define CLNP_OPTION_SECURITY 197 /* iso8473 */ #define CLNP_OPTION_SOURCE_ROUTING 200 /* iso8473 */ #define CLNP_OPTION_ROUTE_RECORDING 203 /* iso8473 */ #define CLNP_OPTION_PADDING 204 /* iso8473 */ #define CLNP_OPTION_PRIORITY 205 /* iso8473 */ static const struct tok clnp_option_values[] = { { CLNP_OPTION_DISCARD_REASON, "Discard Reason"}, { CLNP_OPTION_PRIORITY, "Priority"}, { CLNP_OPTION_QOS_MAINTENANCE, "QoS Maintenance"}, { CLNP_OPTION_SECURITY, "Security"}, { CLNP_OPTION_SOURCE_ROUTING, "Source Routing"}, { CLNP_OPTION_ROUTE_RECORDING, "Route Recording"}, { CLNP_OPTION_PADDING, "Padding"}, { 0, NULL } }; static const struct tok clnp_option_rfd_class_values[] = { { 0x0, "General"}, { 0x8, "Address"}, { 0x9, "Source Routeing"}, { 0xa, "Lifetime"}, { 0xb, "PDU Discarded"}, { 0xc, "Reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_general_values[] = { { 0x0, "Reason not specified"}, { 0x1, "Protocol procedure error"}, { 0x2, "Incorrect checksum"}, { 0x3, "PDU discarded due to congestion"}, { 0x4, "Header syntax error (cannot be parsed)"}, { 0x5, "Segmentation needed but not permitted"}, { 0x6, "Incomplete PDU received"}, { 0x7, "Duplicate option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_address_values[] = { { 0x0, "Destination address unreachable"}, { 0x1, "Destination address unknown"}, { 0, NULL } }; static const struct tok clnp_option_rfd_source_routeing_values[] = { { 0x0, "Unspecified source routeing error"}, { 0x1, "Syntax error in source routeing field"}, { 0x2, "Unknown address in source routeing field"}, { 0x3, "Path not acceptable"}, { 0, NULL } }; static const struct tok 
clnp_option_rfd_lifetime_values[] = { { 0x0, "Lifetime expired while data unit in transit"}, { 0x1, "Lifetime expired during reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_pdu_discard_values[] = { { 0x0, "Unsupported option not specified"}, { 0x1, "Unsupported protocol version"}, { 0x2, "Unsupported security option"}, { 0x3, "Unsupported source routeing option"}, { 0x4, "Unsupported recording of route option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_reassembly_values[] = { { 0x0, "Reassembly interference"}, { 0, NULL } }; /* array of 16 error-classes */ static const struct tok *clnp_option_rfd_error_class[] = { clnp_option_rfd_general_values, NULL, NULL, NULL, NULL, NULL, NULL, NULL, clnp_option_rfd_address_values, clnp_option_rfd_source_routeing_values, clnp_option_rfd_lifetime_values, clnp_option_rfd_pdu_discard_values, clnp_option_rfd_reassembly_values, NULL, NULL, NULL }; #define CLNP_OPTION_OPTION_QOS_MASK 0x3f #define CLNP_OPTION_SCOPE_MASK 0xc0 #define CLNP_OPTION_SCOPE_SA_SPEC 0x40 #define CLNP_OPTION_SCOPE_DA_SPEC 0x80 #define CLNP_OPTION_SCOPE_GLOBAL 0xc0 static const struct tok clnp_option_scope_values[] = { { CLNP_OPTION_SCOPE_SA_SPEC, "Source Address Specific"}, { CLNP_OPTION_SCOPE_DA_SPEC, "Destination Address Specific"}, { CLNP_OPTION_SCOPE_GLOBAL, "Globally unique"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_values[] = { { 0x0, "partial"}, { 0x1, "complete"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_string_values[] = { { CLNP_OPTION_SOURCE_ROUTING, "source routing"}, { CLNP_OPTION_ROUTE_RECORDING, "recording of route in progress"}, { 0, NULL } }; static const struct tok clnp_option_qos_global_values[] = { { 0x20, "reserved"}, { 0x10, "sequencing vs. delay"}, { 0x08, "congested"}, { 0x04, "delay vs. cost"}, { 0x02, "error vs. delay"}, { 0x01, "error vs. 
cost"}, { 0, NULL } }; #define ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP 3 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID 4 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID 5 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR 6 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR 8 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW 9 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW 10 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW 11 /* rfc4124 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD 12 /* draft-ietf-tewg-diff-te-proto-06 */ #define ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC 18 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE 19 /* draft-ietf-isis-link-attr-01 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE 20 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR 21 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS 22 /* rfc4124 */ #define ISIS_SUBTLV_SPB_METRIC 29 /* rfc6329 */ static const struct tok isis_ext_is_reach_subtlv_values[] = { { ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP, "Administrative groups" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID, "Link Local/Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID, "Link Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR, "IPv4 interface address" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR, "IPv4 neighbor address" }, { ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW, "Maximum link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW, "Reservable link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW, "Unreserved bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC, "Traffic Engineering Metric" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE, "Link Attribute" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE, "Link Protection Type" }, { ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR, "Interface Switching Capability" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD, "Bandwidth Constraints (old)" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS, "Bandwidth Constraints" }, { ISIS_SUBTLV_SPB_METRIC, "SPB Metric" }, { 250, "Reserved for cisco specific extensions" }, { 251, "Reserved for cisco specific extensions" }, { 252, "Reserved for cisco specific extensions" }, { 253, "Reserved for cisco specific extensions" }, { 254, "Reserved for cisco specific extensions" }, { 255, "Reserved for future expansion" }, { 0, NULL } }; #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32 1 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64 2 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR 117 /* draft-ietf-isis-wg-multi-topology-05 */ static const struct tok isis_ext_ip_reach_subtlv_values[] = { { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32, "32-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64, "64-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR, "Management Prefix Color" }, { 0, NULL } }; static const struct tok isis_subtlv_link_attribute_values[] = { { 0x01, "Local Protection Available" }, { 0x02, "Link excluded from local protection path" }, { 0x04, "Local maintenance required"}, { 0, NULL } }; #define ISIS_SUBTLV_AUTH_SIMPLE 1 #define ISIS_SUBTLV_AUTH_GENERIC 3 /* rfc 5310 */ #define ISIS_SUBTLV_AUTH_MD5 54 #define ISIS_SUBTLV_AUTH_MD5_LEN 16 #define 
ISIS_SUBTLV_AUTH_PRIVATE 255 static const struct tok isis_subtlv_auth_values[] = { { ISIS_SUBTLV_AUTH_SIMPLE, "simple text password"}, { ISIS_SUBTLV_AUTH_GENERIC, "Generic Crypto key-id"}, { ISIS_SUBTLV_AUTH_MD5, "HMAC-MD5 password"}, { ISIS_SUBTLV_AUTH_PRIVATE, "Routing Domain private password"}, { 0, NULL } }; #define ISIS_SUBTLV_IDRP_RES 0 #define ISIS_SUBTLV_IDRP_LOCAL 1 #define ISIS_SUBTLV_IDRP_ASN 2 static const struct tok isis_subtlv_idrp_values[] = { { ISIS_SUBTLV_IDRP_RES, "Reserved"}, { ISIS_SUBTLV_IDRP_LOCAL, "Routing-Domain Specific"}, { ISIS_SUBTLV_IDRP_ASN, "AS Number Tag"}, { 0, NULL} }; #define ISIS_SUBTLV_SPB_MCID 4 #define ISIS_SUBTLV_SPB_DIGEST 5 #define ISIS_SUBTLV_SPB_BVID 6 #define ISIS_SUBTLV_SPB_INSTANCE 1 #define ISIS_SUBTLV_SPBM_SI 3 #define ISIS_SPB_MCID_LEN 51 #define ISIS_SUBTLV_SPB_MCID_MIN_LEN 102 #define ISIS_SUBTLV_SPB_DIGEST_MIN_LEN 33 #define ISIS_SUBTLV_SPB_BVID_MIN_LEN 6 #define ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN 19 #define ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN 8 static const struct tok isis_mt_port_cap_subtlv_values[] = { { ISIS_SUBTLV_SPB_MCID, "SPB MCID" }, { ISIS_SUBTLV_SPB_DIGEST, "SPB Digest" }, { ISIS_SUBTLV_SPB_BVID, "SPB BVID" }, { 0, NULL } }; static const struct tok isis_mt_capability_subtlv_values[] = { { ISIS_SUBTLV_SPB_INSTANCE, "SPB Instance" }, { ISIS_SUBTLV_SPBM_SI, "SPBM Service Identifier and Unicast Address" }, { 0, NULL } }; struct isis_spb_mcid { uint8_t format_id; uint8_t name[32]; uint8_t revision_lvl[2]; uint8_t digest[16]; }; struct isis_subtlv_spb_mcid { struct isis_spb_mcid mcid; struct isis_spb_mcid aux_mcid; }; struct isis_subtlv_spb_instance { uint8_t cist_root_id[8]; uint8_t cist_external_root_path_cost[4]; uint8_t bridge_priority[2]; uint8_t spsourceid[4]; uint8_t no_of_trees; }; #define CLNP_SEGMENT_PART 0x80 #define CLNP_MORE_SEGMENTS 0x40 #define CLNP_REQUEST_ER 0x20 static const struct tok clnp_flag_values[] = { { CLNP_SEGMENT_PART, "Segmentation permitted"}, { CLNP_MORE_SEGMENTS, "more Segments"}, { CLNP_REQUEST_ER, "request Error Report"}, { 0, NULL} }; #define ISIS_MASK_LSP_OL_BIT(x) ((x)&0x4) #define ISIS_MASK_LSP_ISTYPE_BITS(x) ((x)&0x3) #define ISIS_MASK_LSP_PARTITION_BIT(x) ((x)&0x80) #define ISIS_MASK_LSP_ATT_BITS(x) ((x)&0x78) #define ISIS_MASK_LSP_ATT_ERROR_BIT(x) ((x)&0x40) #define ISIS_MASK_LSP_ATT_EXPENSE_BIT(x) ((x)&0x20) #define ISIS_MASK_LSP_ATT_DELAY_BIT(x) ((x)&0x10) #define ISIS_MASK_LSP_ATT_DEFAULT_BIT(x) ((x)&0x8) #define ISIS_MASK_MTID(x) ((x)&0x0fff) #define ISIS_MASK_MTFLAGS(x) ((x)&0xf000) static const struct tok isis_mt_flag_values[] = { { 0x4000, "ATT bit set"}, { 0x8000, "Overload bit set"}, { 0, NULL} }; #define ISIS_MASK_TLV_EXTD_IP_UPDOWN(x) ((x)&0x80) #define ISIS_MASK_TLV_EXTD_IP_SUBTLV(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_IE(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_SUBTLV(x) ((x)&0x20) #define ISIS_LSP_TLV_METRIC_SUPPORTED(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_IE(x) ((x)&0x40) #define ISIS_LSP_TLV_METRIC_UPDOWN(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_VALUE(x) ((x)&0x3f) #define ISIS_MASK_TLV_SHARED_RISK_GROUP(x) ((x)&0x1) static const struct tok isis_mt_values[] = { { 0, "IPv4 unicast"}, { 1, "In-Band Management"}, { 2, "IPv6 unicast"}, { 3, "Multicast"}, { 4095, "Development, Experimental or Proprietary"}, { 0, NULL } }; static const struct tok isis_iih_circuit_type_values[] = { { 1, "Level 1 only"}, { 2, "Level 2 only"}, { 3, "Level 1, Level 2"}, { 0, NULL} }; #define ISIS_LSP_TYPE_UNUSED0 0 #define ISIS_LSP_TYPE_LEVEL_1 1 #define ISIS_LSP_TYPE_UNUSED2 2 #define 
ISIS_LSP_TYPE_LEVEL_2 3 static const struct tok isis_lsp_istype_values[] = { { ISIS_LSP_TYPE_UNUSED0, "Unused 0x0 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_1, "L1 IS"}, { ISIS_LSP_TYPE_UNUSED2, "Unused 0x2 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_2, "L2 IS"}, { 0, NULL } }; /* * Katz's point to point adjacency TLV uses codes to tell us the state of * the remote adjacency. Enumerate them. */ #define ISIS_PTP_ADJ_UP 0 #define ISIS_PTP_ADJ_INIT 1 #define ISIS_PTP_ADJ_DOWN 2 static const struct tok isis_ptp_adjancey_values[] = { { ISIS_PTP_ADJ_UP, "Up" }, { ISIS_PTP_ADJ_INIT, "Initializing" }, { ISIS_PTP_ADJ_DOWN, "Down" }, { 0, NULL} }; struct isis_tlv_ptp_adj { uint8_t adjacency_state; uint8_t extd_local_circuit_id[4]; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; uint8_t neighbor_extd_local_circuit_id[4]; }; static void osi_print_cksum(netdissect_options *, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length); static int clnp_print(netdissect_options *, const uint8_t *, u_int); static void esis_print(netdissect_options *, const uint8_t *, u_int); static int isis_print(netdissect_options *, const uint8_t *, u_int); struct isis_metric_block { uint8_t metric_default; uint8_t metric_delay; uint8_t metric_expense; uint8_t metric_error; }; struct isis_tlv_is_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_nodeid[NODE_ID_LEN]; }; struct isis_tlv_es_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; }; struct isis_tlv_ip_reach { struct isis_metric_block isis_metric_block; uint8_t prefix[4]; uint8_t mask[4]; }; static const struct tok isis_is_reach_virtual_values[] = { { 0, "IsNotVirtual"}, { 1, "IsVirtual"}, { 0, NULL } }; static const struct tok isis_restart_flag_values[] = { { 0x1, "Restart Request"}, { 0x2, "Restart Acknowledgement"}, { 0x4, "Suppress adjacency advertisement"}, { 0, NULL } }; struct isis_common_header { uint8_t nlpid; uint8_t fixed_len; uint8_t version; /* Protocol version */ uint8_t id_length; uint8_t pdu_type; /* 3 MSbits are reserved */ uint8_t pdu_version; /* Packet format version */ uint8_t reserved; uint8_t max_area; }; struct isis_iih_lan_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t priority; uint8_t lan_id[NODE_ID_LEN]; }; struct isis_iih_ptp_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t circuit_id; }; struct isis_lsp_header { uint8_t pdu_len[2]; uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; uint8_t typeblock; }; struct isis_csnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; uint8_t start_lsp_id[LSP_ID_LEN]; uint8_t end_lsp_id[LSP_ID_LEN]; }; struct isis_psnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; }; struct isis_tlv_lsp { uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; }; #define ISIS_COMMON_HEADER_SIZE (sizeof(struct isis_common_header)) #define ISIS_IIH_LAN_HEADER_SIZE (sizeof(struct isis_iih_lan_header)) #define ISIS_IIH_PTP_HEADER_SIZE (sizeof(struct isis_iih_ptp_header)) #define ISIS_LSP_HEADER_SIZE (sizeof(struct isis_lsp_header)) #define ISIS_CSNP_HEADER_SIZE (sizeof(struct isis_csnp_header)) #define ISIS_PSNP_HEADER_SIZE (sizeof(struct isis_psnp_header)) void isoclns_print(netdissect_options *ndo, const uint8_t *p, u_int length) { if (!ND_TTEST(*p)) { /* enough bytes on the wire ? 
*/ ND_PRINT((ndo, "|OSI")); return; } if (ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID %s (0x%02x): ", tok2str(nlpid_values, "Unknown", *p), *p)); switch (*p) { case NLPID_CLNP: if (!clnp_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_ESIS: esis_print(ndo, p, length); return; case NLPID_ISIS: if (!isis_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_NULLNS: ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); break; case NLPID_Q933: q933_print(ndo, p + 1, length - 1); break; case NLPID_IP: ip_print(ndo, p + 1, length - 1); break; case NLPID_IP6: ip6_print(ndo, p + 1, length - 1); break; case NLPID_PPP: ppp_print(ndo, p + 1, length - 1); break; default: if (!ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID 0x%02x unknown", *p)); ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); if (length > 1) print_unknown_data(ndo, p, "\n\t", length); break; } } #define CLNP_PDU_ER 1 #define CLNP_PDU_DT 28 #define CLNP_PDU_MD 29 #define CLNP_PDU_ERQ 30 #define CLNP_PDU_ERP 31 static const struct tok clnp_pdu_values[] = { { CLNP_PDU_ER, "Error Report"}, { CLNP_PDU_MD, "MD"}, { CLNP_PDU_DT, "Data"}, { CLNP_PDU_ERQ, "Echo Request"}, { CLNP_PDU_ERP, "Echo Response"}, { 0, NULL } }; struct clnp_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t lifetime; /* units of 500ms */ uint8_t type; uint8_t segment_length[2]; uint8_t cksum[2]; }; struct clnp_segment_header_t { uint8_t data_unit_id[2]; uint8_t segment_offset[2]; uint8_t total_length[2]; }; /* * clnp_print * Decode CLNP packets. Return 0 on error. */ static int clnp_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr,*source_address,*dest_address; u_int li,tlen,nsap_offset,source_address_length,dest_address_length, clnp_pdu_type, clnp_flags; const struct clnp_header_t *clnp_header; const struct clnp_segment_header_t *clnp_segment_header; uint8_t rfd_error_major,rfd_error_minor; clnp_header = (const struct clnp_header_t *) pptr; ND_TCHECK(*clnp_header); li = clnp_header->length_indicator; optr = pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "CLNP")); /* * Sanity checking of the header. 
*/ if (clnp_header->version != CLNP_VERSION) { ND_PRINT((ndo, "version %d packet not supported", clnp_header->version)); return (0); } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return (0); } if (li < sizeof(struct clnp_header_t)) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return (0); } /* FIXME further header sanity checking */ clnp_pdu_type = clnp_header->type & CLNP_PDU_TYPE_MASK; clnp_flags = clnp_header->type & CLNP_FLAG_MASK; pptr += sizeof(struct clnp_header_t); li -= sizeof(struct clnp_header_t); if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); dest_address_length = *pptr; pptr += 1; li -= 1; if (li < dest_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, dest_address_length); dest_address = pptr; pptr += dest_address_length; li -= dest_address_length; if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); source_address_length = *pptr; pptr += 1; li -= 1; if (li < source_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, source_address_length); source_address = pptr; pptr += source_address_length; li -= source_address_length; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s > %s, %s, length %u", ndo->ndo_eflag ? "" : ", ", isonsap_string(ndo, source_address, source_address_length), isonsap_string(ndo, dest_address, dest_address_length), tok2str(clnp_pdu_values,"unknown (%u)",clnp_pdu_type), length)); return (1); } ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s PDU, hlen: %u, v: %u, lifetime: %u.%us, Segment PDU length: %u, checksum: 0x%04x", tok2str(clnp_pdu_values, "unknown (%u)",clnp_pdu_type), clnp_header->length_indicator, clnp_header->version, clnp_header->lifetime/2, (clnp_header->lifetime%2)*5, EXTRACT_16BITS(clnp_header->segment_length), EXTRACT_16BITS(clnp_header->cksum))); osi_print_cksum(ndo, optr, EXTRACT_16BITS(clnp_header->cksum), 7, clnp_header->length_indicator); ND_PRINT((ndo, "\n\tFlags [%s]", bittok2str(clnp_flag_values, "none", clnp_flags))); ND_PRINT((ndo, "\n\tsource address (length %u): %s\n\tdest address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length), dest_address_length, isonsap_string(ndo, dest_address, dest_address_length))); if (clnp_flags & CLNP_SEGMENT_PART) { if (li < sizeof(const struct clnp_segment_header_t)) { ND_PRINT((ndo, "li < size of fixed part of CLNP header, addresses, and segment part")); return (0); } clnp_segment_header = (const struct clnp_segment_header_t *) pptr; ND_TCHECK(*clnp_segment_header); ND_PRINT((ndo, "\n\tData Unit ID: 0x%04x, Segment Offset: %u, Total PDU Length: %u", EXTRACT_16BITS(clnp_segment_header->data_unit_id), EXTRACT_16BITS(clnp_segment_header->segment_offset), EXTRACT_16BITS(clnp_segment_header->total_length))); pptr+=sizeof(const struct clnp_segment_header_t); li-=sizeof(const struct clnp_segment_header_t); } /* now walk the options */ while (li >= 2) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return (0); } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return (0); } ND_TCHECK2(*pptr, opli); li -= opli; tptr = pptr; tlen = opli; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(clnp_option_values,"Unknown",op), op, opli)); /* * We've already checked that the entire option is present * in the captured packet with the ND_TCHECK2() call. * Therefore, we don't need to do ND_TCHECK()/ND_TCHECK2() * checks. * We do, however, need to check tlen, to make sure we * don't run past the end of the option. 
*/ switch (op) { case CLNP_OPTION_ROUTE_RECORDING: /* those two options share the format */ case CLNP_OPTION_SOURCE_ROUTING: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "%s %s", tok2str(clnp_option_sr_rr_values,"Unknown",*tptr), tok2str(clnp_option_sr_rr_string_values, "Unknown Option %u", op))); nsap_offset=*(tptr+1); if (nsap_offset == 0) { ND_PRINT((ndo, " Bad NSAP offset (0)")); break; } nsap_offset-=1; /* offset to nsap list */ if (nsap_offset > tlen) { ND_PRINT((ndo, " Bad NSAP offset (past end of option)")); break; } tptr+=nsap_offset; tlen-=nsap_offset; while (tlen > 0) { source_address_length=*tptr; if (tlen < source_address_length+1) { ND_PRINT((ndo, "\n\t NSAP address goes past end of option")); break; } if (source_address_length > 0) { source_address=(tptr+1); ND_TCHECK2(*source_address, source_address_length); ND_PRINT((ndo, "\n\t NSAP address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length))); } tlen-=source_address_length+1; } break; case CLNP_OPTION_PRIORITY: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "0x%1x", *tptr&0x0f)); break; case CLNP_OPTION_QOS_MAINTENANCE: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s", tok2str(clnp_option_scope_values, "Reserved", *tptr&CLNP_OPTION_SCOPE_MASK))); if ((*tptr&CLNP_OPTION_SCOPE_MASK) == CLNP_OPTION_SCOPE_GLOBAL) ND_PRINT((ndo, "\n\t QoS Flags [%s]", bittok2str(clnp_option_qos_global_values, "none", *tptr&CLNP_OPTION_OPTION_QOS_MASK))); break; case CLNP_OPTION_SECURITY: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s, Security-Level %u", tok2str(clnp_option_scope_values,"Reserved",*tptr&CLNP_OPTION_SCOPE_MASK), *(tptr+1))); break; case CLNP_OPTION_DISCARD_REASON: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } rfd_error_major = (*tptr&0xf0) >> 4; rfd_error_minor = *tptr&0x0f; ND_PRINT((ndo, "\n\t Class: %s Error (0x%01x), %s (0x%01x)", tok2str(clnp_option_rfd_class_values,"Unknown",rfd_error_major), rfd_error_major, tok2str(clnp_option_rfd_error_class[rfd_error_major],"Unknown",rfd_error_minor), rfd_error_minor)); break; case CLNP_OPTION_PADDING: ND_PRINT((ndo, "padding data")); break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } switch (clnp_pdu_type) { case CLNP_PDU_ER: /* fall through */ case CLNP_PDU_ERP: ND_TCHECK(*pptr); if (*(pptr) == NLPID_CLNP) { ND_PRINT((ndo, "\n\t-----original packet-----\n\t")); /* FIXME recursion protection */ clnp_print(ndo, pptr, length - clnp_header->length_indicator); break; } case CLNP_PDU_DT: case CLNP_PDU_MD: case CLNP_PDU_ERQ: default: /* dump the PDU specific data */ if (length-(pptr-optr) > 0) { ND_PRINT((ndo, "\n\t undecoded non-header data, length %u", length-clnp_header->length_indicator)); print_unknown_data(ndo, pptr, "\n\t ", length - (pptr - optr)); } } return (1); trunc: ND_PRINT((ndo, "[|clnp]")); return (1); } #define ESIS_PDU_REDIRECT 6 #define ESIS_PDU_ESH 2 #define ESIS_PDU_ISH 4 static const struct tok esis_pdu_values[] = { { ESIS_PDU_REDIRECT, "redirect"}, { ESIS_PDU_ESH, "ESH"}, { ESIS_PDU_ISH, "ISH"}, { 0, NULL } }; struct esis_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t reserved; uint8_t type; 
uint8_t holdtime[2]; uint8_t cksum[2]; }; static void esis_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr; u_int li,esis_pdu_type,source_address_length, source_address_number; const struct esis_header_t *esis_header; if (!ndo->ndo_eflag) ND_PRINT((ndo, "ES-IS")); if (length <= 2) { ND_PRINT((ndo, ndo->ndo_qflag ? "bad pkt!" : "no header at all!")); return; } esis_header = (const struct esis_header_t *) pptr; ND_TCHECK(*esis_header); li = esis_header->length_indicator; optr = pptr; /* * Sanity checking of the header. */ if (esis_header->nlpid != NLPID_ESIS) { ND_PRINT((ndo, " nlpid 0x%02x packet not supported", esis_header->nlpid)); return; } if (esis_header->version != ESIS_VERSION) { ND_PRINT((ndo, " version %d packet not supported", esis_header->version)); return; } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return; } if (li < sizeof(struct esis_header_t) + 2) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return; } esis_pdu_type = esis_header->type & ESIS_PDU_TYPE_MASK; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s, length %u", ndo->ndo_eflag ? "" : ", ", tok2str(esis_pdu_values,"unknown type (%u)",esis_pdu_type), length)); return; } else ND_PRINT((ndo, "%slength %u\n\t%s (%u)", ndo->ndo_eflag ? "" : ", ", length, tok2str(esis_pdu_values,"unknown type: %u", esis_pdu_type), esis_pdu_type)); ND_PRINT((ndo, ", v: %u%s", esis_header->version, esis_header->version == ESIS_VERSION ? "" : "unsupported" )); ND_PRINT((ndo, ", checksum: 0x%04x", EXTRACT_16BITS(esis_header->cksum))); osi_print_cksum(ndo, pptr, EXTRACT_16BITS(esis_header->cksum), 7, li); ND_PRINT((ndo, ", holding time: %us, length indicator: %u", EXTRACT_16BITS(esis_header->holdtime), li)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, optr, "\n\t", sizeof(struct esis_header_t)); pptr += sizeof(struct esis_header_t); li -= sizeof(struct esis_header_t); switch (esis_pdu_type) { case ESIS_PDU_REDIRECT: { const uint8_t *dst, *snpa, *neta; u_int dstl, snpal, netal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } dstl = *pptr; pptr++; li--; ND_TCHECK2(*pptr, dstl); if (li < dstl) { ND_PRINT((ndo, ", bad redirect/li")); return; } dst = pptr; pptr += dstl; li -= dstl; ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, dst, dstl))); ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpal = *pptr; pptr++; li--; ND_TCHECK2(*pptr, snpal); if (li < snpal) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpa = pptr; pptr += snpal; li -= snpal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } netal = *pptr; pptr++; ND_TCHECK2(*pptr, netal); if (li < netal) { ND_PRINT((ndo, ", bad redirect/li")); return; } neta = pptr; pptr += netal; li -= netal; if (netal == 0) ND_PRINT((ndo, "\n\t %s", etheraddr_string(ndo, snpa))); else ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, neta, netal))); break; } case ESIS_PDU_ESH: ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_number = *pptr; pptr++; li--; ND_PRINT((ndo, "\n\t Number of Source Addresses: %u", source_address_number)); while (source_address_number > 0) { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad esh/li")); return; } 
ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; source_address_number--; } break; case ESIS_PDU_ISH: { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad ish/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad ish/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; break; } default: if (ndo->ndo_vflag <= 1) { if (pptr < ndo->ndo_snapend) print_unknown_data(ndo, pptr, "\n\t ", ndo->ndo_snapend - pptr); } return; } /* now walk the options */ while (li != 0) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return; } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return; } li -= opli; tptr = pptr; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(esis_option_values,"Unknown",op), op, opli)); switch (op) { case ESIS_OPTION_ES_CONF_TIME: if (opli == 2) { ND_TCHECK2(*pptr, 2); ND_PRINT((ndo, "%us", EXTRACT_16BITS(tptr))); } else ND_PRINT((ndo, "(bad length)")); break; case ESIS_OPTION_PROTOCOLS: while (opli>0) { ND_TCHECK(*pptr); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (opli>1) /* further NPLIDs ? - put comma */ ND_PRINT((ndo, ", ")); tptr++; opli--; } break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ case ESIS_OPTION_QOS_MAINTENANCE: case ESIS_OPTION_SECURITY: case ESIS_OPTION_PRIORITY: case ESIS_OPTION_ADDRESS_MASK: case ESIS_OPTION_SNPA_MASK: default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } trunc: return; } static void isis_print_mcid(netdissect_options *ndo, const struct isis_spb_mcid *mcid) { int i; ND_TCHECK(*mcid); ND_PRINT((ndo, "ID: %d, Name: ", mcid->format_id)); if (fn_printzp(ndo, mcid->name, 32, ndo->ndo_snapend)) goto trunc; ND_PRINT((ndo, "\n\t Lvl: %d", EXTRACT_16BITS(mcid->revision_lvl))); ND_PRINT((ndo, ", Digest: ")); for(i=0;i<16;i++) ND_PRINT((ndo, "%.2x ", mcid->digest[i])); trunc: ND_PRINT((ndo, "%s", tstr)); } static int isis_print_mt_port_cap_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len; const struct isis_subtlv_spb_mcid *subtlv_spb_mcid; int i; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_port_cap_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); /*len -= TLV_TYPE_LEN_OFFSET;*/ len = len -2; switch (stlv_type) { case ISIS_SUBTLV_SPB_MCID: { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_MCID_MIN_LEN); subtlv_spb_mcid = (const struct isis_subtlv_spb_mcid *)tptr; ND_PRINT((ndo, "\n\t MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ ND_PRINT((ndo, "\n\t AUX-MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->aux_mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ tptr = tptr + sizeof(struct isis_subtlv_spb_mcid); len = len - sizeof(struct isis_subtlv_spb_mcid); break; } case ISIS_SUBTLV_SPB_DIGEST: { ND_TCHECK2(*(tptr), 
ISIS_SUBTLV_SPB_DIGEST_MIN_LEN); ND_PRINT((ndo, "\n\t RES: %d V: %d A: %d D: %d", (*(tptr) >> 5), (((*tptr)>> 4) & 0x01), ((*(tptr) >> 2) & 0x03), ((*tptr) & 0x03))); tptr++; ND_PRINT((ndo, "\n\t Digest: ")); for(i=1;i<=8; i++) { ND_PRINT((ndo, "%08x ", EXTRACT_32BITS(tptr))); if (i%4 == 0 && i != 8) ND_PRINT((ndo, "\n\t ")); tptr = tptr + 4; } len = len - ISIS_SUBTLV_SPB_DIGEST_MIN_LEN; break; } case ISIS_SUBTLV_SPB_BVID: { ND_TCHECK2(*(tptr), stlv_len); while (len >= ISIS_SUBTLV_SPB_BVID_MIN_LEN) { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_BVID_MIN_LEN); ND_PRINT((ndo, "\n\t ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " BVID: %d, U:%01x M:%01x ", (EXTRACT_16BITS (tptr) >> 4) , (EXTRACT_16BITS (tptr) >> 3) & 0x01, (EXTRACT_16BITS (tptr) >> 2) & 0x01)); tptr = tptr + 2; len = len - ISIS_SUBTLV_SPB_BVID_MIN_LEN; } break; } default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static int isis_print_mt_capability_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len, tmp; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_capability_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); len = len - 2; switch (stlv_type) { case ISIS_SUBTLV_SPB_INSTANCE: ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN); ND_PRINT((ndo, "\n\t CIST Root-ID: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Path Cost: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Prio: %d", EXTRACT_16BITS(tptr))); tptr = tptr + 2; ND_PRINT((ndo, "\n\t RES: %d", EXTRACT_16BITS(tptr) >> 5)); ND_PRINT((ndo, ", V: %d", (EXTRACT_16BITS(tptr) >> 4) & 0x0001)); ND_PRINT((ndo, ", SPSource-ID: %d", (EXTRACT_32BITS(tptr) & 0x000fffff))); tptr = tptr+4; ND_PRINT((ndo, ", No of Trees: %x", *(tptr))); tmp = *(tptr++); len = len - ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN; while (tmp) { ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN); ND_PRINT((ndo, "\n\t U:%d, M:%d, A:%d, RES:%d", *(tptr) >> 7, (*(tptr) >> 6) & 0x01, (*(tptr) >> 5) & 0x01, (*(tptr) & 0x1f))); tptr++; ND_PRINT((ndo, ", ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr + 4; ND_PRINT((ndo, ", BVID: %d, SPVID: %d", (EXTRACT_24BITS(tptr) >> 12) & 0x000fff, EXTRACT_24BITS(tptr) & 0x000fff)); tptr = tptr + 3; len = len - ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN; tmp--; } break; case ISIS_SUBTLV_SPBM_SI: ND_TCHECK2(*tptr, 8); ND_PRINT((ndo, "\n\t BMAC: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, "%04x", EXTRACT_16BITS(tptr))); tptr = tptr+2; ND_PRINT((ndo, ", RES: %d, VID: %d", EXTRACT_16BITS(tptr) >> 12, (EXTRACT_16BITS(tptr)) & 0x0fff)); tptr = tptr+2; len = len - 8; stlv_len = stlv_len - 8; while (stlv_len >= 4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t T: %d, R: %d, RES: %d, ISID: %d", (EXTRACT_32BITS(tptr) >> 31), (EXTRACT_32BITS(tptr) >> 30) & 0x01, (EXTRACT_32BITS(tptr) >> 24) & 0x03f, (EXTRACT_32BITS(tptr)) & 0x0ffffff)); tptr = tptr + 4; len = len - 4; stlv_len = stlv_len - 4; } break; default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } /* shared routine for printing system, node and lsp-ids */ static char * isis_print_id(const uint8_t *cp, int id_len) { int i; static char id[sizeof("xxxx.xxxx.xxxx.yy-zz")]; char *pos = id; for (i = 1; i <= SYSTEM_ID_LEN; i++) { snprintf(pos, 
sizeof(id) - (pos - id), "%02x", *cp++); pos += strlen(pos); if (i == 2 || i == 4) *pos++ = '.'; } if (id_len >= NODE_ID_LEN) { snprintf(pos, sizeof(id) - (pos - id), ".%02x", *cp++); pos += strlen(pos); } if (id_len == LSP_ID_LEN) snprintf(pos, sizeof(id) - (pos - id), "-%02x", *cp); return (id); } /* print the 4-byte metric block which is common found in the old-style TLVs */ static int isis_print_metric_block(netdissect_options *ndo, const struct isis_metric_block *isis_metric_block) { ND_PRINT((ndo, ", Default Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_default), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_delay)) ND_PRINT((ndo, "\n\t\t Delay Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_delay), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_expense)) ND_PRINT((ndo, "\n\t\t Expense Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_expense), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_error)) ND_PRINT((ndo, "\n\t\t Error Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_error), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_error) ? "External" : "Internal")); return(1); /* everything is ok */ } static int isis_print_tlv_ip_reach(netdissect_options *ndo, const uint8_t *cp, const char *ident, int length) { int prefix_len; const struct isis_tlv_ip_reach *tlv_ip_reach; tlv_ip_reach = (const struct isis_tlv_ip_reach *)cp; while (length > 0) { if ((size_t)length < sizeof(*tlv_ip_reach)) { ND_PRINT((ndo, "short IPv4 Reachability (%d vs %lu)", length, (unsigned long)sizeof(*tlv_ip_reach))); return (0); } if (!ND_TTEST(*tlv_ip_reach)) return (0); prefix_len = mask2plen(EXTRACT_32BITS(tlv_ip_reach->mask)); if (prefix_len == -1) ND_PRINT((ndo, "%sIPv4 prefix: %s mask %s", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), ipaddr_string(ndo, (tlv_ip_reach->mask)))); else ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), prefix_len)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u, %s", ISIS_LSP_TLV_METRIC_UPDOWN(tlv_ip_reach->isis_metric_block.metric_default) ? "down" : "up", ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_default), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_delay)) ND_PRINT((ndo, "%s Delay Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_delay), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_expense)) ND_PRINT((ndo, "%s Expense Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_expense), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_error)) ND_PRINT((ndo, "%s Error Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_error), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_error) ? 
"External" : "Internal")); length -= sizeof(struct isis_tlv_ip_reach); tlv_ip_reach++; } return (1); } /* * this is the common IP-REACH subTLV decoder it is called * from various EXTD-IP REACH TLVs (135,235,236,237) */ static int isis_print_ip_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, int subt, int subl, const char *ident) { /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_ip_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr,subl); switch(subt) { case ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR: /* fall through */ case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32: while (subl >= 4) { ND_PRINT((ndo, ", 0x%08x (=%u)", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr))); tptr+=4; subl-=4; } break; case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64: while (subl >= 8) { ND_PRINT((ndo, ", 0x%08x%08x", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr+4))); tptr+=8; subl-=8; } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: ND_PRINT((ndo, "%s", ident)); ND_PRINT((ndo, "%s", tstr)); return(0); } /* * this is the common IS-REACH subTLV decoder it is called * from isis_print_ext_is_reach() */ static int isis_print_is_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, u_int subt, u_int subl, const char *ident) { u_int te_class,priority_level,gmpls_switch_cap; union { /* int to float conversion buffer for several subTLVs */ float f; uint32_t i; } bw; /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_is_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr, subl); switch(subt) { case ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP: case ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID: case ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID: if (subl >= 4) { ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr))); if (subl == 8) /* rfc4205 */ ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr+4))); } break; case ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR: case ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR: if (subl >= sizeof(struct in_addr)) ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW : case ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW: if (subl >= 4) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, ", %.3f Mbps", bw.f * 8 / 1000000)); } break; case ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW : if (subl >= 32) { for (te_class = 0; te_class < 8; te_class++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s TE-Class %u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } } break; case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS: /* fall through */ case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD: ND_PRINT((ndo, "%sBandwidth Constraints Model ID: %s (%u)", ident, tok2str(diffserv_te_bc_values, "unknown", *tptr), *tptr)); tptr++; /* decode BCs until the subTLV ends */ for (te_class = 0; te_class < (subl-1)/4; te_class++) { ND_TCHECK2(*tptr, 4); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Bandwidth constraint CT%u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } break; case ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC: if (subl >= 3) ND_PRINT((ndo, ", %u", EXTRACT_24BITS(tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE: if (subl == 2) { ND_PRINT((ndo, ", [ %s ] (0x%04x)", bittok2str(isis_subtlv_link_attribute_values, "Unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE: if (subl >= 2) { 
ND_PRINT((ndo, ", %s, Priority %u", bittok2str(gmpls_link_prot_values, "none", *tptr), *(tptr+1))); } break; case ISIS_SUBTLV_SPB_METRIC: if (subl >= 6) { ND_PRINT((ndo, ", LM: %u", EXTRACT_24BITS(tptr))); tptr=tptr+3; ND_PRINT((ndo, ", P: %u", *(tptr))); tptr++; ND_PRINT((ndo, ", P-ID: %u", EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR: if (subl >= 36) { gmpls_switch_cap = *tptr; ND_PRINT((ndo, "%s Interface Switching Capability:%s", ident, tok2str(gmpls_switch_cap_values, "Unknown", gmpls_switch_cap))); ND_PRINT((ndo, ", LSP Encoding: %s", tok2str(gmpls_encoding_values, "Unknown", *(tptr + 1)))); tptr+=4; ND_PRINT((ndo, "%s Max LSP Bandwidth:", ident)); for (priority_level = 0; priority_level < 8; priority_level++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s priority level %d: %.3f Mbps", ident, priority_level, bw.f * 8 / 1000000)); tptr+=4; } subl-=36; switch (gmpls_switch_cap) { case GMPLS_PSC1: case GMPLS_PSC2: case GMPLS_PSC3: case GMPLS_PSC4: ND_TCHECK2(*tptr, 6); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Interface MTU: %u", ident, EXTRACT_16BITS(tptr + 4))); break; case GMPLS_TSC: ND_TCHECK2(*tptr, 8); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Indication %s", ident, tok2str(gmpls_switch_cap_tsc_indication_values, "Unknown (%u)", *(tptr + 4)))); break; default: /* there is some optional stuff left to decode but this is as of yet not specified so just lets hexdump what is left */ if(subl>0){ if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); } } } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: return(0); } /* * this is the common IS-REACH decoder it is called * from various EXTD-IS REACH style TLVs (22,24,222) */ static int isis_print_ext_is_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, int tlv_type) { char ident_buffer[20]; int subtlv_type,subtlv_len,subtlv_sum_len; int proc_bytes = 0; /* how many bytes did we process ? */ if (!ND_TTEST2(*tptr, NODE_ID_LEN)) return(0); ND_PRINT((ndo, "%sIS Neighbor: %s", ident, isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); if (tlv_type != ISIS_TLV_IS_ALIAS_ID) { /* the Alias TLV Metric field is implicit 0 */ if (!ND_TTEST2(*tptr, 3)) /* and is therefore skipped */ return(0); ND_PRINT((ndo, ", Metric: %d", EXTRACT_24BITS(tptr))); tptr+=3; } if (!ND_TTEST2(*tptr, 1)) return(0); subtlv_sum_len=*(tptr++); /* read out subTLV length */ proc_bytes=NODE_ID_LEN+3+1; ND_PRINT((ndo, ", %ssub-TLVs present",subtlv_sum_len ? 
"" : "no ")); if (subtlv_sum_len) { ND_PRINT((ndo, " (%u)", subtlv_sum_len)); while (subtlv_sum_len>0) { if (!ND_TTEST2(*tptr,2)) return(0); subtlv_type=*(tptr++); subtlv_len=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_is_reach_subtlv(ndo, tptr, subtlv_type, subtlv_len, ident_buffer)) return(0); tptr+=subtlv_len; subtlv_sum_len-=(subtlv_len+2); proc_bytes+=(subtlv_len+2); } } return(proc_bytes); } /* * this is the common Multi Topology ID decoder * it is called from various MT-TLVs (222,229,235,237) */ static int isis_print_mtid(netdissect_options *ndo, const uint8_t *tptr, const char *ident) { if (!ND_TTEST2(*tptr, 2)) return(0); ND_PRINT((ndo, "%s%s", ident, tok2str(isis_mt_values, "Reserved for IETF Consensus", ISIS_MASK_MTID(EXTRACT_16BITS(tptr))))); ND_PRINT((ndo, " Topology (0x%03x), Flags: [%s]", ISIS_MASK_MTID(EXTRACT_16BITS(tptr)), bittok2str(isis_mt_flag_values, "none",ISIS_MASK_MTFLAGS(EXTRACT_16BITS(tptr))))); return(2); } /* * this is the common extended IP reach decoder * it is called from TLVs (135,235,236,237) * we process the TLV and optional subTLVs and return * the amount of processed bytes */ static int isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? 
", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); } /* * Clear checksum and lifetime prior to signature verification. */ static void isis_clear_checksum_lifetime(void *header) { struct isis_lsp_header *header_lsp = (struct isis_lsp_header *) header; header_lsp->checksum[0] = 0; header_lsp->checksum[1] = 0; header_lsp->remaining_lifetime[0] = 0; header_lsp->remaining_lifetime[1] = 0; } /* * isis_print * Decode IS-IS packets. Return 0 on error. */ static int isis_print(netdissect_options *ndo, const uint8_t *p, u_int length) { const struct isis_common_header *isis_header; const struct isis_iih_lan_header *header_iih_lan; const struct isis_iih_ptp_header *header_iih_ptp; const struct isis_lsp_header *header_lsp; const struct isis_csnp_header *header_csnp; const struct isis_psnp_header *header_psnp; const struct isis_tlv_lsp *tlv_lsp; const struct isis_tlv_ptp_adj *tlv_ptp_adj; const struct isis_tlv_is_reach *tlv_is_reach; const struct isis_tlv_es_reach *tlv_es_reach; uint8_t pdu_type, max_area, id_length, tlv_type, tlv_len, tmp, alen, lan_alen, prefix_len; uint8_t ext_is_len, ext_ip_len, mt_len; const uint8_t *optr, *pptr, *tptr; u_short packet_len,pdu_len, key_id; u_int i,vendor_id; int sigcheck; packet_len=length; optr = p; /* initialize the _o_riginal pointer to the packet start - need it for parsing the checksum TLV and authentication TLV verification */ isis_header = (const struct isis_common_header *)p; ND_TCHECK(*isis_header); if (length < ISIS_COMMON_HEADER_SIZE) goto trunc; pptr = p+(ISIS_COMMON_HEADER_SIZE); header_iih_lan = (const struct isis_iih_lan_header *)pptr; header_iih_ptp = (const struct isis_iih_ptp_header *)pptr; header_lsp = (const struct isis_lsp_header *)pptr; header_csnp = (const struct isis_csnp_header *)pptr; header_psnp = (const struct isis_psnp_header *)pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "IS-IS")); /* * Sanity checking of the header. 
*/ if (isis_header->version != ISIS_VERSION) { ND_PRINT((ndo, "version %d packet not supported", isis_header->version)); return (0); } if ((isis_header->id_length != SYSTEM_ID_LEN) && (isis_header->id_length != 0)) { ND_PRINT((ndo, "system ID length of %d is not supported", isis_header->id_length)); return (0); } if (isis_header->pdu_version != ISIS_VERSION) { ND_PRINT((ndo, "version %d packet not supported", isis_header->pdu_version)); return (0); } if (length < isis_header->fixed_len) { ND_PRINT((ndo, "fixed header length %u > packet length %u", isis_header->fixed_len, length)); return (0); } if (isis_header->fixed_len < ISIS_COMMON_HEADER_SIZE) { ND_PRINT((ndo, "fixed header length %u < minimum header size %u", isis_header->fixed_len, (u_int)ISIS_COMMON_HEADER_SIZE)); return (0); } max_area = isis_header->max_area; switch(max_area) { case 0: max_area = 3; /* silly shit */ break; case 255: ND_PRINT((ndo, "bad packet -- 255 areas")); return (0); default: break; } id_length = isis_header->id_length; switch(id_length) { case 0: id_length = 6; /* silly shit again */ break; case 1: /* 1-8 are valid sys-ID lenghts */ case 2: case 3: case 4: case 5: case 6: case 7: case 8: break; case 255: id_length = 0; /* entirely useless */ break; default: break; } /* toss any non 6-byte sys-ID len PDUs */ if (id_length != 6 ) { ND_PRINT((ndo, "bad packet -- illegal sys-ID length (%u)", id_length)); return (0); } pdu_type=isis_header->pdu_type; /* in non-verbose mode print the basic PDU Type plus PDU specific brief information*/ if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, "%s%s", ndo->ndo_eflag ? "" : ", ", tok2str(isis_pdu_values, "unknown PDU-Type %u", pdu_type))); } else { /* ok they seem to want to know everything - lets fully decode it */ ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s, hlen: %u, v: %u, pdu-v: %u, sys-id-len: %u (%u), max-area: %u (%u)", tok2str(isis_pdu_values, "unknown, type %u", pdu_type), isis_header->fixed_len, isis_header->version, isis_header->pdu_version, id_length, isis_header->id_length, max_area, isis_header->max_area)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, optr, "\n\t", 8)) /* provide the _o_riginal pointer */ return (0); /* for optionally debugging the common header */ } } switch (pdu_type) { case ISIS_PDU_L1_LAN_IIH: case ISIS_PDU_L2_LAN_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_lan); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_lan->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", lan-id %s, prio %u", isis_print_id(header_iih_lan->lan_id,NODE_ID_LEN), header_iih_lan->priority)); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_lan->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_lan->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_lan->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_lan->circuit_type))); ND_PRINT((ndo, "\n\t lan-id: %s, Priority: %u, PDU length: %u", isis_print_id(header_iih_lan->lan_id, NODE_ID_LEN), (header_iih_lan->priority) & ISIS_LAN_PRIORITY_MASK, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_LAN_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); break; case ISIS_PDU_PTP_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_ptp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_ptp->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_ptp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_ptp->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_ptp->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_ptp->circuit_type))); ND_PRINT((ndo, "\n\t circuit-id: 0x%02x, PDU length: %u", header_iih_ptp->circuit_id, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_PTP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); break; case ISIS_PDU_L1_LSP: case ISIS_PDU_L2_LSP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header 
length %u should be %lu", isis_header->fixed_len, (unsigned long)ISIS_LSP_HEADER_SIZE)); return (0); } ND_TCHECK(*header_lsp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", lsp-id %s, seq 0x%08x, lifetime %5us", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_lsp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t lsp-id: %s, seq: 0x%08x, lifetime: %5us\n\t chksum: 0x%04x", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime), EXTRACT_16BITS(header_lsp->checksum))); osi_print_cksum(ndo, (const uint8_t *)header_lsp->lsp_id, EXTRACT_16BITS(header_lsp->checksum), 12, length-12); ND_PRINT((ndo, ", PDU length: %u, Flags: [ %s", pdu_len, ISIS_MASK_LSP_OL_BIT(header_lsp->typeblock) ? "Overload bit set, " : "")); if (ISIS_MASK_LSP_ATT_BITS(header_lsp->typeblock)) { ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DEFAULT_BIT(header_lsp->typeblock) ? "default " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DELAY_BIT(header_lsp->typeblock) ? "delay " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_EXPENSE_BIT(header_lsp->typeblock) ? "expense " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_ERROR_BIT(header_lsp->typeblock) ? "error " : "")); ND_PRINT((ndo, "ATT bit set, ")); } ND_PRINT((ndo, "%s", ISIS_MASK_LSP_PARTITION_BIT(header_lsp->typeblock) ? "P bit set, " : "")); ND_PRINT((ndo, "%s ]", tok2str(isis_lsp_istype_values, "Unknown(0x%x)", ISIS_MASK_LSP_ISTYPE_BITS(header_lsp->typeblock)))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_LSP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); break; case ISIS_PDU_L1_CSNP: case ISIS_PDU_L2_CSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_csnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_csnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_csnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_csnp->source_id, NODE_ID_LEN), pdu_len)); ND_PRINT((ndo, "\n\t start lsp-id: %s", isis_print_id(header_csnp->start_lsp_id, LSP_ID_LEN))); ND_PRINT((ndo, "\n\t end lsp-id: %s", isis_print_id(header_csnp->end_lsp_id, LSP_ID_LEN))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_CSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); break; case ISIS_PDU_L1_PSNP: case ISIS_PDU_L2_PSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE)) { ND_PRINT((ndo, "- bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned 
long)(ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_psnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_psnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_psnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_psnp->source_id, NODE_ID_LEN), pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_PSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); break; default: if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", length %u", length)); return (1); } (void)print_unknown_data(ndo, pptr, "\n\t ", length); return (0); } /* * Now print the TLV's. */ while (packet_len > 0) { ND_TCHECK2(*pptr, 2); if (packet_len < 2) goto trunc; tlv_type = *pptr++; tlv_len = *pptr++; tmp =tlv_len; /* copy temporary len & pointer to packet data */ tptr = pptr; packet_len -= 2; /* first lets see if we know the TLVs name*/ ND_PRINT((ndo, "\n\t %s TLV #%u, length: %u", tok2str(isis_tlv_values, "unknown", tlv_type), tlv_type, tlv_len)); if (tlv_len == 0) /* something is invalid */ continue; if (packet_len < tlv_len) goto trunc; /* now check if we have a decoder otherwise do a hexdump at the end*/ switch (tlv_type) { case ISIS_TLV_AREA_ADDR: ND_TCHECK2(*tptr, 1); alen = *tptr++; while (tmp && alen < tmp) { ND_TCHECK2(*tptr, alen); ND_PRINT((ndo, "\n\t Area address (length: %u): %s", alen, isonsap_string(ndo, tptr, alen))); tptr += alen; tmp -= alen + 1; if (tmp==0) /* if this is the last area address do not attemt a boundary check */ break; ND_TCHECK2(*tptr, 1); alen = *tptr++; } break; case ISIS_TLV_ISNEIGH: while (tmp >= ETHER_ADDR_LEN) { ND_TCHECK2(*tptr, ETHER_ADDR_LEN); ND_PRINT((ndo, "\n\t SNPA: %s", isis_print_id(tptr, ETHER_ADDR_LEN))); tmp -= ETHER_ADDR_LEN; tptr += ETHER_ADDR_LEN; } break; case ISIS_TLV_ISNEIGH_VARLEN: if (!ND_TTEST2(*tptr, 1) || tmp < 3) /* min. TLV length */ goto trunctlv; lan_alen = *tptr++; /* LAN address length */ if (lan_alen == 0) { ND_PRINT((ndo, "\n\t LAN address length 0 bytes (invalid)")); break; } tmp --; ND_PRINT((ndo, "\n\t LAN address length %u bytes ", lan_alen)); while (tmp >= lan_alen) { ND_TCHECK2(*tptr, lan_alen); ND_PRINT((ndo, "\n\t\tIS Neighbor: %s", isis_print_id(tptr, lan_alen))); tmp -= lan_alen; tptr +=lan_alen; } break; case ISIS_TLV_PADDING: break; case ISIS_TLV_MT_IS_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; while (tmp >= 2+NODE_ID_LEN+3+1) { ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_ALIAS_ID: while (tmp >= NODE_ID_LEN+1) { /* is it worth attempting a decode ? */ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_EXT_IS_REACH: while (tmp >= NODE_ID_LEN+3+1) { /* is it worth attempting a decode ? 
*/ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_REACH: ND_TCHECK2(*tptr,1); /* check if there is one byte left to read out the virtual flag */ ND_PRINT((ndo, "\n\t %s", tok2str(isis_is_reach_virtual_values, "bogus virtual flag 0x%02x", *tptr++))); tlv_is_reach = (const struct isis_tlv_is_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_is_reach)) { ND_TCHECK(*tlv_is_reach); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tlv_is_reach->neighbor_nodeid, NODE_ID_LEN))); isis_print_metric_block(ndo, &tlv_is_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_is_reach); tlv_is_reach++; } break; case ISIS_TLV_ESNEIGH: tlv_es_reach = (const struct isis_tlv_es_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_es_reach)) { ND_TCHECK(*tlv_es_reach); ND_PRINT((ndo, "\n\t ES Neighbor: %s", isis_print_id(tlv_es_reach->neighbor_sysid, SYSTEM_ID_LEN))); isis_print_metric_block(ndo, &tlv_es_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_es_reach); tlv_es_reach++; } break; /* those two TLVs share the same format */ case ISIS_TLV_INT_IP_REACH: case ISIS_TLV_EXT_IP_REACH: if (!isis_print_tlv_ip_reach(ndo, pptr, "\n\t ", tlv_len)) return (1); break; case ISIS_TLV_EXTD_IP_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP6_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? 
*/ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6ADDR: while (tmp>=sizeof(struct in6_addr)) { ND_TCHECK2(*tptr, sizeof(struct in6_addr)); ND_PRINT((ndo, "\n\t IPv6 interface address: %s", ip6addr_string(ndo, tptr))); tptr += sizeof(struct in6_addr); tmp -= sizeof(struct in6_addr); } break; case ISIS_TLV_AUTH: ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t %s: ", tok2str(isis_subtlv_auth_values, "unknown Authentication type 0x%02x", *tptr))); switch (*tptr) { case ISIS_SUBTLV_AUTH_SIMPLE: if (fn_printzp(ndo, tptr + 1, tlv_len - 1, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_SUBTLV_AUTH_MD5: for(i=1;i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } if (tlv_len != ISIS_SUBTLV_AUTH_MD5_LEN+1) ND_PRINT((ndo, ", (invalid subTLV) ")); sigcheck = signature_verify(ndo, optr, length, tptr + 1, isis_clear_checksum_lifetime, header_lsp); ND_PRINT((ndo, " (%s)", tok2str(signature_check_values, "Unknown", sigcheck))); break; case ISIS_SUBTLV_AUTH_GENERIC: ND_TCHECK2(*(tptr + 1), 2); key_id = EXTRACT_16BITS((tptr+1)); ND_PRINT((ndo, "%u, password: ", key_id)); for(i=1 + sizeof(uint16_t);i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } break; case ISIS_SUBTLV_AUTH_PRIVATE: default: if (!print_unknown_data(ndo, tptr + 1, "\n\t\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_PTP_ADJ: tlv_ptp_adj = (const struct isis_tlv_ptp_adj *)tptr; if(tmp>=1) { ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t Adjacency State: %s (%u)", tok2str(isis_ptp_adjancey_values, "unknown", *tptr), *tptr)); tmp--; } if(tmp>sizeof(tlv_ptp_adj->extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->extd_local_circuit_id); ND_PRINT((ndo, "\n\t Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->extd_local_circuit_id))); tmp-=sizeof(tlv_ptp_adj->extd_local_circuit_id); } if(tmp>=SYSTEM_ID_LEN) { ND_TCHECK2(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t Neighbor System-ID: %s", isis_print_id(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN))); tmp-=SYSTEM_ID_LEN; } if(tmp>=sizeof(tlv_ptp_adj->neighbor_extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->neighbor_extd_local_circuit_id); ND_PRINT((ndo, "\n\t Neighbor Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->neighbor_extd_local_circuit_id))); } break; case ISIS_TLV_PROTOCOLS: ND_PRINT((ndo, "\n\t NLPID(s): ")); while (tmp>0) { ND_TCHECK2(*(tptr), 1); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (tmp>1) /* further NPLIDs ? 
- put comma */ ND_PRINT((ndo, ", ")); tptr++; tmp--; } break; case ISIS_TLV_MT_PORT_CAP: { ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t RES: %d, MTID(s): %d", (EXTRACT_16BITS (tptr) >> 12), (EXTRACT_16BITS (tptr) & 0x0fff))); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_port_cap_subtlv(ndo, tptr, tmp); break; } case ISIS_TLV_MT_CAPABILITY: ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t O: %d, RES: %d, MTID(s): %d", (EXTRACT_16BITS(tptr) >> 15) & 0x01, (EXTRACT_16BITS(tptr) >> 12) & 0x07, EXTRACT_16BITS(tptr) & 0x0fff)); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_capability_subtlv(ndo, tptr, tmp); break; case ISIS_TLV_TE_ROUTER_ID: ND_TCHECK2(*pptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t Traffic Engineering Router ID: %s", ipaddr_string(ndo, pptr))); break; case ISIS_TLV_IPADDR: while (tmp>=sizeof(struct in_addr)) { ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr += sizeof(struct in_addr); tmp -= sizeof(struct in_addr); } break; case ISIS_TLV_HOSTNAME: ND_PRINT((ndo, "\n\t Hostname: ")); if (fn_printzp(ndo, tptr, tmp, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_TLV_SHARED_RISK_GROUP: if (tmp < NODE_ID_LEN) break; ND_TCHECK2(*tptr, NODE_ID_LEN); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); tmp-=(NODE_ID_LEN); if (tmp < 1) break; ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, ", Flags: [%s]", ISIS_MASK_TLV_SHARED_RISK_GROUP(*tptr++) ? "numbered" : "unnumbered")); tmp--; if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 neighbor address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); while (tmp>=4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Link-ID: 0x%08x", EXTRACT_32BITS(tptr))); tptr+=4; tmp-=4; } break; case ISIS_TLV_LSP: tlv_lsp = (const struct isis_tlv_lsp *)tptr; while(tmp>=sizeof(struct isis_tlv_lsp)) { ND_TCHECK((tlv_lsp->lsp_id)[LSP_ID_LEN-1]); ND_PRINT((ndo, "\n\t lsp-id: %s", isis_print_id(tlv_lsp->lsp_id, LSP_ID_LEN))); ND_TCHECK2(tlv_lsp->sequence_number, 4); ND_PRINT((ndo, ", seq: 0x%08x", EXTRACT_32BITS(tlv_lsp->sequence_number))); ND_TCHECK2(tlv_lsp->remaining_lifetime, 2); ND_PRINT((ndo, ", lifetime: %5ds", EXTRACT_16BITS(tlv_lsp->remaining_lifetime))); ND_TCHECK2(tlv_lsp->checksum, 2); ND_PRINT((ndo, ", chksum: 0x%04x", EXTRACT_16BITS(tlv_lsp->checksum))); tmp-=sizeof(struct isis_tlv_lsp); tlv_lsp++; } break; case ISIS_TLV_CHECKSUM: if (tmp < ISIS_TLV_CHECKSUM_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_CHECKSUM_MINLEN); ND_PRINT((ndo, "\n\t checksum: 0x%04x ", EXTRACT_16BITS(tptr))); /* do not attempt to verify the checksum if it is zero * most likely a HMAC-MD5 TLV is also present and * to avoid conflicts the checksum TLV is zeroed. 
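	 * (a zero checksum is simply printed as "(unverified)" by
	 * osi_print_cksum() below instead of being flagged as a mismatch)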
* see rfc3358 for details */ osi_print_cksum(ndo, optr, EXTRACT_16BITS(tptr), tptr-optr, length); break; case ISIS_TLV_POI: if (tlv_len >= SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Purge Originator System-ID: %s", isis_print_id(tptr + 1, SYSTEM_ID_LEN))); } if (tlv_len == 2 * SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, 2 * SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Received from System-ID: %s", isis_print_id(tptr + SYSTEM_ID_LEN + 1, SYSTEM_ID_LEN))); } break; case ISIS_TLV_MT_SUPPORTED: if (tmp < ISIS_TLV_MT_SUPPORTED_MINLEN) break; while (tmp>1) { /* length can only be a multiple of 2, otherwise there is something broken -> so decode down until length is 1 */ if (tmp!=1) { mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; } else { ND_PRINT((ndo, "\n\t invalid MT-ID")); break; } } break; case ISIS_TLV_RESTART_SIGNALING: /* first attempt to decode the flags */ if (tmp < ISIS_TLV_RESTART_SIGNALING_FLAGLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_FLAGLEN); ND_PRINT((ndo, "\n\t Flags [%s]", bittok2str(isis_restart_flag_values, "none", *tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; tmp-=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; /* is there anything other than the flags field? */ if (tmp == 0) break; if (tmp < ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN); ND_PRINT((ndo, ", Remaining holding time %us", EXTRACT_16BITS(tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; tmp-=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; /* is there an additional sysid field present ?*/ if (tmp == SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, ", for %s", isis_print_id(tptr,SYSTEM_ID_LEN))); } break; case ISIS_TLV_IDRP_INFO: if (tmp < ISIS_TLV_IDRP_INFO_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_IDRP_INFO_MINLEN); ND_PRINT((ndo, "\n\t Inter-Domain Information Type: %s", tok2str(isis_subtlv_idrp_values, "Unknown (0x%02x)", *tptr))); switch (*tptr++) { case ISIS_SUBTLV_IDRP_ASN: ND_TCHECK2(*tptr, 2); /* fetch AS number */ ND_PRINT((ndo, "AS Number: %u", EXTRACT_16BITS(tptr))); break; case ISIS_SUBTLV_IDRP_LOCAL: case ISIS_SUBTLV_IDRP_RES: default: if (!print_unknown_data(ndo, tptr, "\n\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_LSP_BUFFERSIZE: if (tmp < ISIS_TLV_LSP_BUFFERSIZE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_LSP_BUFFERSIZE_MINLEN); ND_PRINT((ndo, "\n\t LSP Buffersize: %u", EXTRACT_16BITS(tptr))); break; case ISIS_TLV_PART_DIS: while (tmp >= SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t %s", isis_print_id(tptr, SYSTEM_ID_LEN))); tptr+=SYSTEM_ID_LEN; tmp-=SYSTEM_ID_LEN; } break; case ISIS_TLV_PREFIX_NEIGH: if (tmp < sizeof(struct isis_metric_block)) break; ND_TCHECK2(*tptr, sizeof(struct isis_metric_block)); ND_PRINT((ndo, "\n\t Metric Block")); isis_print_metric_block(ndo, (const struct isis_metric_block *)tptr); tptr+=sizeof(struct isis_metric_block); tmp-=sizeof(struct isis_metric_block); while(tmp>0) { ND_TCHECK2(*tptr, 1); prefix_len=*tptr++; /* read out prefix length in semioctets*/ if (prefix_len < 2) { ND_PRINT((ndo, "\n\t\tAddress: prefix length %u < 2", prefix_len)); break; } tmp--; if (tmp < prefix_len/2) break; ND_TCHECK2(*tptr, prefix_len / 2); ND_PRINT((ndo, "\n\t\tAddress: %s/%u", isonsap_string(ndo, tptr, prefix_len / 2), prefix_len * 4)); tptr+=prefix_len/2; tmp-=prefix_len/2; } break; case ISIS_TLV_IIH_SEQNR: if (tmp < ISIS_TLV_IIH_SEQNR_MINLEN) 
break; ND_TCHECK2(*tptr, ISIS_TLV_IIH_SEQNR_MINLEN); /* check if four bytes are on the wire */ ND_PRINT((ndo, "\n\t Sequence number: %u", EXTRACT_32BITS(tptr))); break; case ISIS_TLV_VENDOR_PRIVATE: if (tmp < ISIS_TLV_VENDOR_PRIVATE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_VENDOR_PRIVATE_MINLEN); /* check if enough byte for a full oui */ vendor_id = EXTRACT_24BITS(tptr); ND_PRINT((ndo, "\n\t Vendor: %s (%u)", tok2str(oui_values, "Unknown", vendor_id), vendor_id)); tptr+=3; tmp-=3; if (tmp > 0) /* hexdump the rest */ if (!print_unknown_data(ndo, tptr, "\n\t\t", tmp)) return(0); break; /* * FIXME those are the defined TLVs that lack a decoder * you are welcome to contribute code ;-) */ case ISIS_TLV_DECNET_PHASE4: case ISIS_TLV_LUCENT_PRIVATE: case ISIS_TLV_IPAUTH: case ISIS_TLV_NORTEL_PRIVATE1: case ISIS_TLV_NORTEL_PRIVATE2: default: if (ndo->ndo_vflag <= 1) { if (!print_unknown_data(ndo, pptr, "\n\t\t", tlv_len)) return(0); } break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", tlv_len)) return(0); } pptr += tlv_len; packet_len -= tlv_len; } if (packet_len != 0) { ND_PRINT((ndo, "\n\t %u straggler bytes", packet_len)); } return (1); trunc: ND_PRINT((ndo, "%s", tstr)); return (1); trunctlv: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static void osi_print_cksum(netdissect_options *ndo, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length) { uint16_t calculated_checksum; /* do not attempt to verify the checksum if it is zero, * if the offset is nonsense, * or the base pointer is not sane */ if (!checksum || checksum_offset < 0 || !ND_TTEST2(*(pptr + checksum_offset), 2) || (u_int)checksum_offset > length || !ND_TTEST2(*pptr, length)) { ND_PRINT((ndo, " (unverified)")); } else { #if 0 printf("\nosi_print_cksum: %p %u %u\n", pptr, checksum_offset, length); #endif calculated_checksum = create_osi_cksum(pptr, checksum_offset, length); if (checksum == calculated_checksum) { ND_PRINT((ndo, " (correct)")); } else { ND_PRINT((ndo, " (incorrect should be 0x%04x)", calculated_checksum)); } } } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
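/*
 * Minimal editorial sketch, not part of the captured sources: verifying an
 * ISO 8473/10589 Fletcher-style checksum (the kind osi_print_cksum() above
 * recomputes via create_osi_cksum()) only requires that both running sums
 * over the PDU, with the transmitted checksum octets left in place, come out
 * to zero modulo 255.  The function name and parameters below are
 * illustrative and assume a fully captured PDU.
 */
#include <stddef.h>
#include <stdint.h>

static int
osi_cksum_ok_sketch(const uint8_t *pdu, size_t length, size_t cksum_offset)
{
	uint32_t c0 = 0, c1 = 0;
	size_t i;

	if (cksum_offset + 1 >= length)
		return 0;		/* nonsense offset */

	/* a checksum field of 0x0000 means "not computed" - nothing to check */
	if (pdu[cksum_offset] == 0 && pdu[cksum_offset + 1] == 0)
		return 1;

	for (i = 0; i < length; i++) {
		c0 = (c0 + pdu[i]) % 255;
		c1 = (c1 + c0) % 255;
	}
	return (c0 == 0 && c1 == 0);
}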
/* * Copyright (c) 1992, 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Original code by Matt Thomas, Digital Equipment Corporation * * Extensively modified by Hannes Gredler (hannes@gredler.at) for more * complete IS-IS & CLNP support. */ /* \summary: ISO CLNS, ESIS, and ISIS printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "ether.h" #include "nlpid.h" #include "extract.h" #include "gmpls.h" #include "oui.h" #include "signature.h" static const char tstr[] = " [|isis]"; /* * IS-IS is defined in ISO 10589. Look there for protocol definitions. */ #define SYSTEM_ID_LEN ETHER_ADDR_LEN #define NODE_ID_LEN SYSTEM_ID_LEN+1 #define LSP_ID_LEN SYSTEM_ID_LEN+2 #define ISIS_VERSION 1 #define ESIS_VERSION 1 #define CLNP_VERSION 1 #define ISIS_PDU_TYPE_MASK 0x1F #define ESIS_PDU_TYPE_MASK 0x1F #define CLNP_PDU_TYPE_MASK 0x1F #define CLNP_FLAG_MASK 0xE0 #define ISIS_LAN_PRIORITY_MASK 0x7F #define ISIS_PDU_L1_LAN_IIH 15 #define ISIS_PDU_L2_LAN_IIH 16 #define ISIS_PDU_PTP_IIH 17 #define ISIS_PDU_L1_LSP 18 #define ISIS_PDU_L2_LSP 20 #define ISIS_PDU_L1_CSNP 24 #define ISIS_PDU_L2_CSNP 25 #define ISIS_PDU_L1_PSNP 26 #define ISIS_PDU_L2_PSNP 27 static const struct tok isis_pdu_values[] = { { ISIS_PDU_L1_LAN_IIH, "L1 Lan IIH"}, { ISIS_PDU_L2_LAN_IIH, "L2 Lan IIH"}, { ISIS_PDU_PTP_IIH, "p2p IIH"}, { ISIS_PDU_L1_LSP, "L1 LSP"}, { ISIS_PDU_L2_LSP, "L2 LSP"}, { ISIS_PDU_L1_CSNP, "L1 CSNP"}, { ISIS_PDU_L2_CSNP, "L2 CSNP"}, { ISIS_PDU_L1_PSNP, "L1 PSNP"}, { ISIS_PDU_L2_PSNP, "L2 PSNP"}, { 0, NULL} }; /* * A TLV is a tuple of a type, length and a value and is normally used for * encoding information in all sorts of places. This is an enumeration of * the well known types. 
* * list taken from rfc3359 plus some memory from veterans ;-) */ #define ISIS_TLV_AREA_ADDR 1 /* iso10589 */ #define ISIS_TLV_IS_REACH 2 /* iso10589 */ #define ISIS_TLV_ESNEIGH 3 /* iso10589 */ #define ISIS_TLV_PART_DIS 4 /* iso10589 */ #define ISIS_TLV_PREFIX_NEIGH 5 /* iso10589 */ #define ISIS_TLV_ISNEIGH 6 /* iso10589 */ #define ISIS_TLV_ISNEIGH_VARLEN 7 /* iso10589 */ #define ISIS_TLV_PADDING 8 /* iso10589 */ #define ISIS_TLV_LSP 9 /* iso10589 */ #define ISIS_TLV_AUTH 10 /* iso10589, rfc3567 */ #define ISIS_TLV_CHECKSUM 12 /* rfc3358 */ #define ISIS_TLV_CHECKSUM_MINLEN 2 #define ISIS_TLV_POI 13 /* rfc6232 */ #define ISIS_TLV_LSP_BUFFERSIZE 14 /* iso10589 rev2 */ #define ISIS_TLV_LSP_BUFFERSIZE_MINLEN 2 #define ISIS_TLV_EXT_IS_REACH 22 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_IS_ALIAS_ID 24 /* draft-ietf-isis-ext-lsp-frags-02 */ #define ISIS_TLV_DECNET_PHASE4 42 #define ISIS_TLV_LUCENT_PRIVATE 66 #define ISIS_TLV_INT_IP_REACH 128 /* rfc1195, rfc2966 */ #define ISIS_TLV_PROTOCOLS 129 /* rfc1195 */ #define ISIS_TLV_EXT_IP_REACH 130 /* rfc1195, rfc2966 */ #define ISIS_TLV_IDRP_INFO 131 /* rfc1195 */ #define ISIS_TLV_IDRP_INFO_MINLEN 1 #define ISIS_TLV_IPADDR 132 /* rfc1195 */ #define ISIS_TLV_IPAUTH 133 /* rfc1195 */ #define ISIS_TLV_TE_ROUTER_ID 134 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_EXTD_IP_REACH 135 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_HOSTNAME 137 /* rfc2763 */ #define ISIS_TLV_SHARED_RISK_GROUP 138 /* draft-ietf-isis-gmpls-extensions */ #define ISIS_TLV_MT_PORT_CAP 143 /* rfc6165 */ #define ISIS_TLV_MT_CAPABILITY 144 /* rfc6329 */ #define ISIS_TLV_NORTEL_PRIVATE1 176 #define ISIS_TLV_NORTEL_PRIVATE2 177 #define ISIS_TLV_RESTART_SIGNALING 211 /* rfc3847 */ #define ISIS_TLV_RESTART_SIGNALING_FLAGLEN 1 #define ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN 2 #define ISIS_TLV_MT_IS_REACH 222 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED 229 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED_MINLEN 2 #define ISIS_TLV_IP6ADDR 232 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP_REACH 235 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_IP6_REACH 236 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP6_REACH 237 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_PTP_ADJ 240 /* rfc3373 */ #define ISIS_TLV_IIH_SEQNR 241 /* draft-shen-isis-iih-sequence-00 */ #define ISIS_TLV_IIH_SEQNR_MINLEN 4 #define ISIS_TLV_VENDOR_PRIVATE 250 /* draft-ietf-isis-experimental-tlv-01 */ #define ISIS_TLV_VENDOR_PRIVATE_MINLEN 3 static const struct tok isis_tlv_values[] = { { ISIS_TLV_AREA_ADDR, "Area address(es)"}, { ISIS_TLV_IS_REACH, "IS Reachability"}, { ISIS_TLV_ESNEIGH, "ES Neighbor(s)"}, { ISIS_TLV_PART_DIS, "Partition DIS"}, { ISIS_TLV_PREFIX_NEIGH, "Prefix Neighbors"}, { ISIS_TLV_ISNEIGH, "IS Neighbor(s)"}, { ISIS_TLV_ISNEIGH_VARLEN, "IS Neighbor(s) (variable length)"}, { ISIS_TLV_PADDING, "Padding"}, { ISIS_TLV_LSP, "LSP entries"}, { ISIS_TLV_AUTH, "Authentication"}, { ISIS_TLV_CHECKSUM, "Checksum"}, { ISIS_TLV_POI, "Purge Originator Identifier"}, { ISIS_TLV_LSP_BUFFERSIZE, "LSP Buffersize"}, { ISIS_TLV_EXT_IS_REACH, "Extended IS Reachability"}, { ISIS_TLV_IS_ALIAS_ID, "IS Alias ID"}, { ISIS_TLV_DECNET_PHASE4, "DECnet Phase IV"}, { ISIS_TLV_LUCENT_PRIVATE, "Lucent Proprietary"}, { ISIS_TLV_INT_IP_REACH, "IPv4 Internal Reachability"}, { ISIS_TLV_PROTOCOLS, "Protocols supported"}, { ISIS_TLV_EXT_IP_REACH, "IPv4 External Reachability"}, { ISIS_TLV_IDRP_INFO, "Inter-Domain Information Type"}, { 
ISIS_TLV_IPADDR, "IPv4 Interface address(es)"}, { ISIS_TLV_IPAUTH, "IPv4 authentication (deprecated)"}, { ISIS_TLV_TE_ROUTER_ID, "Traffic Engineering Router ID"}, { ISIS_TLV_EXTD_IP_REACH, "Extended IPv4 Reachability"}, { ISIS_TLV_SHARED_RISK_GROUP, "Shared Risk Link Group"}, { ISIS_TLV_MT_PORT_CAP, "Multi-Topology-Aware Port Capability"}, { ISIS_TLV_MT_CAPABILITY, "Multi-Topology Capability"}, { ISIS_TLV_NORTEL_PRIVATE1, "Nortel Proprietary"}, { ISIS_TLV_NORTEL_PRIVATE2, "Nortel Proprietary"}, { ISIS_TLV_HOSTNAME, "Hostname"}, { ISIS_TLV_RESTART_SIGNALING, "Restart Signaling"}, { ISIS_TLV_MT_IS_REACH, "Multi Topology IS Reachability"}, { ISIS_TLV_MT_SUPPORTED, "Multi Topology"}, { ISIS_TLV_IP6ADDR, "IPv6 Interface address(es)"}, { ISIS_TLV_MT_IP_REACH, "Multi-Topology IPv4 Reachability"}, { ISIS_TLV_IP6_REACH, "IPv6 reachability"}, { ISIS_TLV_MT_IP6_REACH, "Multi-Topology IP6 Reachability"}, { ISIS_TLV_PTP_ADJ, "Point-to-point Adjacency State"}, { ISIS_TLV_IIH_SEQNR, "Hello PDU Sequence Number"}, { ISIS_TLV_VENDOR_PRIVATE, "Vendor Private"}, { 0, NULL } }; #define ESIS_OPTION_PROTOCOLS 129 #define ESIS_OPTION_QOS_MAINTENANCE 195 /* iso9542 */ #define ESIS_OPTION_SECURITY 197 /* iso9542 */ #define ESIS_OPTION_ES_CONF_TIME 198 /* iso9542 */ #define ESIS_OPTION_PRIORITY 205 /* iso9542 */ #define ESIS_OPTION_ADDRESS_MASK 225 /* iso9542 */ #define ESIS_OPTION_SNPA_MASK 226 /* iso9542 */ static const struct tok esis_option_values[] = { { ESIS_OPTION_PROTOCOLS, "Protocols supported"}, { ESIS_OPTION_QOS_MAINTENANCE, "QoS Maintenance" }, { ESIS_OPTION_SECURITY, "Security" }, { ESIS_OPTION_ES_CONF_TIME, "ES Configuration Time" }, { ESIS_OPTION_PRIORITY, "Priority" }, { ESIS_OPTION_ADDRESS_MASK, "Addressk Mask" }, { ESIS_OPTION_SNPA_MASK, "SNPA Mask" }, { 0, NULL } }; #define CLNP_OPTION_DISCARD_REASON 193 #define CLNP_OPTION_QOS_MAINTENANCE 195 /* iso8473 */ #define CLNP_OPTION_SECURITY 197 /* iso8473 */ #define CLNP_OPTION_SOURCE_ROUTING 200 /* iso8473 */ #define CLNP_OPTION_ROUTE_RECORDING 203 /* iso8473 */ #define CLNP_OPTION_PADDING 204 /* iso8473 */ #define CLNP_OPTION_PRIORITY 205 /* iso8473 */ static const struct tok clnp_option_values[] = { { CLNP_OPTION_DISCARD_REASON, "Discard Reason"}, { CLNP_OPTION_PRIORITY, "Priority"}, { CLNP_OPTION_QOS_MAINTENANCE, "QoS Maintenance"}, { CLNP_OPTION_SECURITY, "Security"}, { CLNP_OPTION_SOURCE_ROUTING, "Source Routing"}, { CLNP_OPTION_ROUTE_RECORDING, "Route Recording"}, { CLNP_OPTION_PADDING, "Padding"}, { 0, NULL } }; static const struct tok clnp_option_rfd_class_values[] = { { 0x0, "General"}, { 0x8, "Address"}, { 0x9, "Source Routeing"}, { 0xa, "Lifetime"}, { 0xb, "PDU Discarded"}, { 0xc, "Reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_general_values[] = { { 0x0, "Reason not specified"}, { 0x1, "Protocol procedure error"}, { 0x2, "Incorrect checksum"}, { 0x3, "PDU discarded due to congestion"}, { 0x4, "Header syntax error (cannot be parsed)"}, { 0x5, "Segmentation needed but not permitted"}, { 0x6, "Incomplete PDU received"}, { 0x7, "Duplicate option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_address_values[] = { { 0x0, "Destination address unreachable"}, { 0x1, "Destination address unknown"}, { 0, NULL } }; static const struct tok clnp_option_rfd_source_routeing_values[] = { { 0x0, "Unspecified source routeing error"}, { 0x1, "Syntax error in source routeing field"}, { 0x2, "Unknown address in source routeing field"}, { 0x3, "Path not acceptable"}, { 0, NULL } }; static const struct tok 
clnp_option_rfd_lifetime_values[] = { { 0x0, "Lifetime expired while data unit in transit"}, { 0x1, "Lifetime expired during reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_pdu_discard_values[] = { { 0x0, "Unsupported option not specified"}, { 0x1, "Unsupported protocol version"}, { 0x2, "Unsupported security option"}, { 0x3, "Unsupported source routeing option"}, { 0x4, "Unsupported recording of route option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_reassembly_values[] = { { 0x0, "Reassembly interference"}, { 0, NULL } }; /* array of 16 error-classes */ static const struct tok *clnp_option_rfd_error_class[] = { clnp_option_rfd_general_values, NULL, NULL, NULL, NULL, NULL, NULL, NULL, clnp_option_rfd_address_values, clnp_option_rfd_source_routeing_values, clnp_option_rfd_lifetime_values, clnp_option_rfd_pdu_discard_values, clnp_option_rfd_reassembly_values, NULL, NULL, NULL }; #define CLNP_OPTION_OPTION_QOS_MASK 0x3f #define CLNP_OPTION_SCOPE_MASK 0xc0 #define CLNP_OPTION_SCOPE_SA_SPEC 0x40 #define CLNP_OPTION_SCOPE_DA_SPEC 0x80 #define CLNP_OPTION_SCOPE_GLOBAL 0xc0 static const struct tok clnp_option_scope_values[] = { { CLNP_OPTION_SCOPE_SA_SPEC, "Source Address Specific"}, { CLNP_OPTION_SCOPE_DA_SPEC, "Destination Address Specific"}, { CLNP_OPTION_SCOPE_GLOBAL, "Globally unique"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_values[] = { { 0x0, "partial"}, { 0x1, "complete"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_string_values[] = { { CLNP_OPTION_SOURCE_ROUTING, "source routing"}, { CLNP_OPTION_ROUTE_RECORDING, "recording of route in progress"}, { 0, NULL } }; static const struct tok clnp_option_qos_global_values[] = { { 0x20, "reserved"}, { 0x10, "sequencing vs. delay"}, { 0x08, "congested"}, { 0x04, "delay vs. cost"}, { 0x02, "error vs. delay"}, { 0x01, "error vs. 
cost"}, { 0, NULL } }; #define ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP 3 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID 4 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID 5 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR 6 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR 8 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW 9 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW 10 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW 11 /* rfc4124 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD 12 /* draft-ietf-tewg-diff-te-proto-06 */ #define ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC 18 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE 19 /* draft-ietf-isis-link-attr-01 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE 20 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR 21 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS 22 /* rfc4124 */ #define ISIS_SUBTLV_SPB_METRIC 29 /* rfc6329 */ static const struct tok isis_ext_is_reach_subtlv_values[] = { { ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP, "Administrative groups" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID, "Link Local/Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID, "Link Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR, "IPv4 interface address" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR, "IPv4 neighbor address" }, { ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW, "Maximum link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW, "Reservable link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW, "Unreserved bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC, "Traffic Engineering Metric" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE, "Link Attribute" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE, "Link Protection Type" }, { ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR, "Interface Switching Capability" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD, "Bandwidth Constraints (old)" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS, "Bandwidth Constraints" }, { ISIS_SUBTLV_SPB_METRIC, "SPB Metric" }, { 250, "Reserved for cisco specific extensions" }, { 251, "Reserved for cisco specific extensions" }, { 252, "Reserved for cisco specific extensions" }, { 253, "Reserved for cisco specific extensions" }, { 254, "Reserved for cisco specific extensions" }, { 255, "Reserved for future expansion" }, { 0, NULL } }; #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32 1 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64 2 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR 117 /* draft-ietf-isis-wg-multi-topology-05 */ static const struct tok isis_ext_ip_reach_subtlv_values[] = { { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32, "32-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64, "64-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR, "Management Prefix Color" }, { 0, NULL } }; static const struct tok isis_subtlv_link_attribute_values[] = { { 0x01, "Local Protection Available" }, { 0x02, "Link excluded from local protection path" }, { 0x04, "Local maintenance required"}, { 0, NULL } }; #define ISIS_SUBTLV_AUTH_SIMPLE 1 #define ISIS_SUBTLV_AUTH_GENERIC 3 /* rfc 5310 */ #define ISIS_SUBTLV_AUTH_MD5 54 #define ISIS_SUBTLV_AUTH_MD5_LEN 16 #define 
ISIS_SUBTLV_AUTH_PRIVATE 255 static const struct tok isis_subtlv_auth_values[] = { { ISIS_SUBTLV_AUTH_SIMPLE, "simple text password"}, { ISIS_SUBTLV_AUTH_GENERIC, "Generic Crypto key-id"}, { ISIS_SUBTLV_AUTH_MD5, "HMAC-MD5 password"}, { ISIS_SUBTLV_AUTH_PRIVATE, "Routing Domain private password"}, { 0, NULL } }; #define ISIS_SUBTLV_IDRP_RES 0 #define ISIS_SUBTLV_IDRP_LOCAL 1 #define ISIS_SUBTLV_IDRP_ASN 2 static const struct tok isis_subtlv_idrp_values[] = { { ISIS_SUBTLV_IDRP_RES, "Reserved"}, { ISIS_SUBTLV_IDRP_LOCAL, "Routing-Domain Specific"}, { ISIS_SUBTLV_IDRP_ASN, "AS Number Tag"}, { 0, NULL} }; #define ISIS_SUBTLV_SPB_MCID 4 #define ISIS_SUBTLV_SPB_DIGEST 5 #define ISIS_SUBTLV_SPB_BVID 6 #define ISIS_SUBTLV_SPB_INSTANCE 1 #define ISIS_SUBTLV_SPBM_SI 3 #define ISIS_SPB_MCID_LEN 51 #define ISIS_SUBTLV_SPB_MCID_MIN_LEN 102 #define ISIS_SUBTLV_SPB_DIGEST_MIN_LEN 33 #define ISIS_SUBTLV_SPB_BVID_MIN_LEN 6 #define ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN 19 #define ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN 8 static const struct tok isis_mt_port_cap_subtlv_values[] = { { ISIS_SUBTLV_SPB_MCID, "SPB MCID" }, { ISIS_SUBTLV_SPB_DIGEST, "SPB Digest" }, { ISIS_SUBTLV_SPB_BVID, "SPB BVID" }, { 0, NULL } }; static const struct tok isis_mt_capability_subtlv_values[] = { { ISIS_SUBTLV_SPB_INSTANCE, "SPB Instance" }, { ISIS_SUBTLV_SPBM_SI, "SPBM Service Identifier and Unicast Address" }, { 0, NULL } }; struct isis_spb_mcid { uint8_t format_id; uint8_t name[32]; uint8_t revision_lvl[2]; uint8_t digest[16]; }; struct isis_subtlv_spb_mcid { struct isis_spb_mcid mcid; struct isis_spb_mcid aux_mcid; }; struct isis_subtlv_spb_instance { uint8_t cist_root_id[8]; uint8_t cist_external_root_path_cost[4]; uint8_t bridge_priority[2]; uint8_t spsourceid[4]; uint8_t no_of_trees; }; #define CLNP_SEGMENT_PART 0x80 #define CLNP_MORE_SEGMENTS 0x40 #define CLNP_REQUEST_ER 0x20 static const struct tok clnp_flag_values[] = { { CLNP_SEGMENT_PART, "Segmentation permitted"}, { CLNP_MORE_SEGMENTS, "more Segments"}, { CLNP_REQUEST_ER, "request Error Report"}, { 0, NULL} }; #define ISIS_MASK_LSP_OL_BIT(x) ((x)&0x4) #define ISIS_MASK_LSP_ISTYPE_BITS(x) ((x)&0x3) #define ISIS_MASK_LSP_PARTITION_BIT(x) ((x)&0x80) #define ISIS_MASK_LSP_ATT_BITS(x) ((x)&0x78) #define ISIS_MASK_LSP_ATT_ERROR_BIT(x) ((x)&0x40) #define ISIS_MASK_LSP_ATT_EXPENSE_BIT(x) ((x)&0x20) #define ISIS_MASK_LSP_ATT_DELAY_BIT(x) ((x)&0x10) #define ISIS_MASK_LSP_ATT_DEFAULT_BIT(x) ((x)&0x8) #define ISIS_MASK_MTID(x) ((x)&0x0fff) #define ISIS_MASK_MTFLAGS(x) ((x)&0xf000) static const struct tok isis_mt_flag_values[] = { { 0x4000, "ATT bit set"}, { 0x8000, "Overload bit set"}, { 0, NULL} }; #define ISIS_MASK_TLV_EXTD_IP_UPDOWN(x) ((x)&0x80) #define ISIS_MASK_TLV_EXTD_IP_SUBTLV(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_IE(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_SUBTLV(x) ((x)&0x20) #define ISIS_LSP_TLV_METRIC_SUPPORTED(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_IE(x) ((x)&0x40) #define ISIS_LSP_TLV_METRIC_UPDOWN(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_VALUE(x) ((x)&0x3f) #define ISIS_MASK_TLV_SHARED_RISK_GROUP(x) ((x)&0x1) static const struct tok isis_mt_values[] = { { 0, "IPv4 unicast"}, { 1, "In-Band Management"}, { 2, "IPv6 unicast"}, { 3, "Multicast"}, { 4095, "Development, Experimental or Proprietary"}, { 0, NULL } }; static const struct tok isis_iih_circuit_type_values[] = { { 1, "Level 1 only"}, { 2, "Level 2 only"}, { 3, "Level 1, Level 2"}, { 0, NULL} }; #define ISIS_LSP_TYPE_UNUSED0 0 #define ISIS_LSP_TYPE_LEVEL_1 1 #define ISIS_LSP_TYPE_UNUSED2 2 #define 
ISIS_LSP_TYPE_LEVEL_2 3 static const struct tok isis_lsp_istype_values[] = { { ISIS_LSP_TYPE_UNUSED0, "Unused 0x0 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_1, "L1 IS"}, { ISIS_LSP_TYPE_UNUSED2, "Unused 0x2 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_2, "L2 IS"}, { 0, NULL } }; /* * Katz's point to point adjacency TLV uses codes to tell us the state of * the remote adjacency. Enumerate them. */ #define ISIS_PTP_ADJ_UP 0 #define ISIS_PTP_ADJ_INIT 1 #define ISIS_PTP_ADJ_DOWN 2 static const struct tok isis_ptp_adjancey_values[] = { { ISIS_PTP_ADJ_UP, "Up" }, { ISIS_PTP_ADJ_INIT, "Initializing" }, { ISIS_PTP_ADJ_DOWN, "Down" }, { 0, NULL} }; struct isis_tlv_ptp_adj { uint8_t adjacency_state; uint8_t extd_local_circuit_id[4]; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; uint8_t neighbor_extd_local_circuit_id[4]; }; static void osi_print_cksum(netdissect_options *, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length); static int clnp_print(netdissect_options *, const uint8_t *, u_int); static void esis_print(netdissect_options *, const uint8_t *, u_int); static int isis_print(netdissect_options *, const uint8_t *, u_int); struct isis_metric_block { uint8_t metric_default; uint8_t metric_delay; uint8_t metric_expense; uint8_t metric_error; }; struct isis_tlv_is_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_nodeid[NODE_ID_LEN]; }; struct isis_tlv_es_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; }; struct isis_tlv_ip_reach { struct isis_metric_block isis_metric_block; uint8_t prefix[4]; uint8_t mask[4]; }; static const struct tok isis_is_reach_virtual_values[] = { { 0, "IsNotVirtual"}, { 1, "IsVirtual"}, { 0, NULL } }; static const struct tok isis_restart_flag_values[] = { { 0x1, "Restart Request"}, { 0x2, "Restart Acknowledgement"}, { 0x4, "Suppress adjacency advertisement"}, { 0, NULL } }; struct isis_common_header { uint8_t nlpid; uint8_t fixed_len; uint8_t version; /* Protocol version */ uint8_t id_length; uint8_t pdu_type; /* 3 MSbits are reserved */ uint8_t pdu_version; /* Packet format version */ uint8_t reserved; uint8_t max_area; }; struct isis_iih_lan_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t priority; uint8_t lan_id[NODE_ID_LEN]; }; struct isis_iih_ptp_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t circuit_id; }; struct isis_lsp_header { uint8_t pdu_len[2]; uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; uint8_t typeblock; }; struct isis_csnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; uint8_t start_lsp_id[LSP_ID_LEN]; uint8_t end_lsp_id[LSP_ID_LEN]; }; struct isis_psnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; }; struct isis_tlv_lsp { uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; }; #define ISIS_COMMON_HEADER_SIZE (sizeof(struct isis_common_header)) #define ISIS_IIH_LAN_HEADER_SIZE (sizeof(struct isis_iih_lan_header)) #define ISIS_IIH_PTP_HEADER_SIZE (sizeof(struct isis_iih_ptp_header)) #define ISIS_LSP_HEADER_SIZE (sizeof(struct isis_lsp_header)) #define ISIS_CSNP_HEADER_SIZE (sizeof(struct isis_csnp_header)) #define ISIS_PSNP_HEADER_SIZE (sizeof(struct isis_psnp_header)) void isoclns_print(netdissect_options *ndo, const uint8_t *p, u_int length) { if (!ND_TTEST(*p)) { /* enough bytes on the wire ? 
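(if the NLPID octet is beyond the captured data, print "|OSI" and bail out)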
*/ ND_PRINT((ndo, "|OSI")); return; } if (ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID %s (0x%02x): ", tok2str(nlpid_values, "Unknown", *p), *p)); switch (*p) { case NLPID_CLNP: if (!clnp_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_ESIS: esis_print(ndo, p, length); return; case NLPID_ISIS: if (!isis_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_NULLNS: ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); break; case NLPID_Q933: q933_print(ndo, p + 1, length - 1); break; case NLPID_IP: ip_print(ndo, p + 1, length - 1); break; case NLPID_IP6: ip6_print(ndo, p + 1, length - 1); break; case NLPID_PPP: ppp_print(ndo, p + 1, length - 1); break; default: if (!ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID 0x%02x unknown", *p)); ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); if (length > 1) print_unknown_data(ndo, p, "\n\t", length); break; } } #define CLNP_PDU_ER 1 #define CLNP_PDU_DT 28 #define CLNP_PDU_MD 29 #define CLNP_PDU_ERQ 30 #define CLNP_PDU_ERP 31 static const struct tok clnp_pdu_values[] = { { CLNP_PDU_ER, "Error Report"}, { CLNP_PDU_MD, "MD"}, { CLNP_PDU_DT, "Data"}, { CLNP_PDU_ERQ, "Echo Request"}, { CLNP_PDU_ERP, "Echo Response"}, { 0, NULL } }; struct clnp_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t lifetime; /* units of 500ms */ uint8_t type; uint8_t segment_length[2]; uint8_t cksum[2]; }; struct clnp_segment_header_t { uint8_t data_unit_id[2]; uint8_t segment_offset[2]; uint8_t total_length[2]; }; /* * clnp_print * Decode CLNP packets. Return 0 on error. */ static int clnp_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr,*source_address,*dest_address; u_int li,tlen,nsap_offset,source_address_length,dest_address_length, clnp_pdu_type, clnp_flags; const struct clnp_header_t *clnp_header; const struct clnp_segment_header_t *clnp_segment_header; uint8_t rfd_error_major,rfd_error_minor; clnp_header = (const struct clnp_header_t *) pptr; ND_TCHECK(*clnp_header); li = clnp_header->length_indicator; optr = pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "CLNP")); /* * Sanity checking of the header. 
*/ if (clnp_header->version != CLNP_VERSION) { ND_PRINT((ndo, "version %d packet not supported", clnp_header->version)); return (0); } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return (0); } if (li < sizeof(struct clnp_header_t)) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return (0); } /* FIXME further header sanity checking */ clnp_pdu_type = clnp_header->type & CLNP_PDU_TYPE_MASK; clnp_flags = clnp_header->type & CLNP_FLAG_MASK; pptr += sizeof(struct clnp_header_t); li -= sizeof(struct clnp_header_t); if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); dest_address_length = *pptr; pptr += 1; li -= 1; if (li < dest_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, dest_address_length); dest_address = pptr; pptr += dest_address_length; li -= dest_address_length; if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); source_address_length = *pptr; pptr += 1; li -= 1; if (li < source_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, source_address_length); source_address = pptr; pptr += source_address_length; li -= source_address_length; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s > %s, %s, length %u", ndo->ndo_eflag ? "" : ", ", isonsap_string(ndo, source_address, source_address_length), isonsap_string(ndo, dest_address, dest_address_length), tok2str(clnp_pdu_values,"unknown (%u)",clnp_pdu_type), length)); return (1); } ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s PDU, hlen: %u, v: %u, lifetime: %u.%us, Segment PDU length: %u, checksum: 0x%04x", tok2str(clnp_pdu_values, "unknown (%u)",clnp_pdu_type), clnp_header->length_indicator, clnp_header->version, clnp_header->lifetime/2, (clnp_header->lifetime%2)*5, EXTRACT_16BITS(clnp_header->segment_length), EXTRACT_16BITS(clnp_header->cksum))); osi_print_cksum(ndo, optr, EXTRACT_16BITS(clnp_header->cksum), 7, clnp_header->length_indicator); ND_PRINT((ndo, "\n\tFlags [%s]", bittok2str(clnp_flag_values, "none", clnp_flags))); ND_PRINT((ndo, "\n\tsource address (length %u): %s\n\tdest address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length), dest_address_length, isonsap_string(ndo, dest_address, dest_address_length))); if (clnp_flags & CLNP_SEGMENT_PART) { if (li < sizeof(const struct clnp_segment_header_t)) { ND_PRINT((ndo, "li < size of fixed part of CLNP header, addresses, and segment part")); return (0); } clnp_segment_header = (const struct clnp_segment_header_t *) pptr; ND_TCHECK(*clnp_segment_header); ND_PRINT((ndo, "\n\tData Unit ID: 0x%04x, Segment Offset: %u, Total PDU Length: %u", EXTRACT_16BITS(clnp_segment_header->data_unit_id), EXTRACT_16BITS(clnp_segment_header->segment_offset), EXTRACT_16BITS(clnp_segment_header->total_length))); pptr+=sizeof(const struct clnp_segment_header_t); li-=sizeof(const struct clnp_segment_header_t); } /* now walk the options */ while (li >= 2) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return (0); } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return (0); } ND_TCHECK2(*pptr, opli); li -= opli; tptr = pptr; tlen = opli; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(clnp_option_values,"Unknown",op), op, opli)); /* * We've already checked that the entire option is present * in the captured packet with the ND_TCHECK2() call. * Therefore, we don't need to do ND_TCHECK()/ND_TCHECK2() * checks. * We do, however, need to check tlen, to make sure we * don't run past the end of the option. 
*/ switch (op) { case CLNP_OPTION_ROUTE_RECORDING: /* those two options share the format */ case CLNP_OPTION_SOURCE_ROUTING: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "%s %s", tok2str(clnp_option_sr_rr_values,"Unknown",*tptr), tok2str(clnp_option_sr_rr_string_values, "Unknown Option %u", op))); nsap_offset=*(tptr+1); if (nsap_offset == 0) { ND_PRINT((ndo, " Bad NSAP offset (0)")); break; } nsap_offset-=1; /* offset to nsap list */ if (nsap_offset > tlen) { ND_PRINT((ndo, " Bad NSAP offset (past end of option)")); break; } tptr+=nsap_offset; tlen-=nsap_offset; while (tlen > 0) { source_address_length=*tptr; if (tlen < source_address_length+1) { ND_PRINT((ndo, "\n\t NSAP address goes past end of option")); break; } if (source_address_length > 0) { source_address=(tptr+1); ND_TCHECK2(*source_address, source_address_length); ND_PRINT((ndo, "\n\t NSAP address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length))); } tlen-=source_address_length+1; } break; case CLNP_OPTION_PRIORITY: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "0x%1x", *tptr&0x0f)); break; case CLNP_OPTION_QOS_MAINTENANCE: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s", tok2str(clnp_option_scope_values, "Reserved", *tptr&CLNP_OPTION_SCOPE_MASK))); if ((*tptr&CLNP_OPTION_SCOPE_MASK) == CLNP_OPTION_SCOPE_GLOBAL) ND_PRINT((ndo, "\n\t QoS Flags [%s]", bittok2str(clnp_option_qos_global_values, "none", *tptr&CLNP_OPTION_OPTION_QOS_MASK))); break; case CLNP_OPTION_SECURITY: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s, Security-Level %u", tok2str(clnp_option_scope_values,"Reserved",*tptr&CLNP_OPTION_SCOPE_MASK), *(tptr+1))); break; case CLNP_OPTION_DISCARD_REASON: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } rfd_error_major = (*tptr&0xf0) >> 4; rfd_error_minor = *tptr&0x0f; ND_PRINT((ndo, "\n\t Class: %s Error (0x%01x), %s (0x%01x)", tok2str(clnp_option_rfd_class_values,"Unknown",rfd_error_major), rfd_error_major, tok2str(clnp_option_rfd_error_class[rfd_error_major],"Unknown",rfd_error_minor), rfd_error_minor)); break; case CLNP_OPTION_PADDING: ND_PRINT((ndo, "padding data")); break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } switch (clnp_pdu_type) { case CLNP_PDU_ER: /* fall through */ case CLNP_PDU_ERP: ND_TCHECK(*pptr); if (*(pptr) == NLPID_CLNP) { ND_PRINT((ndo, "\n\t-----original packet-----\n\t")); /* FIXME recursion protection */ clnp_print(ndo, pptr, length - clnp_header->length_indicator); break; } case CLNP_PDU_DT: case CLNP_PDU_MD: case CLNP_PDU_ERQ: default: /* dump the PDU specific data */ if (length-(pptr-optr) > 0) { ND_PRINT((ndo, "\n\t undecoded non-header data, length %u", length-clnp_header->length_indicator)); print_unknown_data(ndo, pptr, "\n\t ", length - (pptr - optr)); } } return (1); trunc: ND_PRINT((ndo, "[|clnp]")); return (1); } #define ESIS_PDU_REDIRECT 6 #define ESIS_PDU_ESH 2 #define ESIS_PDU_ISH 4 static const struct tok esis_pdu_values[] = { { ESIS_PDU_REDIRECT, "redirect"}, { ESIS_PDU_ESH, "ESH"}, { ESIS_PDU_ISH, "ISH"}, { 0, NULL } }; struct esis_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t reserved; uint8_t type; 
uint8_t holdtime[2]; uint8_t cksum[2]; }; static void esis_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr; u_int li,esis_pdu_type,source_address_length, source_address_number; const struct esis_header_t *esis_header; if (!ndo->ndo_eflag) ND_PRINT((ndo, "ES-IS")); if (length <= 2) { ND_PRINT((ndo, ndo->ndo_qflag ? "bad pkt!" : "no header at all!")); return; } esis_header = (const struct esis_header_t *) pptr; ND_TCHECK(*esis_header); li = esis_header->length_indicator; optr = pptr; /* * Sanity checking of the header. */ if (esis_header->nlpid != NLPID_ESIS) { ND_PRINT((ndo, " nlpid 0x%02x packet not supported", esis_header->nlpid)); return; } if (esis_header->version != ESIS_VERSION) { ND_PRINT((ndo, " version %d packet not supported", esis_header->version)); return; } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return; } if (li < sizeof(struct esis_header_t) + 2) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return; } esis_pdu_type = esis_header->type & ESIS_PDU_TYPE_MASK; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s, length %u", ndo->ndo_eflag ? "" : ", ", tok2str(esis_pdu_values,"unknown type (%u)",esis_pdu_type), length)); return; } else ND_PRINT((ndo, "%slength %u\n\t%s (%u)", ndo->ndo_eflag ? "" : ", ", length, tok2str(esis_pdu_values,"unknown type: %u", esis_pdu_type), esis_pdu_type)); ND_PRINT((ndo, ", v: %u%s", esis_header->version, esis_header->version == ESIS_VERSION ? "" : "unsupported" )); ND_PRINT((ndo, ", checksum: 0x%04x", EXTRACT_16BITS(esis_header->cksum))); osi_print_cksum(ndo, pptr, EXTRACT_16BITS(esis_header->cksum), 7, li); ND_PRINT((ndo, ", holding time: %us, length indicator: %u", EXTRACT_16BITS(esis_header->holdtime), li)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, optr, "\n\t", sizeof(struct esis_header_t)); pptr += sizeof(struct esis_header_t); li -= sizeof(struct esis_header_t); switch (esis_pdu_type) { case ESIS_PDU_REDIRECT: { const uint8_t *dst, *snpa, *neta; u_int dstl, snpal, netal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } dstl = *pptr; pptr++; li--; ND_TCHECK2(*pptr, dstl); if (li < dstl) { ND_PRINT((ndo, ", bad redirect/li")); return; } dst = pptr; pptr += dstl; li -= dstl; ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, dst, dstl))); ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpal = *pptr; pptr++; li--; ND_TCHECK2(*pptr, snpal); if (li < snpal) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpa = pptr; pptr += snpal; li -= snpal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } netal = *pptr; pptr++; ND_TCHECK2(*pptr, netal); if (li < netal) { ND_PRINT((ndo, ", bad redirect/li")); return; } neta = pptr; pptr += netal; li -= netal; if (snpal == 6) ND_PRINT((ndo, "\n\t SNPA (length: %u): %s", snpal, etheraddr_string(ndo, snpa))); else ND_PRINT((ndo, "\n\t SNPA (length: %u): %s", snpal, linkaddr_string(ndo, snpa, LINKADDR_OTHER, snpal))); if (netal != 0) ND_PRINT((ndo, "\n\t NET (length: %u) %s", netal, isonsap_string(ndo, neta, netal))); break; } case ESIS_PDU_ESH: ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_number = *pptr; pptr++; li--; ND_PRINT((ndo, "\n\t Number of Source Addresses: %u", source_address_number)); while (source_address_number > 0) { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } 
source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad esh/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; source_address_number--; } break; case ESIS_PDU_ISH: { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad ish/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad ish/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; break; } default: if (ndo->ndo_vflag <= 1) { if (pptr < ndo->ndo_snapend) print_unknown_data(ndo, pptr, "\n\t ", ndo->ndo_snapend - pptr); } return; } /* now walk the options */ while (li != 0) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return; } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return; } li -= opli; tptr = pptr; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(esis_option_values,"Unknown",op), op, opli)); switch (op) { case ESIS_OPTION_ES_CONF_TIME: if (opli == 2) { ND_TCHECK2(*pptr, 2); ND_PRINT((ndo, "%us", EXTRACT_16BITS(tptr))); } else ND_PRINT((ndo, "(bad length)")); break; case ESIS_OPTION_PROTOCOLS: while (opli>0) { ND_TCHECK(*pptr); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (opli>1) /* further NPLIDs ? - put comma */ ND_PRINT((ndo, ", ")); tptr++; opli--; } break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ case ESIS_OPTION_QOS_MAINTENANCE: case ESIS_OPTION_SECURITY: case ESIS_OPTION_PRIORITY: case ESIS_OPTION_ADDRESS_MASK: case ESIS_OPTION_SNPA_MASK: default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } trunc: return; } static void isis_print_mcid(netdissect_options *ndo, const struct isis_spb_mcid *mcid) { int i; ND_TCHECK(*mcid); ND_PRINT((ndo, "ID: %d, Name: ", mcid->format_id)); if (fn_printzp(ndo, mcid->name, 32, ndo->ndo_snapend)) goto trunc; ND_PRINT((ndo, "\n\t Lvl: %d", EXTRACT_16BITS(mcid->revision_lvl))); ND_PRINT((ndo, ", Digest: ")); for(i=0;i<16;i++) ND_PRINT((ndo, "%.2x ", mcid->digest[i])); trunc: ND_PRINT((ndo, "%s", tstr)); } static int isis_print_mt_port_cap_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len; const struct isis_subtlv_spb_mcid *subtlv_spb_mcid; int i; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_port_cap_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); /*len -= TLV_TYPE_LEN_OFFSET;*/ len = len -2; switch (stlv_type) { case ISIS_SUBTLV_SPB_MCID: { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_MCID_MIN_LEN); subtlv_spb_mcid = (const struct isis_subtlv_spb_mcid *)tptr; ND_PRINT((ndo, "\n\t MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ ND_PRINT((ndo, "\n\t AUX-MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->aux_mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; 
*/ tptr = tptr + sizeof(struct isis_subtlv_spb_mcid); len = len - sizeof(struct isis_subtlv_spb_mcid); break; } case ISIS_SUBTLV_SPB_DIGEST: { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_DIGEST_MIN_LEN); ND_PRINT((ndo, "\n\t RES: %d V: %d A: %d D: %d", (*(tptr) >> 5), (((*tptr)>> 4) & 0x01), ((*(tptr) >> 2) & 0x03), ((*tptr) & 0x03))); tptr++; ND_PRINT((ndo, "\n\t Digest: ")); for(i=1;i<=8; i++) { ND_PRINT((ndo, "%08x ", EXTRACT_32BITS(tptr))); if (i%4 == 0 && i != 8) ND_PRINT((ndo, "\n\t ")); tptr = tptr + 4; } len = len - ISIS_SUBTLV_SPB_DIGEST_MIN_LEN; break; } case ISIS_SUBTLV_SPB_BVID: { ND_TCHECK2(*(tptr), stlv_len); while (len >= ISIS_SUBTLV_SPB_BVID_MIN_LEN) { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_BVID_MIN_LEN); ND_PRINT((ndo, "\n\t ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " BVID: %d, U:%01x M:%01x ", (EXTRACT_16BITS (tptr) >> 4) , (EXTRACT_16BITS (tptr) >> 3) & 0x01, (EXTRACT_16BITS (tptr) >> 2) & 0x01)); tptr = tptr + 2; len = len - ISIS_SUBTLV_SPB_BVID_MIN_LEN; } break; } default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static int isis_print_mt_capability_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len, tmp; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_capability_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); len = len - 2; switch (stlv_type) { case ISIS_SUBTLV_SPB_INSTANCE: ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN); ND_PRINT((ndo, "\n\t CIST Root-ID: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Path Cost: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Prio: %d", EXTRACT_16BITS(tptr))); tptr = tptr + 2; ND_PRINT((ndo, "\n\t RES: %d", EXTRACT_16BITS(tptr) >> 5)); ND_PRINT((ndo, ", V: %d", (EXTRACT_16BITS(tptr) >> 4) & 0x0001)); ND_PRINT((ndo, ", SPSource-ID: %d", (EXTRACT_32BITS(tptr) & 0x000fffff))); tptr = tptr+4; ND_PRINT((ndo, ", No of Trees: %x", *(tptr))); tmp = *(tptr++); len = len - ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN; while (tmp) { ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN); ND_PRINT((ndo, "\n\t U:%d, M:%d, A:%d, RES:%d", *(tptr) >> 7, (*(tptr) >> 6) & 0x01, (*(tptr) >> 5) & 0x01, (*(tptr) & 0x1f))); tptr++; ND_PRINT((ndo, ", ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr + 4; ND_PRINT((ndo, ", BVID: %d, SPVID: %d", (EXTRACT_24BITS(tptr) >> 12) & 0x000fff, EXTRACT_24BITS(tptr) & 0x000fff)); tptr = tptr + 3; len = len - ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN; tmp--; } break; case ISIS_SUBTLV_SPBM_SI: ND_TCHECK2(*tptr, 8); ND_PRINT((ndo, "\n\t BMAC: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, "%04x", EXTRACT_16BITS(tptr))); tptr = tptr+2; ND_PRINT((ndo, ", RES: %d, VID: %d", EXTRACT_16BITS(tptr) >> 12, (EXTRACT_16BITS(tptr)) & 0x0fff)); tptr = tptr+2; len = len - 8; stlv_len = stlv_len - 8; while (stlv_len >= 4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t T: %d, R: %d, RES: %d, ISID: %d", (EXTRACT_32BITS(tptr) >> 31), (EXTRACT_32BITS(tptr) >> 30) & 0x01, (EXTRACT_32BITS(tptr) >> 24) & 0x03f, (EXTRACT_32BITS(tptr)) & 0x0ffffff)); tptr = tptr + 4; len = len - 4; stlv_len = stlv_len - 4; } break; default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } /* shared routine for printing system, node and lsp-ids */ static char * 
isis_print_id(const uint8_t *cp, int id_len) { int i; static char id[sizeof("xxxx.xxxx.xxxx.yy-zz")]; char *pos = id; for (i = 1; i <= SYSTEM_ID_LEN; i++) { snprintf(pos, sizeof(id) - (pos - id), "%02x", *cp++); pos += strlen(pos); if (i == 2 || i == 4) *pos++ = '.'; } if (id_len >= NODE_ID_LEN) { snprintf(pos, sizeof(id) - (pos - id), ".%02x", *cp++); pos += strlen(pos); } if (id_len == LSP_ID_LEN) snprintf(pos, sizeof(id) - (pos - id), "-%02x", *cp); return (id); } /* print the 4-byte metric block which is common found in the old-style TLVs */ static int isis_print_metric_block(netdissect_options *ndo, const struct isis_metric_block *isis_metric_block) { ND_PRINT((ndo, ", Default Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_default), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_delay)) ND_PRINT((ndo, "\n\t\t Delay Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_delay), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_expense)) ND_PRINT((ndo, "\n\t\t Expense Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_expense), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_error)) ND_PRINT((ndo, "\n\t\t Error Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_error), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_error) ? "External" : "Internal")); return(1); /* everything is ok */ } static int isis_print_tlv_ip_reach(netdissect_options *ndo, const uint8_t *cp, const char *ident, int length) { int prefix_len; const struct isis_tlv_ip_reach *tlv_ip_reach; tlv_ip_reach = (const struct isis_tlv_ip_reach *)cp; while (length > 0) { if ((size_t)length < sizeof(*tlv_ip_reach)) { ND_PRINT((ndo, "short IPv4 Reachability (%d vs %lu)", length, (unsigned long)sizeof(*tlv_ip_reach))); return (0); } if (!ND_TTEST(*tlv_ip_reach)) return (0); prefix_len = mask2plen(EXTRACT_32BITS(tlv_ip_reach->mask)); if (prefix_len == -1) ND_PRINT((ndo, "%sIPv4 prefix: %s mask %s", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), ipaddr_string(ndo, (tlv_ip_reach->mask)))); else ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), prefix_len)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u, %s", ISIS_LSP_TLV_METRIC_UPDOWN(tlv_ip_reach->isis_metric_block.metric_default) ? "down" : "up", ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_default), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_delay)) ND_PRINT((ndo, "%s Delay Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_delay), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_expense)) ND_PRINT((ndo, "%s Expense Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_expense), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_expense) ? 
"External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_error)) ND_PRINT((ndo, "%s Error Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_error), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_error) ? "External" : "Internal")); length -= sizeof(struct isis_tlv_ip_reach); tlv_ip_reach++; } return (1); } /* * this is the common IP-REACH subTLV decoder it is called * from various EXTD-IP REACH TLVs (135,235,236,237) */ static int isis_print_ip_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, int subt, int subl, const char *ident) { /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_ip_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr,subl); switch(subt) { case ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR: /* fall through */ case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32: while (subl >= 4) { ND_PRINT((ndo, ", 0x%08x (=%u)", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr))); tptr+=4; subl-=4; } break; case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64: while (subl >= 8) { ND_PRINT((ndo, ", 0x%08x%08x", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr+4))); tptr+=8; subl-=8; } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: ND_PRINT((ndo, "%s", ident)); ND_PRINT((ndo, "%s", tstr)); return(0); } /* * this is the common IS-REACH subTLV decoder it is called * from isis_print_ext_is_reach() */ static int isis_print_is_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, u_int subt, u_int subl, const char *ident) { u_int te_class,priority_level,gmpls_switch_cap; union { /* int to float conversion buffer for several subTLVs */ float f; uint32_t i; } bw; /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_is_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr, subl); switch(subt) { case ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP: case ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID: case ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID: if (subl >= 4) { ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr))); if (subl == 8) /* rfc4205 */ ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr+4))); } break; case ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR: case ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR: if (subl >= sizeof(struct in_addr)) ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW : case ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW: if (subl >= 4) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, ", %.3f Mbps", bw.f * 8 / 1000000)); } break; case ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW : if (subl >= 32) { for (te_class = 0; te_class < 8; te_class++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s TE-Class %u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } } break; case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS: /* fall through */ case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD: ND_PRINT((ndo, "%sBandwidth Constraints Model ID: %s (%u)", ident, tok2str(diffserv_te_bc_values, "unknown", *tptr), *tptr)); tptr++; /* decode BCs until the subTLV ends */ for (te_class = 0; te_class < (subl-1)/4; te_class++) { ND_TCHECK2(*tptr, 4); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Bandwidth constraint CT%u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } break; case ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC: if (subl >= 3) ND_PRINT((ndo, ", %u", 
EXTRACT_24BITS(tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE: if (subl == 2) { ND_PRINT((ndo, ", [ %s ] (0x%04x)", bittok2str(isis_subtlv_link_attribute_values, "Unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE: if (subl >= 2) { ND_PRINT((ndo, ", %s, Priority %u", bittok2str(gmpls_link_prot_values, "none", *tptr), *(tptr+1))); } break; case ISIS_SUBTLV_SPB_METRIC: if (subl >= 6) { ND_PRINT((ndo, ", LM: %u", EXTRACT_24BITS(tptr))); tptr=tptr+3; ND_PRINT((ndo, ", P: %u", *(tptr))); tptr++; ND_PRINT((ndo, ", P-ID: %u", EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR: if (subl >= 36) { gmpls_switch_cap = *tptr; ND_PRINT((ndo, "%s Interface Switching Capability:%s", ident, tok2str(gmpls_switch_cap_values, "Unknown", gmpls_switch_cap))); ND_PRINT((ndo, ", LSP Encoding: %s", tok2str(gmpls_encoding_values, "Unknown", *(tptr + 1)))); tptr+=4; ND_PRINT((ndo, "%s Max LSP Bandwidth:", ident)); for (priority_level = 0; priority_level < 8; priority_level++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s priority level %d: %.3f Mbps", ident, priority_level, bw.f * 8 / 1000000)); tptr+=4; } subl-=36; switch (gmpls_switch_cap) { case GMPLS_PSC1: case GMPLS_PSC2: case GMPLS_PSC3: case GMPLS_PSC4: ND_TCHECK2(*tptr, 6); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Interface MTU: %u", ident, EXTRACT_16BITS(tptr + 4))); break; case GMPLS_TSC: ND_TCHECK2(*tptr, 8); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Indication %s", ident, tok2str(gmpls_switch_cap_tsc_indication_values, "Unknown (%u)", *(tptr + 4)))); break; default: /* there is some optional stuff left to decode but this is as of yet not specified so just lets hexdump what is left */ if(subl>0){ if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); } } } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: return(0); } /* * this is the common IS-REACH decoder it is called * from various EXTD-IS REACH style TLVs (22,24,222) */ static int isis_print_ext_is_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, int tlv_type) { char ident_buffer[20]; int subtlv_type,subtlv_len,subtlv_sum_len; int proc_bytes = 0; /* how many bytes did we process ? */ if (!ND_TTEST2(*tptr, NODE_ID_LEN)) return(0); ND_PRINT((ndo, "%sIS Neighbor: %s", ident, isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); if (tlv_type != ISIS_TLV_IS_ALIAS_ID) { /* the Alias TLV Metric field is implicit 0 */ if (!ND_TTEST2(*tptr, 3)) /* and is therefore skipped */ return(0); ND_PRINT((ndo, ", Metric: %d", EXTRACT_24BITS(tptr))); tptr+=3; } if (!ND_TTEST2(*tptr, 1)) return(0); subtlv_sum_len=*(tptr++); /* read out subTLV length */ proc_bytes=NODE_ID_LEN+3+1; ND_PRINT((ndo, ", %ssub-TLVs present",subtlv_sum_len ? 
"" : "no ")); if (subtlv_sum_len) { ND_PRINT((ndo, " (%u)", subtlv_sum_len)); while (subtlv_sum_len>0) { if (!ND_TTEST2(*tptr,2)) return(0); subtlv_type=*(tptr++); subtlv_len=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_is_reach_subtlv(ndo, tptr, subtlv_type, subtlv_len, ident_buffer)) return(0); tptr+=subtlv_len; subtlv_sum_len-=(subtlv_len+2); proc_bytes+=(subtlv_len+2); } } return(proc_bytes); } /* * this is the common Multi Topology ID decoder * it is called from various MT-TLVs (222,229,235,237) */ static int isis_print_mtid(netdissect_options *ndo, const uint8_t *tptr, const char *ident) { if (!ND_TTEST2(*tptr, 2)) return(0); ND_PRINT((ndo, "%s%s", ident, tok2str(isis_mt_values, "Reserved for IETF Consensus", ISIS_MASK_MTID(EXTRACT_16BITS(tptr))))); ND_PRINT((ndo, " Topology (0x%03x), Flags: [%s]", ISIS_MASK_MTID(EXTRACT_16BITS(tptr)), bittok2str(isis_mt_flag_values, "none",ISIS_MASK_MTFLAGS(EXTRACT_16BITS(tptr))))); return(2); } /* * this is the common extended IP reach decoder * it is called from TLVs (135,235,236,237) * we process the TLV and optional subTLVs and return * the amount of processed bytes */ static int isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? 
", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); } /* * Clear checksum and lifetime prior to signature verification. */ static void isis_clear_checksum_lifetime(void *header) { struct isis_lsp_header *header_lsp = (struct isis_lsp_header *) header; header_lsp->checksum[0] = 0; header_lsp->checksum[1] = 0; header_lsp->remaining_lifetime[0] = 0; header_lsp->remaining_lifetime[1] = 0; } /* * isis_print * Decode IS-IS packets. Return 0 on error. */ static int isis_print(netdissect_options *ndo, const uint8_t *p, u_int length) { const struct isis_common_header *isis_header; const struct isis_iih_lan_header *header_iih_lan; const struct isis_iih_ptp_header *header_iih_ptp; const struct isis_lsp_header *header_lsp; const struct isis_csnp_header *header_csnp; const struct isis_psnp_header *header_psnp; const struct isis_tlv_lsp *tlv_lsp; const struct isis_tlv_ptp_adj *tlv_ptp_adj; const struct isis_tlv_is_reach *tlv_is_reach; const struct isis_tlv_es_reach *tlv_es_reach; uint8_t pdu_type, max_area, id_length, tlv_type, tlv_len, tmp, alen, lan_alen, prefix_len; uint8_t ext_is_len, ext_ip_len, mt_len; const uint8_t *optr, *pptr, *tptr; u_short packet_len,pdu_len, key_id; u_int i,vendor_id; int sigcheck; packet_len=length; optr = p; /* initialize the _o_riginal pointer to the packet start - need it for parsing the checksum TLV and authentication TLV verification */ isis_header = (const struct isis_common_header *)p; ND_TCHECK(*isis_header); if (length < ISIS_COMMON_HEADER_SIZE) goto trunc; pptr = p+(ISIS_COMMON_HEADER_SIZE); header_iih_lan = (const struct isis_iih_lan_header *)pptr; header_iih_ptp = (const struct isis_iih_ptp_header *)pptr; header_lsp = (const struct isis_lsp_header *)pptr; header_csnp = (const struct isis_csnp_header *)pptr; header_psnp = (const struct isis_psnp_header *)pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "IS-IS")); /* * Sanity checking of the header. 
*/ if (isis_header->version != ISIS_VERSION) { ND_PRINT((ndo, "version %d packet not supported", isis_header->version)); return (0); } if ((isis_header->id_length != SYSTEM_ID_LEN) && (isis_header->id_length != 0)) { ND_PRINT((ndo, "system ID length of %d is not supported", isis_header->id_length)); return (0); } if (isis_header->pdu_version != ISIS_VERSION) { ND_PRINT((ndo, "version %d packet not supported", isis_header->pdu_version)); return (0); } if (length < isis_header->fixed_len) { ND_PRINT((ndo, "fixed header length %u > packet length %u", isis_header->fixed_len, length)); return (0); } if (isis_header->fixed_len < ISIS_COMMON_HEADER_SIZE) { ND_PRINT((ndo, "fixed header length %u < minimum header size %u", isis_header->fixed_len, (u_int)ISIS_COMMON_HEADER_SIZE)); return (0); } max_area = isis_header->max_area; switch(max_area) { case 0: max_area = 3; /* silly shit */ break; case 255: ND_PRINT((ndo, "bad packet -- 255 areas")); return (0); default: break; } id_length = isis_header->id_length; switch(id_length) { case 0: id_length = 6; /* silly shit again */ break; case 1: /* 1-8 are valid sys-ID lengths */ case 2: case 3: case 4: case 5: case 6: case 7: case 8: break; case 255: id_length = 0; /* entirely useless */ break; default: break; } /* toss any non 6-byte sys-ID len PDUs */ if (id_length != 6 ) { ND_PRINT((ndo, "bad packet -- illegal sys-ID length (%u)", id_length)); return (0); } pdu_type=isis_header->pdu_type; /* in non-verbose mode print the basic PDU Type plus PDU specific brief information */ if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, "%s%s", ndo->ndo_eflag ? "" : ", ", tok2str(isis_pdu_values, "unknown PDU-Type %u", pdu_type))); } else { /* ok they seem to want to know everything - let's fully decode it */ ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ?
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s, hlen: %u, v: %u, pdu-v: %u, sys-id-len: %u (%u), max-area: %u (%u)", tok2str(isis_pdu_values, "unknown, type %u", pdu_type), isis_header->fixed_len, isis_header->version, isis_header->pdu_version, id_length, isis_header->id_length, max_area, isis_header->max_area)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, optr, "\n\t", 8)) /* provide the _o_riginal pointer */ return (0); /* for optionally debugging the common header */ } } switch (pdu_type) { case ISIS_PDU_L1_LAN_IIH: case ISIS_PDU_L2_LAN_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_lan); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_lan->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", lan-id %s, prio %u", isis_print_id(header_iih_lan->lan_id,NODE_ID_LEN), header_iih_lan->priority)); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_lan->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_lan->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_lan->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_lan->circuit_type))); ND_PRINT((ndo, "\n\t lan-id: %s, Priority: %u, PDU length: %u", isis_print_id(header_iih_lan->lan_id, NODE_ID_LEN), (header_iih_lan->priority) & ISIS_LAN_PRIORITY_MASK, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_LAN_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); break; case ISIS_PDU_PTP_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_ptp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_ptp->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_ptp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_ptp->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_ptp->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_ptp->circuit_type))); ND_PRINT((ndo, "\n\t circuit-id: 0x%02x, PDU length: %u", header_iih_ptp->circuit_id, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_PTP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); break; case ISIS_PDU_L1_LSP: case ISIS_PDU_L2_LSP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header 
length %u should be %lu", isis_header->fixed_len, (unsigned long)ISIS_LSP_HEADER_SIZE)); return (0); } ND_TCHECK(*header_lsp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", lsp-id %s, seq 0x%08x, lifetime %5us", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_lsp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t lsp-id: %s, seq: 0x%08x, lifetime: %5us\n\t chksum: 0x%04x", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime), EXTRACT_16BITS(header_lsp->checksum))); osi_print_cksum(ndo, (const uint8_t *)header_lsp->lsp_id, EXTRACT_16BITS(header_lsp->checksum), 12, length-12); ND_PRINT((ndo, ", PDU length: %u, Flags: [ %s", pdu_len, ISIS_MASK_LSP_OL_BIT(header_lsp->typeblock) ? "Overload bit set, " : "")); if (ISIS_MASK_LSP_ATT_BITS(header_lsp->typeblock)) { ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DEFAULT_BIT(header_lsp->typeblock) ? "default " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DELAY_BIT(header_lsp->typeblock) ? "delay " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_EXPENSE_BIT(header_lsp->typeblock) ? "expense " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_ERROR_BIT(header_lsp->typeblock) ? "error " : "")); ND_PRINT((ndo, "ATT bit set, ")); } ND_PRINT((ndo, "%s", ISIS_MASK_LSP_PARTITION_BIT(header_lsp->typeblock) ? "P bit set, " : "")); ND_PRINT((ndo, "%s ]", tok2str(isis_lsp_istype_values, "Unknown(0x%x)", ISIS_MASK_LSP_ISTYPE_BITS(header_lsp->typeblock)))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_LSP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); break; case ISIS_PDU_L1_CSNP: case ISIS_PDU_L2_CSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_csnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_csnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_csnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_csnp->source_id, NODE_ID_LEN), pdu_len)); ND_PRINT((ndo, "\n\t start lsp-id: %s", isis_print_id(header_csnp->start_lsp_id, LSP_ID_LEN))); ND_PRINT((ndo, "\n\t end lsp-id: %s", isis_print_id(header_csnp->end_lsp_id, LSP_ID_LEN))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_CSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); break; case ISIS_PDU_L1_PSNP: case ISIS_PDU_L2_PSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE)) { ND_PRINT((ndo, "- bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned 
long)(ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_psnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_psnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_psnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_psnp->source_id, NODE_ID_LEN), pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_PSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); break; default: if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", length %u", length)); return (1); } (void)print_unknown_data(ndo, pptr, "\n\t ", length); return (0); } /* * Now print the TLV's. */ while (packet_len > 0) { ND_TCHECK2(*pptr, 2); if (packet_len < 2) goto trunc; tlv_type = *pptr++; tlv_len = *pptr++; tmp =tlv_len; /* copy temporary len & pointer to packet data */ tptr = pptr; packet_len -= 2; /* first lets see if we know the TLVs name*/ ND_PRINT((ndo, "\n\t %s TLV #%u, length: %u", tok2str(isis_tlv_values, "unknown", tlv_type), tlv_type, tlv_len)); if (tlv_len == 0) /* something is invalid */ continue; if (packet_len < tlv_len) goto trunc; /* now check if we have a decoder otherwise do a hexdump at the end*/ switch (tlv_type) { case ISIS_TLV_AREA_ADDR: ND_TCHECK2(*tptr, 1); alen = *tptr++; while (tmp && alen < tmp) { ND_TCHECK2(*tptr, alen); ND_PRINT((ndo, "\n\t Area address (length: %u): %s", alen, isonsap_string(ndo, tptr, alen))); tptr += alen; tmp -= alen + 1; if (tmp==0) /* if this is the last area address do not attemt a boundary check */ break; ND_TCHECK2(*tptr, 1); alen = *tptr++; } break; case ISIS_TLV_ISNEIGH: while (tmp >= ETHER_ADDR_LEN) { ND_TCHECK2(*tptr, ETHER_ADDR_LEN); ND_PRINT((ndo, "\n\t SNPA: %s", isis_print_id(tptr, ETHER_ADDR_LEN))); tmp -= ETHER_ADDR_LEN; tptr += ETHER_ADDR_LEN; } break; case ISIS_TLV_ISNEIGH_VARLEN: if (!ND_TTEST2(*tptr, 1) || tmp < 3) /* min. TLV length */ goto trunctlv; lan_alen = *tptr++; /* LAN address length */ if (lan_alen == 0) { ND_PRINT((ndo, "\n\t LAN address length 0 bytes (invalid)")); break; } tmp --; ND_PRINT((ndo, "\n\t LAN address length %u bytes ", lan_alen)); while (tmp >= lan_alen) { ND_TCHECK2(*tptr, lan_alen); ND_PRINT((ndo, "\n\t\tIS Neighbor: %s", isis_print_id(tptr, lan_alen))); tmp -= lan_alen; tptr +=lan_alen; } break; case ISIS_TLV_PADDING: break; case ISIS_TLV_MT_IS_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; while (tmp >= 2+NODE_ID_LEN+3+1) { ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_ALIAS_ID: while (tmp >= NODE_ID_LEN+1) { /* is it worth attempting a decode ? */ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_EXT_IS_REACH: while (tmp >= NODE_ID_LEN+3+1) { /* is it worth attempting a decode ? 
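(i.e. at least a neighbor node-ID, a 3-byte metric and the subTLV length octet remain)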
*/ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_REACH: ND_TCHECK2(*tptr,1); /* check if there is one byte left to read out the virtual flag */ ND_PRINT((ndo, "\n\t %s", tok2str(isis_is_reach_virtual_values, "bogus virtual flag 0x%02x", *tptr++))); tlv_is_reach = (const struct isis_tlv_is_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_is_reach)) { ND_TCHECK(*tlv_is_reach); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tlv_is_reach->neighbor_nodeid, NODE_ID_LEN))); isis_print_metric_block(ndo, &tlv_is_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_is_reach); tlv_is_reach++; } break; case ISIS_TLV_ESNEIGH: tlv_es_reach = (const struct isis_tlv_es_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_es_reach)) { ND_TCHECK(*tlv_es_reach); ND_PRINT((ndo, "\n\t ES Neighbor: %s", isis_print_id(tlv_es_reach->neighbor_sysid, SYSTEM_ID_LEN))); isis_print_metric_block(ndo, &tlv_es_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_es_reach); tlv_es_reach++; } break; /* those two TLVs share the same format */ case ISIS_TLV_INT_IP_REACH: case ISIS_TLV_EXT_IP_REACH: if (!isis_print_tlv_ip_reach(ndo, pptr, "\n\t ", tlv_len)) return (1); break; case ISIS_TLV_EXTD_IP_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP6_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? 
*/ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6ADDR: while (tmp>=sizeof(struct in6_addr)) { ND_TCHECK2(*tptr, sizeof(struct in6_addr)); ND_PRINT((ndo, "\n\t IPv6 interface address: %s", ip6addr_string(ndo, tptr))); tptr += sizeof(struct in6_addr); tmp -= sizeof(struct in6_addr); } break; case ISIS_TLV_AUTH: ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t %s: ", tok2str(isis_subtlv_auth_values, "unknown Authentication type 0x%02x", *tptr))); switch (*tptr) { case ISIS_SUBTLV_AUTH_SIMPLE: if (fn_printzp(ndo, tptr + 1, tlv_len - 1, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_SUBTLV_AUTH_MD5: for(i=1;i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } if (tlv_len != ISIS_SUBTLV_AUTH_MD5_LEN+1) ND_PRINT((ndo, ", (invalid subTLV) ")); sigcheck = signature_verify(ndo, optr, length, tptr + 1, isis_clear_checksum_lifetime, header_lsp); ND_PRINT((ndo, " (%s)", tok2str(signature_check_values, "Unknown", sigcheck))); break; case ISIS_SUBTLV_AUTH_GENERIC: ND_TCHECK2(*(tptr + 1), 2); key_id = EXTRACT_16BITS((tptr+1)); ND_PRINT((ndo, "%u, password: ", key_id)); for(i=1 + sizeof(uint16_t);i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } break; case ISIS_SUBTLV_AUTH_PRIVATE: default: if (!print_unknown_data(ndo, tptr + 1, "\n\t\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_PTP_ADJ: tlv_ptp_adj = (const struct isis_tlv_ptp_adj *)tptr; if(tmp>=1) { ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t Adjacency State: %s (%u)", tok2str(isis_ptp_adjancey_values, "unknown", *tptr), *tptr)); tmp--; } if(tmp>sizeof(tlv_ptp_adj->extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->extd_local_circuit_id); ND_PRINT((ndo, "\n\t Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->extd_local_circuit_id))); tmp-=sizeof(tlv_ptp_adj->extd_local_circuit_id); } if(tmp>=SYSTEM_ID_LEN) { ND_TCHECK2(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t Neighbor System-ID: %s", isis_print_id(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN))); tmp-=SYSTEM_ID_LEN; } if(tmp>=sizeof(tlv_ptp_adj->neighbor_extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->neighbor_extd_local_circuit_id); ND_PRINT((ndo, "\n\t Neighbor Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->neighbor_extd_local_circuit_id))); } break; case ISIS_TLV_PROTOCOLS: ND_PRINT((ndo, "\n\t NLPID(s): ")); while (tmp>0) { ND_TCHECK2(*(tptr), 1); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (tmp>1) /* further NPLIDs ? 
- put comma */ ND_PRINT((ndo, ", ")); tptr++; tmp--; } break; case ISIS_TLV_MT_PORT_CAP: { ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t RES: %d, MTID(s): %d", (EXTRACT_16BITS (tptr) >> 12), (EXTRACT_16BITS (tptr) & 0x0fff))); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_port_cap_subtlv(ndo, tptr, tmp); break; } case ISIS_TLV_MT_CAPABILITY: ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t O: %d, RES: %d, MTID(s): %d", (EXTRACT_16BITS(tptr) >> 15) & 0x01, (EXTRACT_16BITS(tptr) >> 12) & 0x07, EXTRACT_16BITS(tptr) & 0x0fff)); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_capability_subtlv(ndo, tptr, tmp); break; case ISIS_TLV_TE_ROUTER_ID: ND_TCHECK2(*pptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t Traffic Engineering Router ID: %s", ipaddr_string(ndo, pptr))); break; case ISIS_TLV_IPADDR: while (tmp>=sizeof(struct in_addr)) { ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr += sizeof(struct in_addr); tmp -= sizeof(struct in_addr); } break; case ISIS_TLV_HOSTNAME: ND_PRINT((ndo, "\n\t Hostname: ")); if (fn_printzp(ndo, tptr, tmp, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_TLV_SHARED_RISK_GROUP: if (tmp < NODE_ID_LEN) break; ND_TCHECK2(*tptr, NODE_ID_LEN); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); tmp-=(NODE_ID_LEN); if (tmp < 1) break; ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, ", Flags: [%s]", ISIS_MASK_TLV_SHARED_RISK_GROUP(*tptr++) ? "numbered" : "unnumbered")); tmp--; if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 neighbor address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); while (tmp>=4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Link-ID: 0x%08x", EXTRACT_32BITS(tptr))); tptr+=4; tmp-=4; } break; case ISIS_TLV_LSP: tlv_lsp = (const struct isis_tlv_lsp *)tptr; while(tmp>=sizeof(struct isis_tlv_lsp)) { ND_TCHECK((tlv_lsp->lsp_id)[LSP_ID_LEN-1]); ND_PRINT((ndo, "\n\t lsp-id: %s", isis_print_id(tlv_lsp->lsp_id, LSP_ID_LEN))); ND_TCHECK2(tlv_lsp->sequence_number, 4); ND_PRINT((ndo, ", seq: 0x%08x", EXTRACT_32BITS(tlv_lsp->sequence_number))); ND_TCHECK2(tlv_lsp->remaining_lifetime, 2); ND_PRINT((ndo, ", lifetime: %5ds", EXTRACT_16BITS(tlv_lsp->remaining_lifetime))); ND_TCHECK2(tlv_lsp->checksum, 2); ND_PRINT((ndo, ", chksum: 0x%04x", EXTRACT_16BITS(tlv_lsp->checksum))); tmp-=sizeof(struct isis_tlv_lsp); tlv_lsp++; } break; case ISIS_TLV_CHECKSUM: if (tmp < ISIS_TLV_CHECKSUM_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_CHECKSUM_MINLEN); ND_PRINT((ndo, "\n\t checksum: 0x%04x ", EXTRACT_16BITS(tptr))); /* do not attempt to verify the checksum if it is zero * most likely a HMAC-MD5 TLV is also present and * to avoid conflicts the checksum TLV is zeroed. 
* see rfc3358 for details */ osi_print_cksum(ndo, optr, EXTRACT_16BITS(tptr), tptr-optr, length); break; case ISIS_TLV_POI: if (tlv_len >= SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Purge Originator System-ID: %s", isis_print_id(tptr + 1, SYSTEM_ID_LEN))); } if (tlv_len == 2 * SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, 2 * SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Received from System-ID: %s", isis_print_id(tptr + SYSTEM_ID_LEN + 1, SYSTEM_ID_LEN))); } break; case ISIS_TLV_MT_SUPPORTED: if (tmp < ISIS_TLV_MT_SUPPORTED_MINLEN) break; while (tmp>1) { /* length can only be a multiple of 2, otherwise there is something broken -> so decode down until length is 1 */ if (tmp!=1) { mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; } else { ND_PRINT((ndo, "\n\t invalid MT-ID")); break; } } break; case ISIS_TLV_RESTART_SIGNALING: /* first attempt to decode the flags */ if (tmp < ISIS_TLV_RESTART_SIGNALING_FLAGLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_FLAGLEN); ND_PRINT((ndo, "\n\t Flags [%s]", bittok2str(isis_restart_flag_values, "none", *tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; tmp-=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; /* is there anything other than the flags field? */ if (tmp == 0) break; if (tmp < ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN); ND_PRINT((ndo, ", Remaining holding time %us", EXTRACT_16BITS(tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; tmp-=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; /* is there an additional sysid field present ?*/ if (tmp == SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, ", for %s", isis_print_id(tptr,SYSTEM_ID_LEN))); } break; case ISIS_TLV_IDRP_INFO: if (tmp < ISIS_TLV_IDRP_INFO_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_IDRP_INFO_MINLEN); ND_PRINT((ndo, "\n\t Inter-Domain Information Type: %s", tok2str(isis_subtlv_idrp_values, "Unknown (0x%02x)", *tptr))); switch (*tptr++) { case ISIS_SUBTLV_IDRP_ASN: ND_TCHECK2(*tptr, 2); /* fetch AS number */ ND_PRINT((ndo, "AS Number: %u", EXTRACT_16BITS(tptr))); break; case ISIS_SUBTLV_IDRP_LOCAL: case ISIS_SUBTLV_IDRP_RES: default: if (!print_unknown_data(ndo, tptr, "\n\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_LSP_BUFFERSIZE: if (tmp < ISIS_TLV_LSP_BUFFERSIZE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_LSP_BUFFERSIZE_MINLEN); ND_PRINT((ndo, "\n\t LSP Buffersize: %u", EXTRACT_16BITS(tptr))); break; case ISIS_TLV_PART_DIS: while (tmp >= SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t %s", isis_print_id(tptr, SYSTEM_ID_LEN))); tptr+=SYSTEM_ID_LEN; tmp-=SYSTEM_ID_LEN; } break; case ISIS_TLV_PREFIX_NEIGH: if (tmp < sizeof(struct isis_metric_block)) break; ND_TCHECK2(*tptr, sizeof(struct isis_metric_block)); ND_PRINT((ndo, "\n\t Metric Block")); isis_print_metric_block(ndo, (const struct isis_metric_block *)tptr); tptr+=sizeof(struct isis_metric_block); tmp-=sizeof(struct isis_metric_block); while(tmp>0) { ND_TCHECK2(*tptr, 1); prefix_len=*tptr++; /* read out prefix length in semioctets*/ if (prefix_len < 2) { ND_PRINT((ndo, "\n\t\tAddress: prefix length %u < 2", prefix_len)); break; } tmp--; if (tmp < prefix_len/2) break; ND_TCHECK2(*tptr, prefix_len / 2); ND_PRINT((ndo, "\n\t\tAddress: %s/%u", isonsap_string(ndo, tptr, prefix_len / 2), prefix_len * 4)); tptr+=prefix_len/2; tmp-=prefix_len/2; } break; case ISIS_TLV_IIH_SEQNR: if (tmp < ISIS_TLV_IIH_SEQNR_MINLEN) 
break; ND_TCHECK2(*tptr, ISIS_TLV_IIH_SEQNR_MINLEN); /* check if four bytes are on the wire */ ND_PRINT((ndo, "\n\t Sequence number: %u", EXTRACT_32BITS(tptr))); break; case ISIS_TLV_VENDOR_PRIVATE: if (tmp < ISIS_TLV_VENDOR_PRIVATE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_VENDOR_PRIVATE_MINLEN); /* check if enough byte for a full oui */ vendor_id = EXTRACT_24BITS(tptr); ND_PRINT((ndo, "\n\t Vendor: %s (%u)", tok2str(oui_values, "Unknown", vendor_id), vendor_id)); tptr+=3; tmp-=3; if (tmp > 0) /* hexdump the rest */ if (!print_unknown_data(ndo, tptr, "\n\t\t", tmp)) return(0); break; /* * FIXME those are the defined TLVs that lack a decoder * you are welcome to contribute code ;-) */ case ISIS_TLV_DECNET_PHASE4: case ISIS_TLV_LUCENT_PRIVATE: case ISIS_TLV_IPAUTH: case ISIS_TLV_NORTEL_PRIVATE1: case ISIS_TLV_NORTEL_PRIVATE2: default: if (ndo->ndo_vflag <= 1) { if (!print_unknown_data(ndo, pptr, "\n\t\t", tlv_len)) return(0); } break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", tlv_len)) return(0); } pptr += tlv_len; packet_len -= tlv_len; } if (packet_len != 0) { ND_PRINT((ndo, "\n\t %u straggler bytes", packet_len)); } return (1); trunc: ND_PRINT((ndo, "%s", tstr)); return (1); trunctlv: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static void osi_print_cksum(netdissect_options *ndo, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length) { uint16_t calculated_checksum; /* do not attempt to verify the checksum if it is zero, * if the offset is nonsense, * or the base pointer is not sane */ if (!checksum || checksum_offset < 0 || !ND_TTEST2(*(pptr + checksum_offset), 2) || (u_int)checksum_offset > length || !ND_TTEST2(*pptr, length)) { ND_PRINT((ndo, " (unverified)")); } else { #if 0 printf("\nosi_print_cksum: %p %u %u\n", pptr, checksum_offset, length); #endif calculated_checksum = create_osi_cksum(pptr, checksum_offset, length); if (checksum == calculated_checksum) { ND_PRINT((ndo, " (correct)")); } else { ND_PRINT((ndo, " (incorrect should be 0x%04x)", calculated_checksum)); } } } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
esis_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr; u_int li,esis_pdu_type,source_address_length, source_address_number; const struct esis_header_t *esis_header; if (!ndo->ndo_eflag) ND_PRINT((ndo, "ES-IS")); if (length <= 2) { ND_PRINT((ndo, ndo->ndo_qflag ? "bad pkt!" : "no header at all!")); return; } esis_header = (const struct esis_header_t *) pptr; ND_TCHECK(*esis_header); li = esis_header->length_indicator; optr = pptr; /* * Sanity checking of the header. */ if (esis_header->nlpid != NLPID_ESIS) { ND_PRINT((ndo, " nlpid 0x%02x packet not supported", esis_header->nlpid)); return; } if (esis_header->version != ESIS_VERSION) { ND_PRINT((ndo, " version %d packet not supported", esis_header->version)); return; } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return; } if (li < sizeof(struct esis_header_t) + 2) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return; } esis_pdu_type = esis_header->type & ESIS_PDU_TYPE_MASK; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s, length %u", ndo->ndo_eflag ? "" : ", ", tok2str(esis_pdu_values,"unknown type (%u)",esis_pdu_type), length)); return; } else ND_PRINT((ndo, "%slength %u\n\t%s (%u)", ndo->ndo_eflag ? "" : ", ", length, tok2str(esis_pdu_values,"unknown type: %u", esis_pdu_type), esis_pdu_type)); ND_PRINT((ndo, ", v: %u%s", esis_header->version, esis_header->version == ESIS_VERSION ? "" : "unsupported" )); ND_PRINT((ndo, ", checksum: 0x%04x", EXTRACT_16BITS(esis_header->cksum))); osi_print_cksum(ndo, pptr, EXTRACT_16BITS(esis_header->cksum), 7, li); ND_PRINT((ndo, ", holding time: %us, length indicator: %u", EXTRACT_16BITS(esis_header->holdtime), li)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, optr, "\n\t", sizeof(struct esis_header_t)); pptr += sizeof(struct esis_header_t); li -= sizeof(struct esis_header_t); switch (esis_pdu_type) { case ESIS_PDU_REDIRECT: { const uint8_t *dst, *snpa, *neta; u_int dstl, snpal, netal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } dstl = *pptr; pptr++; li--; ND_TCHECK2(*pptr, dstl); if (li < dstl) { ND_PRINT((ndo, ", bad redirect/li")); return; } dst = pptr; pptr += dstl; li -= dstl; ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, dst, dstl))); ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpal = *pptr; pptr++; li--; ND_TCHECK2(*pptr, snpal); if (li < snpal) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpa = pptr; pptr += snpal; li -= snpal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } netal = *pptr; pptr++; ND_TCHECK2(*pptr, netal); if (li < netal) { ND_PRINT((ndo, ", bad redirect/li")); return; } neta = pptr; pptr += netal; li -= netal; if (netal == 0) ND_PRINT((ndo, "\n\t %s", etheraddr_string(ndo, snpa))); else ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, neta, netal))); break; } case ESIS_PDU_ESH: ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_number = *pptr; pptr++; li--; ND_PRINT((ndo, "\n\t Number of Source Addresses: %u", source_address_number)); while (source_address_number > 0) { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad esh/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, 
isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; source_address_number--; } break; case ESIS_PDU_ISH: { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad ish/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad ish/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; break; } default: if (ndo->ndo_vflag <= 1) { if (pptr < ndo->ndo_snapend) print_unknown_data(ndo, pptr, "\n\t ", ndo->ndo_snapend - pptr); } return; } /* now walk the options */ while (li != 0) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return; } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return; } li -= opli; tptr = pptr; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(esis_option_values,"Unknown",op), op, opli)); switch (op) { case ESIS_OPTION_ES_CONF_TIME: if (opli == 2) { ND_TCHECK2(*pptr, 2); ND_PRINT((ndo, "%us", EXTRACT_16BITS(tptr))); } else ND_PRINT((ndo, "(bad length)")); break; case ESIS_OPTION_PROTOCOLS: while (opli>0) { ND_TCHECK(*pptr); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (opli>1) /* further NPLIDs ? - put comma */ ND_PRINT((ndo, ", ")); tptr++; opli--; } break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ case ESIS_OPTION_QOS_MAINTENANCE: case ESIS_OPTION_SECURITY: case ESIS_OPTION_PRIORITY: case ESIS_OPTION_ADDRESS_MASK: case ESIS_OPTION_SNPA_MASK: default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } trunc: return; }
esis_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr; u_int li,esis_pdu_type,source_address_length, source_address_number; const struct esis_header_t *esis_header; if (!ndo->ndo_eflag) ND_PRINT((ndo, "ES-IS")); if (length <= 2) { ND_PRINT((ndo, ndo->ndo_qflag ? "bad pkt!" : "no header at all!")); return; } esis_header = (const struct esis_header_t *) pptr; ND_TCHECK(*esis_header); li = esis_header->length_indicator; optr = pptr; /* * Sanity checking of the header. */ if (esis_header->nlpid != NLPID_ESIS) { ND_PRINT((ndo, " nlpid 0x%02x packet not supported", esis_header->nlpid)); return; } if (esis_header->version != ESIS_VERSION) { ND_PRINT((ndo, " version %d packet not supported", esis_header->version)); return; } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return; } if (li < sizeof(struct esis_header_t) + 2) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return; } esis_pdu_type = esis_header->type & ESIS_PDU_TYPE_MASK; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s, length %u", ndo->ndo_eflag ? "" : ", ", tok2str(esis_pdu_values,"unknown type (%u)",esis_pdu_type), length)); return; } else ND_PRINT((ndo, "%slength %u\n\t%s (%u)", ndo->ndo_eflag ? "" : ", ", length, tok2str(esis_pdu_values,"unknown type: %u", esis_pdu_type), esis_pdu_type)); ND_PRINT((ndo, ", v: %u%s", esis_header->version, esis_header->version == ESIS_VERSION ? "" : "unsupported" )); ND_PRINT((ndo, ", checksum: 0x%04x", EXTRACT_16BITS(esis_header->cksum))); osi_print_cksum(ndo, pptr, EXTRACT_16BITS(esis_header->cksum), 7, li); ND_PRINT((ndo, ", holding time: %us, length indicator: %u", EXTRACT_16BITS(esis_header->holdtime), li)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, optr, "\n\t", sizeof(struct esis_header_t)); pptr += sizeof(struct esis_header_t); li -= sizeof(struct esis_header_t); switch (esis_pdu_type) { case ESIS_PDU_REDIRECT: { const uint8_t *dst, *snpa, *neta; u_int dstl, snpal, netal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } dstl = *pptr; pptr++; li--; ND_TCHECK2(*pptr, dstl); if (li < dstl) { ND_PRINT((ndo, ", bad redirect/li")); return; } dst = pptr; pptr += dstl; li -= dstl; ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, dst, dstl))); ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpal = *pptr; pptr++; li--; ND_TCHECK2(*pptr, snpal); if (li < snpal) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpa = pptr; pptr += snpal; li -= snpal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } netal = *pptr; pptr++; ND_TCHECK2(*pptr, netal); if (li < netal) { ND_PRINT((ndo, ", bad redirect/li")); return; } neta = pptr; pptr += netal; li -= netal; if (snpal == 6) ND_PRINT((ndo, "\n\t SNPA (length: %u): %s", snpal, etheraddr_string(ndo, snpa))); else ND_PRINT((ndo, "\n\t SNPA (length: %u): %s", snpal, linkaddr_string(ndo, snpa, LINKADDR_OTHER, snpal))); if (netal != 0) ND_PRINT((ndo, "\n\t NET (length: %u) %s", netal, isonsap_string(ndo, neta, netal))); break; } case ESIS_PDU_ESH: ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_number = *pptr; pptr++; li--; ND_PRINT((ndo, "\n\t Number of Source Addresses: %u", source_address_number)); while (source_address_number > 0) { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_length = *pptr; pptr++; li--; 
ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad esh/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; source_address_number--; } break; case ESIS_PDU_ISH: { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad ish/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad ish/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; break; } default: if (ndo->ndo_vflag <= 1) { if (pptr < ndo->ndo_snapend) print_unknown_data(ndo, pptr, "\n\t ", ndo->ndo_snapend - pptr); } return; } /* now walk the options */ while (li != 0) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return; } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return; } li -= opli; tptr = pptr; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(esis_option_values,"Unknown",op), op, opli)); switch (op) { case ESIS_OPTION_ES_CONF_TIME: if (opli == 2) { ND_TCHECK2(*pptr, 2); ND_PRINT((ndo, "%us", EXTRACT_16BITS(tptr))); } else ND_PRINT((ndo, "(bad length)")); break; case ESIS_OPTION_PROTOCOLS: while (opli>0) { ND_TCHECK(*pptr); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (opli>1) /* further NPLIDs ? - put comma */ ND_PRINT((ndo, ", ")); tptr++; opli--; } break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ case ESIS_OPTION_QOS_MAINTENANCE: case ESIS_OPTION_SECURITY: case ESIS_OPTION_PRIORITY: case ESIS_OPTION_ADDRESS_MASK: case ESIS_OPTION_SNPA_MASK: default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } trunc: return; }
{'added': [(1220, '\t\tif (snpal == 6)'), (1221, '\t\t\tND_PRINT((ndo, "\\n\\t SNPA (length: %u): %s",'), (1222, '\t\t\t snpal,'), (1223, '\t\t\t etheraddr_string(ndo, snpa)));'), (1225, '\t\t\tND_PRINT((ndo, "\\n\\t SNPA (length: %u): %s",'), (1226, '\t\t\t snpal,'), (1227, '\t\t\t linkaddr_string(ndo, snpa, LINKADDR_OTHER, snpal)));'), (1228, '\t\tif (netal != 0)'), (1229, '\t\t\tND_PRINT((ndo, "\\n\\t NET (length: %u) %s",'), (1230, '\t\t\t netal,'), (1231, '\t\t\t isonsap_string(ndo, neta, netal)));')], 'deleted': [(1220, '\t\tif (netal == 0)'), (1221, '\t\t\tND_PRINT((ndo, "\\n\\t %s", etheraddr_string(ndo, snpa)));'), (1223, '\t\t\tND_PRINT((ndo, "\\n\\t %s", isonsap_string(ndo, neta, netal)));')]}
11
3
2411
14646
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13016
['CWE-125']
mpegts.c
gf_m2ts_process_pat
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2005-2012 * * This file is part of GPAC / MPEG2-TS sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/mpegts.h> #ifndef GPAC_DISABLE_MPEG2TS #include <string.h> #include <gpac/constants.h> #include <gpac/internal/media_dev.h> #include <gpac/download.h> #ifndef GPAC_DISABLE_STREAMING #include <gpac/internal/ietf_dev.h> #endif #ifdef GPAC_CONFIG_LINUX #include <unistd.h> #endif #ifdef GPAC_ENABLE_MPE #include <gpac/dvb_mpe.h> #endif #ifdef GPAC_ENABLE_DSMCC #include <gpac/ait.h> #endif #define DEBUG_TS_PACKET 0 GF_EXPORT const char *gf_m2ts_get_stream_name(u32 streamType) { switch (streamType) { case GF_M2TS_VIDEO_MPEG1: return "MPEG-1 Video"; case GF_M2TS_VIDEO_MPEG2: return "MPEG-2 Video"; case GF_M2TS_AUDIO_MPEG1: return "MPEG-1 Audio"; case GF_M2TS_AUDIO_MPEG2: return "MPEG-2 Audio"; case GF_M2TS_PRIVATE_SECTION: return "Private Section"; case GF_M2TS_PRIVATE_DATA: return "Private Data"; case GF_M2TS_AUDIO_AAC: return "AAC Audio"; case GF_M2TS_VIDEO_MPEG4: return "MPEG-4 Video"; case GF_M2TS_VIDEO_H264: return "MPEG-4/H264 Video"; case GF_M2TS_VIDEO_SVC: return "H264-SVC Video"; case GF_M2TS_VIDEO_HEVC: return "HEVC Video"; case GF_M2TS_VIDEO_SHVC: return "SHVC Video"; case GF_M2TS_VIDEO_SHVC_TEMPORAL: return "SHVC Video Temporal Sublayer"; case GF_M2TS_VIDEO_MHVC: return "MHVC Video"; case GF_M2TS_VIDEO_MHVC_TEMPORAL: return "MHVC Video Temporal Sublayer"; case GF_M2TS_AUDIO_AC3: return "Dolby AC3 Audio"; case GF_M2TS_AUDIO_DTS: return "Dolby DTS Audio"; case GF_M2TS_SUBTITLE_DVB: return "DVB Subtitle"; case GF_M2TS_SYSTEMS_MPEG4_PES: return "MPEG-4 SL (PES)"; case GF_M2TS_SYSTEMS_MPEG4_SECTIONS: return "MPEG-4 SL (Section)"; case GF_M2TS_MPE_SECTIONS: return "MPE (Section)"; case GF_M2TS_METADATA_PES: return "Metadata (PES)"; case GF_M2TS_METADATA_ID3_HLS: return "ID3/HLS Metadata (PES)"; default: return "Unknown"; } } static u32 gf_m2ts_reframe_default(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr) { GF_M2TS_PES_PCK pck; pck.flags = 0; if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP; if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START; pck.DTS = pes->DTS; pck.PTS = pes->PTS; pck.data = (char *)data; pck.data_len = data_len; pck.stream = pes; ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck); /*we consumed all data*/ return 0; } static u32 gf_m2ts_reframe_reset(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr) { if (pes->pck_data) { gf_free(pes->pck_data); pes->pck_data = NULL; } pes->pck_data_len = pes->pck_alloc_len = 0; if (pes->prev_data) { gf_free(pes->prev_data); pes->prev_data = NULL; } pes->prev_data_len = 0; pes->pes_len = 0; pes->prev_PTS = 0; pes->reframe = NULL; pes->cc = -1; 
pes->temi_tc_desc_len = 0; return 0; } static void add_text(char **buffer, u32 *size, u32 *pos, char *msg, u32 msg_len) { if (!msg || !buffer) return; if (*pos+msg_len>*size) { *size = *pos+msg_len-*size+256; *buffer = (char *)gf_realloc(*buffer, *size); } strncpy((*buffer)+(*pos), msg, msg_len); *pos += msg_len; } static GF_Err id3_parse_tag(char *data, u32 length, char **output, u32 *output_size, u32 *output_pos) { GF_BitStream *bs; u32 pos; if ((data[0] != 'I') || (data[1] != 'D') || (data[2] != '3')) return GF_NOT_SUPPORTED; bs = gf_bs_new(data, length, GF_BITSTREAM_READ); gf_bs_skip_bytes(bs, 3); /*u8 major = */gf_bs_read_u8(bs); /*u8 minor = */gf_bs_read_u8(bs); /*u8 unsync = */gf_bs_read_int(bs, 1); /*u8 ext_hdr = */ gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 6); u32 size = gf_id3_read_size(bs); pos = (u32) gf_bs_get_position(bs); if (size != length-pos) size = length-pos; while (size && (gf_bs_available(bs)>=10) ) { u32 ftag = gf_bs_read_u32(bs); u32 fsize = gf_id3_read_size(bs); /*u16 fflags = */gf_bs_read_u16(bs); size -= 10; //TODO, handle more ID3 tags ? if (ftag==ID3V2_FRAME_TXXX) { u32 pos = (u32) gf_bs_get_position(bs); char *text = data+pos; add_text(output, output_size, output_pos, text, fsize); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] ID3 tag not handled, patch welcome\n", gf_4cc_to_str(ftag) ) ); } gf_bs_skip_bytes(bs, fsize); } gf_bs_del(bs); return GF_OK; } static u32 gf_m2ts_reframe_id3_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr) { char frame_header[256]; char *output_text = NULL; u32 output_len = 0; u32 pos = 0; GF_M2TS_PES_PCK pck; pck.flags = 0; if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP; if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START; pck.DTS = pes->DTS; pck.PTS = pes->PTS; sprintf(frame_header, LLU" --> NEXT\n", pes->PTS); add_text(&output_text, &output_len, &pos, frame_header, (u32)strlen(frame_header)); id3_parse_tag((char *)data, data_len, &output_text, &output_len, &pos); add_text(&output_text, &output_len, &pos, "\n\n", 2); pck.data = (char *)output_text; pck.data_len = pos; pck.stream = pes; ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck); gf_free(output_text); /*we consumed all data*/ return 0; } static u32 gf_m2ts_sync(GF_M2TS_Demuxer *ts, char *data, u32 size, Bool simple_check) { u32 i=0; /*if first byte is sync assume we're sync*/ if (simple_check && (data[i]==0x47)) return 0; while (i < size) { if (i+192 >= size) return size; if ((data[i]==0x47) && (data[i+188]==0x47)) break; if (i+192 >= size) return size; if ((data[i]==0x47) && (data[i+192]==0x47)) { ts->prefix_present = 1; break; } i++; } if (i) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] re-sync skipped %d bytes\n", i) ); } return i; } GF_EXPORT Bool gf_m2ts_crc32_check(u8 *data, u32 len) { u32 crc = gf_crc_32(data, len); u32 crc_val = GF_4CC((u8) data[len], (u8) data[len+1], (u8) data[len+2], (u8) data[len+3]); return (crc==crc_val) ? 
GF_TRUE : GF_FALSE; } static GF_M2TS_SectionFilter *gf_m2ts_section_filter_new(gf_m2ts_section_callback process_section_callback, Bool process_individual) { GF_M2TS_SectionFilter *sec; GF_SAFEALLOC(sec, GF_M2TS_SectionFilter); if (!sec) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] gf_m2ts_section_filter_new : OUT OF MEMORY\n")); return NULL; } sec->cc = -1; sec->process_section = process_section_callback; sec->process_individual = process_individual; return sec; } static void gf_m2ts_reset_sections(GF_List *sections) { u32 count; GF_M2TS_Section *section; //GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Deleting sections\n")); count = gf_list_count(sections); while (count) { section = gf_list_get(sections, 0); gf_list_rem(sections, 0); if (section->data) gf_free(section->data); gf_free(section); count--; } } static void gf_m2ts_section_filter_reset(GF_M2TS_SectionFilter *sf) { if (sf->section) { gf_free(sf->section); sf->section = NULL; } while (sf->table) { GF_M2TS_Table *t = sf->table; sf->table = t->next; gf_m2ts_reset_sections(t->sections); gf_list_del(t->sections); gf_free(t); } sf->cc = -1; sf->length = sf->received = 0; sf->demux_restarted = 1; } static void gf_m2ts_section_filter_del(GF_M2TS_SectionFilter *sf) { gf_m2ts_section_filter_reset(sf); gf_free(sf); } static void gf_m2ts_metadata_descriptor_del(GF_M2TS_MetadataDescriptor *metad) { if (metad) { if (metad->service_id_record) gf_free(metad->service_id_record); if (metad->decoder_config) gf_free(metad->decoder_config); if (metad->decoder_config_id) gf_free(metad->decoder_config_id); gf_free(metad); } } GF_EXPORT void gf_m2ts_es_del(GF_M2TS_ES *es, GF_M2TS_Demuxer *ts) { gf_list_del_item(es->program->streams, es); if (es->flags & GF_M2TS_ES_IS_SECTION) { GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es; if (ses->sec) gf_m2ts_section_filter_del(ses->sec); #ifdef GPAC_ENABLE_MPE if (es->flags & GF_M2TS_ES_IS_MPE) gf_dvb_mpe_section_del(es); #endif } else if (es->pid!=es->program->pmt_pid) { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; if ((pes->flags & GF_M2TS_INHERIT_PCR) && ts->ess[es->program->pcr_pid]==es) ts->ess[es->program->pcr_pid] = NULL; if (pes->pck_data) gf_free(pes->pck_data); if (pes->prev_data) gf_free(pes->prev_data); if (pes->buf) gf_free(pes->buf); if (pes->reassemble_buf) gf_free(pes->reassemble_buf); if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc); if (pes->metadata_descriptor) gf_m2ts_metadata_descriptor_del(pes->metadata_descriptor); } if (es->slcfg) gf_free(es->slcfg); gf_free(es); } static void gf_m2ts_reset_sdt(GF_M2TS_Demuxer *ts) { while (gf_list_count(ts->SDTs)) { GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_last(ts->SDTs); gf_list_rem_last(ts->SDTs); if (sdt->provider) gf_free(sdt->provider); if (sdt->service) gf_free(sdt->service); gf_free(sdt); } } GF_EXPORT GF_M2TS_SDT *gf_m2ts_get_sdt_info(GF_M2TS_Demuxer *ts, u32 program_id) { u32 i; for (i=0; i<gf_list_count(ts->SDTs); i++) { GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_get(ts->SDTs, i); if (sdt->service_id==program_id) return sdt; } return NULL; } static void gf_m2ts_section_complete(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses) { //seek mode, only process PAT and PMT if (ts->seek_mode && (sec->section[0] != GF_M2TS_TABLE_ID_PAT) && (sec->section[0] != GF_M2TS_TABLE_ID_PMT)) { /*clean-up (including broken sections)*/ if (sec->section) gf_free(sec->section); sec->section = NULL; sec->length = sec->received = 0; return; } if (!sec->process_section) { if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_AIT)) 
) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; //ts->on_event(ts, GF_M2TS_EVT_AIT_FOUND, &pck); on_ait_section(ts, GF_M2TS_EVT_AIT_FOUND, &pck); #endif } else if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_ENCAPSULATED_DATA || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_STREAM_DESCRIPTION || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_PRIVATE)) ) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; on_dsmcc_section(ts,GF_M2TS_EVT_DSMCC_FOUND,&pck); //ts->on_event(ts, GF_M2TS_EVT_DSMCC_FOUND, &pck); #endif } #ifdef GPAC_ENABLE_MPE else if (ts->on_mpe_event && ((ses && (ses->flags & GF_M2TS_EVT_DVB_MPE)) || (sec->section[0]==GF_M2TS_TABLE_ID_INT)) ) { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; ts->on_mpe_event(ts, GF_M2TS_EVT_DVB_MPE, &pck); } #endif else if (ts->on_event) { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck); } } else { Bool has_syntax_indicator; u8 table_id; u16 extended_table_id; u32 status, section_start, i; GF_M2TS_Table *t, *prev_t; unsigned char *data; Bool section_valid = 0; status = 0; /*parse header*/ data = (u8 *)sec->section; /*look for proper table*/ table_id = data[0]; if (ts->on_event) { switch (table_id) { case GF_M2TS_TABLE_ID_PAT: case GF_M2TS_TABLE_ID_SDT_ACTUAL: case GF_M2TS_TABLE_ID_PMT: case GF_M2TS_TABLE_ID_NIT_ACTUAL: case GF_M2TS_TABLE_ID_TDT: case GF_M2TS_TABLE_ID_TOT: { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck); } } } has_syntax_indicator = (data[1] & 0x80) ? 1 : 0; if (has_syntax_indicator) { extended_table_id = (data[3]<<8) | data[4]; } else { extended_table_id = 0; } prev_t = NULL; t = sec->table; while (t) { if ((t->table_id==table_id) && (t->ex_table_id == extended_table_id)) break; prev_t = t; t = t->next; } /*create table*/ if (!t) { GF_SAFEALLOC(t, GF_M2TS_Table); if (!t) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc table %d %d\n", table_id, extended_table_id)); return; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Creating table %d %d\n", table_id, extended_table_id)); t->table_id = table_id; t->ex_table_id = extended_table_id; t->last_version_number = 0xFF; t->sections = gf_list_new(); if (prev_t) prev_t->next = t; else sec->table = t; } if (has_syntax_indicator) { if (sec->length < 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section length %d less than CRC \n", sec->length)); } else { /*remove crc32*/ sec->length -= 4; if (gf_m2ts_crc32_check((char *)data, sec->length)) { s32 cur_sec_num; t->version_number = (data[5] >> 1) & 0x1f; if (t->last_section_number && t->section_number && (t->version_number != t->last_version_number)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] table transmission interrupted: previous table (v=%d) %d/%d sections - new table (v=%d) %d/%d sections\n", t->last_version_number, t->section_number, t->last_section_number, t->version_number, data[6] + 1, data[7] + 1) ); gf_m2ts_reset_sections(t->sections); t->section_number = 0; } t->current_next_indicator = (data[5] & 0x1) ? 
1 : 0; /*add one to section numbers to detect if we missed or not the first section in the table*/ cur_sec_num = data[6] + 1; t->last_section_number = data[7] + 1; section_start = 8; /*we missed something*/ if (!sec->process_individual && t->section_number + 1 != cur_sec_num) { /* TODO - Check how to handle sections when the first complete section does not have its sec num 0 */ section_valid = 0; if (t->is_init) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted table (lost section %d)\n", cur_sec_num ? cur_sec_num-1 : 31) ); } } else { section_valid = 1; t->section_number = cur_sec_num; } } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section (CRC32 failed)\n")); } } } else { section_valid = 1; section_start = 3; } /*process section*/ if (section_valid) { GF_M2TS_Section *section; GF_SAFEALLOC(section, GF_M2TS_Section); if (!section) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create section\n")); return; } section->data_size = sec->length - section_start; section->data = (unsigned char*)gf_malloc(sizeof(unsigned char)*section->data_size); memcpy(section->data, sec->section + section_start, sizeof(unsigned char)*section->data_size); gf_list_add(t->sections, section); if (t->section_number == 1) { status |= GF_M2TS_TABLE_START; if (t->last_version_number == t->version_number) { t->is_repeat = 1; } else { t->is_repeat = 0; } /*only update version number in the first section of the table*/ t->last_version_number = t->version_number; } if (t->is_init) { if (t->is_repeat) { status |= GF_M2TS_TABLE_REPEAT; } else { status |= GF_M2TS_TABLE_UPDATE; } } else { status |= GF_M2TS_TABLE_FOUND; } if (t->last_section_number == t->section_number) { u32 table_size; status |= GF_M2TS_TABLE_END; table_size = 0; for (i=0; i<gf_list_count(t->sections); i++) { GF_M2TS_Section *section = gf_list_get(t->sections, i); table_size += section->data_size; } if (t->is_repeat) { if (t->table_size != table_size) { status |= GF_M2TS_TABLE_UPDATE; GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Repeated section found with different sizes (old table %d bytes, new table %d bytes)\n", t->table_size, table_size) ); t->table_size = table_size; } } else { t->table_size = table_size; } t->is_init = 1; /*reset section number*/ t->section_number = 0; t->is_repeat = 0; } if (sec->process_individual) { /*send each section of the table and not the aggregated table*/ if (sec->process_section) sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status); gf_m2ts_reset_sections(t->sections); } else { if (status&GF_M2TS_TABLE_END) { if (sec->process_section) sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status); gf_m2ts_reset_sections(t->sections); } } } else { sec->cc = -1; t->section_number = 0; } } /*clean-up (including broken sections)*/ if (sec->section) gf_free(sec->section); sec->section = NULL; sec->length = sec->received = 0; } static Bool gf_m2ts_is_long_section(u8 table_id) { switch (table_id) { case GF_M2TS_TABLE_ID_MPEG4_BIFS: case GF_M2TS_TABLE_ID_MPEG4_OD: case GF_M2TS_TABLE_ID_INT: case GF_M2TS_TABLE_ID_EIT_ACTUAL_PF: case GF_M2TS_TABLE_ID_EIT_OTHER_PF: case GF_M2TS_TABLE_ID_ST: case GF_M2TS_TABLE_ID_SIT: case GF_M2TS_TABLE_ID_DSM_CC_PRIVATE: case GF_M2TS_TABLE_ID_MPE_FEC: case GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE: case GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE: return 1; default: if (table_id >= 
GF_M2TS_TABLE_ID_EIT_SCHEDULE_MIN && table_id <= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MAX) return 1; else return 0; } } static u32 gf_m2ts_get_section_length(char byte0, char byte1, char byte2) { u32 length; if (gf_m2ts_is_long_section(byte0)) { length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0xfff ); } else { length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0x3ff ); } return length; } static void gf_m2ts_gather_section(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size) { u32 payload_size = data_size; u8 expect_cc = (sec->cc<0) ? hdr->continuity_counter : (sec->cc + 1) & 0xf; Bool disc = (expect_cc == hdr->continuity_counter) ? 0 : 1; sec->cc = expect_cc; /*may happen if hdr->adaptation_field=2 no payload in TS packet*/ if (!data_size) return; if (hdr->payload_start) { u32 ptr_field; ptr_field = data[0]; if (ptr_field+1>data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid section start (@ptr_field=%d, @data_size=%d)\n", ptr_field, data_size) ); return; } /*end of previous section*/ if (!sec->length && sec->received) { /* the length of the section could not be determined from the previous TS packet because we had only 1 or 2 bytes */ if (sec->received == 1) sec->length = gf_m2ts_get_section_length(sec->section[0], data[1], data[2]); else /* (sec->received == 2) */ sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], data[1]); sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length); } if (sec->length && sec->received + ptr_field >= sec->length) { u32 len = sec->length - sec->received; memcpy(sec->section + sec->received, data+1, sizeof(char)*len); sec->received += len; if (ptr_field > len) GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid pointer field (@ptr_field=%d, @remaining=%d)\n", ptr_field, len) ); gf_m2ts_section_complete(ts, sec, ses); } data += ptr_field+1; data_size -= ptr_field+1; payload_size -= ptr_field+1; aggregated_section: if (sec->section) gf_free(sec->section); sec->length = sec->received = 0; sec->section = (char*)gf_malloc(sizeof(char)*data_size); memcpy(sec->section, data, sizeof(char)*data_size); sec->received = data_size; } else if (disc) { if (sec->section) gf_free(sec->section); sec->section = NULL; sec->received = sec->length = 0; return; } else if (!sec->section) { return; } else { if (sec->length && sec->received+data_size > sec->length) data_size = sec->length - sec->received; if (sec->length) { memcpy(sec->section + sec->received, data, sizeof(char)*data_size); } else { sec->section = (char*)gf_realloc(sec->section, sizeof(char)*(sec->received+data_size)); memcpy(sec->section + sec->received, data, sizeof(char)*data_size); } sec->received += data_size; } /*alloc final buffer*/ if (!sec->length && (sec->received >= 3)) { sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], sec->section[2]); sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length); if (sec->received > sec->length) { data_size -= sec->received - sec->length; sec->received = sec->length; } } if (!sec->length || sec->received < sec->length) return; /*OK done*/ gf_m2ts_section_complete(ts, sec, ses); if (payload_size > data_size) { data += data_size; /* detect padding after previous section */ if (data[0] != 0xFF) { data_size = payload_size - data_size; payload_size = data_size; goto aggregated_section; } } } static void gf_m2ts_process_sdt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 
table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 pos, evt_type; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SDT_REPEAT, NULL); return; } if (table_id != GF_M2TS_TABLE_ID_SDT_ACTUAL) { return; } gf_m2ts_reset_sdt(ts); nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] SDT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; //orig_net_id = (data[0] << 8) | data[1]; pos = 3; while (pos < data_size) { GF_M2TS_SDT *sdt; u32 descs_size, d_pos, ulen; GF_SAFEALLOC(sdt, GF_M2TS_SDT); if (!sdt) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create SDT\n")); return; } gf_list_add(ts->SDTs, sdt); sdt->service_id = (data[pos]<<8) + data[pos+1]; sdt->EIT_schedule = (data[pos+2] & 0x2) ? 1 : 0; sdt->EIT_present_following = (data[pos+2] & 0x1); sdt->running_status = (data[pos+3]>>5) & 0x7; sdt->free_CA_mode = (data[pos+3]>>4) & 0x1; descs_size = ((data[pos+3]&0xf)<<8) | data[pos+4]; pos += 5; d_pos = 0; while (d_pos < descs_size) { u8 d_tag = data[pos+d_pos]; u8 d_len = data[pos+d_pos+1]; switch (d_tag) { case GF_M2TS_DVB_SERVICE_DESCRIPTOR: if (sdt->provider) gf_free(sdt->provider); sdt->provider = NULL; if (sdt->service) gf_free(sdt->service); sdt->service = NULL; d_pos+=2; sdt->service_type = data[pos+d_pos]; ulen = data[pos+d_pos+1]; d_pos += 2; sdt->provider = (char*)gf_malloc(sizeof(char)*(ulen+1)); memcpy(sdt->provider, data+pos+d_pos, sizeof(char)*ulen); sdt->provider[ulen] = 0; d_pos += ulen; ulen = data[pos+d_pos]; d_pos += 1; sdt->service = (char*)gf_malloc(sizeof(char)*(ulen+1)); memcpy(sdt->service, data+pos+d_pos, sizeof(char)*ulen); sdt->service[ulen] = 0; d_pos += ulen; break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) not supported\n", d_tag)); d_pos += d_len; if (d_len == 0) d_pos = descs_size; break; } } pos += descs_size; } evt_type = GF_M2TS_EVT_SDT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } static void gf_m2ts_process_mpeg4section(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_M2TS_SL_PCK sl_pck; u32 nb_sections, i; GF_M2TS_Section *section; /*skip if already received*/ if (status & GF_M2TS_TABLE_REPEAT) if (!(es->flags & GF_M2TS_ES_SEND_REPEATED_SECTIONS)) return; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Sections for PID %d\n", es->pid) ); /*send all sections (eg SL-packets)*/ nb_sections = gf_list_count(sections); for (i=0; i<nb_sections; i++) { section = (GF_M2TS_Section *)gf_list_get(sections, i); sl_pck.data = (char *)section->data; sl_pck.data_len = section->data_size; sl_pck.stream = (GF_M2TS_ES *)es; sl_pck.version_number = version_number; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck); } } static void gf_m2ts_process_nit(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *nit_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] NIT table processing (not yet implemented)")); } static void gf_m2ts_process_tdt_tot(GF_M2TS_Demuxer *ts, 
GF_M2TS_SECTION_ES *tdt_tot_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { unsigned char *data; u32 data_size, nb_sections; u32 date, yp, mp, k; GF_M2TS_Section *section; GF_M2TS_TDT_TOT *time_table; const char *table_name; /*wait for the last section */ if ( !(status & GF_M2TS_TABLE_END) ) return; switch (table_id) { case GF_M2TS_TABLE_ID_TDT: table_name = "TDT"; break; case GF_M2TS_TABLE_ID_TOT: table_name = "TOT"; break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Unimplemented table_id %u for PID %u\n", table_id, GF_M2TS_PID_TDT_TOT_ST)); return; } nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] %s on multiple sections not supported\n", table_name)); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; /*TOT only contains 40 bits of UTC_time; TDT add descriptors and a CRC*/ if ((table_id==GF_M2TS_TABLE_ID_TDT) && (data_size != 5)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Corrupted TDT size\n", table_name)); } GF_SAFEALLOC(time_table, GF_M2TS_TDT_TOT); if (!time_table) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc DVB time table\n")); return; } /*UTC_time - see annex C of DVB-SI ETSI EN 300468*/ /* decodes an Modified Julian Date (MJD) into a Co-ordinated Universal Time (UTC) See annex C of DVB-SI ETSI EN 300468 */ date = data[0]*256 + data[1]; yp = (u32)((date - 15078.2)/365.25); mp = (u32)((date - 14956.1 - (u32)(yp * 365.25))/30.6001); time_table->day = (u32)(date - 14956 - (u32)(yp * 365.25) - (u32)(mp * 30.6001)); if (mp == 14 || mp == 15) k = 1; else k = 0; time_table->year = yp + k + 1900; time_table->month = mp - 1 - k*12; time_table->hour = 10*((data[2]&0xf0)>>4) + (data[2]&0x0f); time_table->minute = 10*((data[3]&0xf0)>>4) + (data[3]&0x0f); time_table->second = 10*((data[4]&0xf0)>>4) + (data[4]&0x0f); assert(time_table->hour<24 && time_table->minute<60 && time_table->second<60); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream UTC time is %u/%02u/%02u %02u:%02u:%02u\n", time_table->year, time_table->month, time_table->day, time_table->hour, time_table->minute, time_table->second)); switch (table_id) { case GF_M2TS_TABLE_ID_TDT: if (ts->TDT_time) gf_free(ts->TDT_time); ts->TDT_time = time_table; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TDT, time_table); break; case GF_M2TS_TABLE_ID_TOT: #if 0 { u32 pos, loop_len; loop_len = ((data[5]&0x0f) << 8) | (data[6] & 0xff); data += 7; pos = 0; while (pos < loop_len) { u8 tag = data[pos]; pos += 2; if (tag == GF_M2TS_DVB_LOCAL_TIME_OFFSET_DESCRIPTOR) { char tmp_time[10]; u16 offset_hours, offset_minutes; now->country_code[0] = data[pos]; now->country_code[1] = data[pos+1]; now->country_code[2] = data[pos+2]; now->country_region_id = data[pos+3]>>2; sprintf(tmp_time, "%02x", data[pos+4]); offset_hours = atoi(tmp_time); sprintf(tmp_time, "%02x", data[pos+5]); offset_minutes = atoi(tmp_time); now->local_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60; if (data[pos+3] & 1) now->local_time_offset_seconds *= -1; dvb_decode_mjd_to_unix_time(data+pos+6, &now->unix_next_toc); sprintf(tmp_time, "%02x", data[pos+11]); offset_hours = atoi(tmp_time); sprintf(tmp_time, "%02x", data[pos+12]); offset_minutes = atoi(tmp_time); now->next_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60; if (data[pos+3] & 1) now->next_time_offset_seconds *= -1; pos+= 13; } } /*TODO: 
check lengths are ok*/ if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table); } #endif /*check CRC32*/ if (ts->tdt_tot->length<4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (less than 4 bytes but CRC32 should be present\n", table_name)); goto error_exit; } if (!gf_m2ts_crc32_check(ts->tdt_tot->section, ts->tdt_tot->length-4)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (CRC32 failed)\n", table_name)); goto error_exit; } if (ts->TDT_time) gf_free(ts->TDT_time); ts->TDT_time = time_table; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table); break; default: assert(0); goto error_exit; } return; /*success*/ error_exit: gf_free(time_table); return; } static GF_M2TS_MetadataPointerDescriptor *gf_m2ts_read_metadata_pointer_descriptor(GF_BitStream *bs, u32 length) { u32 size; GF_M2TS_MetadataPointerDescriptor *d; GF_SAFEALLOC(d, GF_M2TS_MetadataPointerDescriptor); if (!d) return NULL; d->application_format = gf_bs_read_u16(bs); size = 2; if (d->application_format == 0xFFFF) { d->application_format_identifier = gf_bs_read_u32(bs); size += 4; } d->format = gf_bs_read_u8(bs); size += 1; if (d->format == 0xFF) { d->format_identifier = gf_bs_read_u32(bs); size += 4; } d->service_id = gf_bs_read_u8(bs); d->locator_record_flag = (gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE); d->carriage_flag = (enum metadata_carriage)gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 5); /*reserved */ size += 2; if (d->locator_record_flag) { d->locator_length = gf_bs_read_u8(bs); d->locator_data = (char *)gf_malloc(d->locator_length); size += 1 + d->locator_length; gf_bs_read_data(bs, d->locator_data, d->locator_length); } if (d->carriage_flag != 3) { d->program_number = gf_bs_read_u16(bs); size += 2; } if (d->carriage_flag == 1) { d->ts_location = gf_bs_read_u16(bs); d->ts_id = gf_bs_read_u16(bs); size += 4; } if (length-size > 0) { d->data_size = length-size; d->data = (char *)gf_malloc(d->data_size); gf_bs_read_data(bs, d->data, d->data_size); } return d; } static void gf_m2ts_metadata_pointer_descriptor_del(GF_M2TS_MetadataPointerDescriptor *metapd) { if (metapd) { if (metapd->locator_data) gf_free(metapd->locator_data); if (metapd->data) gf_free(metapd->data); gf_free(metapd); } } static GF_M2TS_MetadataDescriptor *gf_m2ts_read_metadata_descriptor(GF_BitStream *bs, u32 length) { u32 size; GF_M2TS_MetadataDescriptor *d; GF_SAFEALLOC(d, GF_M2TS_MetadataDescriptor); if (!d) return NULL; d->application_format = gf_bs_read_u16(bs); size = 2; if (d->application_format == 0xFFFF) { d->application_format_identifier = gf_bs_read_u32(bs); size += 4; } d->format = gf_bs_read_u8(bs); size += 1; if (d->format == 0xFF) { d->format_identifier = gf_bs_read_u32(bs); size += 4; } d->service_id = gf_bs_read_u8(bs); d->decoder_config_flags = gf_bs_read_int(bs, 3); d->dsmcc_flag = (gf_bs_read_int(bs, 1) ? 
GF_TRUE : GF_FALSE); gf_bs_read_int(bs, 4); /* reserved */ size += 2; if (d->dsmcc_flag) { d->service_id_record_length = gf_bs_read_u8(bs); d->service_id_record = (char *)gf_malloc(d->service_id_record_length); size += 1 + d->service_id_record_length; gf_bs_read_data(bs, d->service_id_record, d->service_id_record_length); } if (d->decoder_config_flags == 1) { d->decoder_config_length = gf_bs_read_u8(bs); d->decoder_config = (char *)gf_malloc(d->decoder_config_length); size += 1 + d->decoder_config_length; gf_bs_read_data(bs, d->decoder_config, d->decoder_config_length); } if (d->decoder_config_flags == 3) { d->decoder_config_id_length = gf_bs_read_u8(bs); d->decoder_config_id = (char *)gf_malloc(d->decoder_config_id_length); size += 1 + d->decoder_config_id_length; gf_bs_read_data(bs, d->decoder_config_id, d->decoder_config_id_length); } if (d->decoder_config_flags == 4) { d->decoder_config_service_id = gf_bs_read_u8(bs); size++; } return d; } static void gf_m2ts_process_pmt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *pmt, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 info_length, pos, desc_len, evt_type, nb_es,i; u32 nb_sections; u32 data_size; u32 nb_hevc, nb_hevc_temp, nb_shvc, nb_shvc_temp, nb_mhvc, nb_mhvc_temp; unsigned char *data; GF_M2TS_Section *section; GF_Err e = GF_OK; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; nb_es = 0; /*skip if already received but no update detected (eg same data) */ if ((status&GF_M2TS_TABLE_REPEAT) && !(status&GF_M2TS_TABLE_UPDATE)) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program); return; } if (pmt->sec->demux_restarted) { pmt->sec->demux_restarted = 0; return; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PMT Found or updated\n")); nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PMT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; pmt->program->pcr_pid = ((data[0] & 0x1f) << 8) | data[1]; info_length = ((data[2]&0xf)<<8) | data[3]; if (info_length + 4 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT first loop, %d bytes avail but first loop size %d\n", data_size, info_length)); return; } else if (info_length != 0) { /* ...Read Descriptors ... */ u8 tag, len; u32 first_loop_len = 0; tag = data[4]; len = data[5]; while (info_length > first_loop_len) { if (tag == GF_M2TS_MPEG4_IOD_DESCRIPTOR) { if ((len>2) && (len - 2 <= info_length)) { u32 size; GF_BitStream *iod_bs; iod_bs = gf_bs_new((char *)data+8, len-2, GF_BITSTREAM_READ); if (pmt->program->pmt_iod) gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod); e = gf_odf_parse_descriptor(iod_bs , (GF_Descriptor **) &pmt->program->pmt_iod, &size); gf_bs_del(iod_bs ); if (e==GF_OK) { /*remember program number for service/program selection*/ if (pmt->program->pmt_iod) pmt->program->pmt_iod->ServiceID = pmt->program->number; /*if empty IOD (freebox case), discard it and use dynamic declaration of object*/ if (!gf_list_count(pmt->program->pmt_iod->ESDescriptors)) { gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod); pmt->program->pmt_iod = NULL; } } } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken IOD! 
len %d less than 2 bytes to declare IOD\n", len)); } } else if (tag == GF_M2TS_METADATA_POINTER_DESCRIPTOR) { GF_BitStream *metadatapd_bs; GF_M2TS_MetadataPointerDescriptor *metapd; metadatapd_bs = gf_bs_new((char *)data+6, len, GF_BITSTREAM_READ); metapd = gf_m2ts_read_metadata_pointer_descriptor(metadatapd_bs, len); gf_bs_del(metadatapd_bs); if (metapd->application_format_identifier == GF_M2TS_META_ID3 && metapd->format_identifier == GF_M2TS_META_ID3 && metapd->carriage_flag == METADATA_CARRIAGE_SAME_TS) { /*HLS ID3 Metadata */ pmt->program->metadata_pointer_descriptor = metapd; } else { /* don't know what to do with it for now, delete */ gf_m2ts_metadata_pointer_descriptor_del(metapd); } } else { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) and others not supported\n", tag)); } first_loop_len += 2 + len; } } if (data_size <= 4 + info_length) return; data += 4 + info_length; data_size -= 4 + info_length; pos = 0; /* count de number of program related PMT received */ for(i=0; i<gf_list_count(ts->programs); i++) { GF_M2TS_Program *prog = (GF_M2TS_Program *)gf_list_get(ts->programs,i); if(prog->pmt_pid == pmt->pid) { break; } } nb_hevc = nb_hevc_temp = nb_shvc = nb_shvc_temp = nb_mhvc = nb_mhvc_temp = 0; while (pos<data_size) { GF_M2TS_PES *pes = NULL; GF_M2TS_SECTION_ES *ses = NULL; GF_M2TS_ES *es = NULL; Bool inherit_pcr = 0; u32 pid, stream_type, reg_desc_format; if (pos + 5 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT! size %d but position %d and need at least 5 bytes to declare es\n", data_size, pos)); break; } stream_type = data[0]; pid = ((data[1] & 0x1f) << 8) | data[2]; desc_len = ((data[3] & 0xf) << 8) | data[4]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("stream_type :%d \n",stream_type)); switch (stream_type) { /* PES */ case GF_M2TS_VIDEO_MPEG1: case GF_M2TS_VIDEO_MPEG2: case GF_M2TS_VIDEO_DCII: case GF_M2TS_VIDEO_MPEG4: case GF_M2TS_SYSTEMS_MPEG4_PES: case GF_M2TS_VIDEO_H264: case GF_M2TS_VIDEO_SVC: case GF_M2TS_VIDEO_MVCD: case GF_M2TS_VIDEO_HEVC: case GF_M2TS_VIDEO_HEVC_MCTS: case GF_M2TS_VIDEO_HEVC_TEMPORAL: case GF_M2TS_VIDEO_SHVC: case GF_M2TS_VIDEO_SHVC_TEMPORAL: case GF_M2TS_VIDEO_MHVC: case GF_M2TS_VIDEO_MHVC_TEMPORAL: inherit_pcr = 1; case GF_M2TS_AUDIO_MPEG1: case GF_M2TS_AUDIO_MPEG2: case GF_M2TS_AUDIO_AAC: case GF_M2TS_AUDIO_LATM_AAC: case GF_M2TS_AUDIO_AC3: case GF_M2TS_AUDIO_DTS: case GF_M2TS_MHAS_MAIN: case GF_M2TS_MHAS_AUX: case GF_M2TS_SUBTITLE_DVB: case GF_M2TS_METADATA_PES: GF_SAFEALLOC(pes, GF_M2TS_PES); if (!pes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } pes->cc = -1; pes->flags = GF_M2TS_ES_IS_PES; if (inherit_pcr) pes->flags |= GF_M2TS_INHERIT_PCR; es = (GF_M2TS_ES *)pes; break; case GF_M2TS_PRIVATE_DATA: GF_SAFEALLOC(pes, GF_M2TS_PES); if (!pes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } pes->cc = -1; pes->flags = GF_M2TS_ES_IS_PES; es = (GF_M2TS_ES *)pes; break; /* Sections */ case GF_M2TS_SYSTEMS_MPEG4_SECTIONS: GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES); if (!ses) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } es = (GF_M2TS_ES *)ses; es->flags |= GF_M2TS_ES_IS_SECTION; /* carriage of ISO_IEC_14496 data in sections */ if (stream_type == GF_M2TS_SYSTEMS_MPEG4_SECTIONS) { /*MPEG-4 sections need to be fully checked: if one section is lost, this means we lost one SL packet in the AU so we must wait for the complete 
section again*/ ses->sec = gf_m2ts_section_filter_new(gf_m2ts_process_mpeg4section, 0); /*create OD container*/ if (!pmt->program->additional_ods) { pmt->program->additional_ods = gf_list_new(); ts->has_4on2 = 1; } } break; case GF_M2TS_13818_6_ANNEX_A: case GF_M2TS_13818_6_ANNEX_B: case GF_M2TS_13818_6_ANNEX_C: case GF_M2TS_13818_6_ANNEX_D: case GF_M2TS_PRIVATE_SECTION: case GF_M2TS_QUALITY_SEC: case GF_M2TS_MORE_SEC: GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES); if (!ses) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } es = (GF_M2TS_ES *)ses; es->flags |= GF_M2TS_ES_IS_SECTION; es->pid = pid; es->service_id = pmt->program->number; if (stream_type == GF_M2TS_PRIVATE_SECTION) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("AIT sections on pid %d\n", pid)); } else if (stream_type == GF_M2TS_QUALITY_SEC) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Quality metadata sections on pid %d\n", pid)); } else if (stream_type == GF_M2TS_MORE_SEC) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("MORE sections on pid %d\n", pid)); } else { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type DSM CC user private sections on pid %d \n", pid)); } /* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */ ses->sec = gf_m2ts_section_filter_new(NULL, 1); //ses->sec->service_id = pmt->program->number; break; case GF_M2TS_MPE_SECTIONS: if (! ts->prefix_present) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type MPE found : pid = %d \n", pid)); #ifdef GPAC_ENABLE_MPE es = gf_dvb_mpe_section_new(); if (es->flags & GF_M2TS_ES_IS_SECTION) { /* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */ ((GF_M2TS_SECTION_ES*)es)->sec = gf_m2ts_section_filter_new(NULL, 1); } #endif break; } default: GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); //GF_LOG(/*GF_LOG_WARNING*/GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); break; } if (es) { es->stream_type = (stream_type==GF_M2TS_PRIVATE_DATA) ? 0 : stream_type; es->program = pmt->program; es->pid = pid; es->component_tag = -1; } pos += 5; data += 5; while (desc_len) { if (pos + 2 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! size %d but position %d and need at least 2 bytes to parse descritpor\n", data_size, pos)); break; } u8 tag = data[0]; u32 len = data[1]; if (pos + 2 + len > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! 
size %d, desc size %d but position %d\n", data_size, len, pos)); break; } if (es) { switch (tag) { case GF_M2TS_ISO_639_LANGUAGE_DESCRIPTOR: if (pes && (len>=3) ) pes->lang = GF_4CC(' ', data[2], data[3], data[4]); break; case GF_M2TS_MPEG4_SL_DESCRIPTOR: if (len>=2) { es->mpeg4_es_id = ( (u32) data[2] & 0x1f) << 8 | data[3]; es->flags |= GF_M2TS_ES_IS_SL; } break; case GF_M2TS_REGISTRATION_DESCRIPTOR: if (len>=4) { reg_desc_format = GF_4CC(data[2], data[3], data[4], data[5]); /*cf http://www.smpte-ra.org/mpegreg/mpegreg.html*/ switch (reg_desc_format) { case GF_M2TS_RA_STREAM_AC3: es->stream_type = GF_M2TS_AUDIO_AC3; break; case GF_M2TS_RA_STREAM_VC1: es->stream_type = GF_M2TS_VIDEO_VC1; break; case GF_M2TS_RA_STREAM_GPAC: if (len==8) { es->stream_type = GF_4CC(data[6], data[7], data[8], data[9]); es->flags |= GF_M2TS_GPAC_CODEC_ID; break; } default: GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Unknown registration descriptor %s\n", gf_4cc_to_str(reg_desc_format) )); break; } } break; case GF_M2TS_DVB_EAC3_DESCRIPTOR: es->stream_type = GF_M2TS_AUDIO_EC3; break; case GF_M2TS_DVB_DATA_BROADCAST_ID_DESCRIPTOR: if (len>=2) { u32 id = data[2]<<8 | data[3]; if ((id == 0xB) && ses && !ses->sec) { ses->sec = gf_m2ts_section_filter_new(NULL, 1); } } break; case GF_M2TS_DVB_SUBTITLING_DESCRIPTOR: if (pes && (len>=8)) { pes->sub.language[0] = data[2]; pes->sub.language[1] = data[3]; pes->sub.language[2] = data[4]; pes->sub.type = data[5]; pes->sub.composition_page_id = (data[6]<<8) | data[7]; pes->sub.ancillary_page_id = (data[8]<<8) | data[9]; } es->stream_type = GF_M2TS_DVB_SUBTITLE; break; case GF_M2TS_DVB_STREAM_IDENTIFIER_DESCRIPTOR: if (len>=1) { es->component_tag = data[2]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Component Tag: %d on Program %d\n", es->component_tag, es->program->number)); } break; case GF_M2TS_DVB_TELETEXT_DESCRIPTOR: es->stream_type = GF_M2TS_DVB_TELETEXT; break; case GF_M2TS_DVB_VBI_DATA_DESCRIPTOR: es->stream_type = GF_M2TS_DVB_VBI; break; case GF_M2TS_HIERARCHY_DESCRIPTOR: if (pes && (len>=4)) { u8 hierarchy_embedded_layer_index; GF_BitStream *hbs = gf_bs_new((const char *)data, data_size, GF_BITSTREAM_READ); /*u32 skip = */gf_bs_read_int(hbs, 16); /*u8 res1 = */gf_bs_read_int(hbs, 1); /*u8 temp_scal = */gf_bs_read_int(hbs, 1); /*u8 spatial_scal = */gf_bs_read_int(hbs, 1); /*u8 quality_scal = */gf_bs_read_int(hbs, 1); /*u8 hierarchy_type = */gf_bs_read_int(hbs, 4); /*u8 res2 = */gf_bs_read_int(hbs, 2); /*u8 hierarchy_layer_index = */gf_bs_read_int(hbs, 6); /*u8 tref_not_present = */gf_bs_read_int(hbs, 1); /*u8 res3 = */gf_bs_read_int(hbs, 1); hierarchy_embedded_layer_index = gf_bs_read_int(hbs, 6); /*u8 res4 = */gf_bs_read_int(hbs, 2); /*u8 hierarchy_channel = */gf_bs_read_int(hbs, 6); gf_bs_del(hbs); pes->depends_on_pid = 1+hierarchy_embedded_layer_index; } break; case GF_M2TS_METADATA_DESCRIPTOR: { GF_BitStream *metadatad_bs; GF_M2TS_MetadataDescriptor *metad; metadatad_bs = gf_bs_new((char *)data+2, len, GF_BITSTREAM_READ); metad = gf_m2ts_read_metadata_descriptor(metadatad_bs, len); gf_bs_del(metadatad_bs); if (metad->application_format_identifier == GF_M2TS_META_ID3 && metad->format_identifier == GF_M2TS_META_ID3) { /*HLS ID3 Metadata */ if (pes) { pes->metadata_descriptor = metad; pes->stream_type = GF_M2TS_METADATA_ID3_HLS; } } else { /* don't know what to do with it for now, delete */ gf_m2ts_metadata_descriptor_del(metad); } } break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] skipping descriptor (0x%x) not supported\n", tag)); break; } } data += 
len+2; pos += len+2; if (desc_len < len+2) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid PMT es descriptor size for PID %d\n", pid ) ); break; } desc_len-=len+2; } if (es && !es->stream_type) { gf_free(es); es = NULL; GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Private Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); } if (!es) continue; if (ts->ess[pid]) { //this is component reuse across programs, overwrite the previously declared stream ... if (status & GF_M2TS_TABLE_FOUND) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d reused across programs %d and %d, not completely supported\n", pid, ts->ess[pid]->program->number, es->program->number ) ); //add stream to program but don't reassign the pid table until the stream is playing (>GF_M2TS_PES_FRAMING_SKIP) gf_list_add(pmt->program->streams, es); if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP); nb_es++; //skip assignment below es = NULL; } /*watchout for pmt update - FIXME this likely won't work in most cases*/ else { GF_M2TS_ES *o_es = ts->ess[es->pid]; if ((o_es->stream_type == es->stream_type) && ((o_es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK) == (es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK)) && (o_es->mpeg4_es_id == es->mpeg4_es_id) && ((o_es->flags & GF_M2TS_ES_IS_SECTION) || ((GF_M2TS_PES *)o_es)->lang == ((GF_M2TS_PES *)es)->lang) ) { gf_free(es); es = NULL; } else { gf_m2ts_es_del(o_es, ts); ts->ess[es->pid] = NULL; } } } if (es) { ts->ess[es->pid] = es; gf_list_add(pmt->program->streams, es); if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP); nb_es++; if (es->stream_type == GF_M2TS_VIDEO_HEVC) nb_hevc++; else if (es->stream_type == GF_M2TS_VIDEO_HEVC_TEMPORAL) nb_hevc_temp++; else if (es->stream_type == GF_M2TS_VIDEO_SHVC) nb_shvc++; else if (es->stream_type == GF_M2TS_VIDEO_SHVC_TEMPORAL) nb_shvc_temp++; else if (es->stream_type == GF_M2TS_VIDEO_MHVC) nb_mhvc++; else if (es->stream_type == GF_M2TS_VIDEO_MHVC_TEMPORAL) nb_mhvc_temp++; } } //Table 2-139, implied hierarchy indexes if (nb_hevc_temp + nb_shvc + nb_shvc_temp + nb_mhvc+ nb_mhvc_temp) { for (i=0; i<gf_list_count(pmt->program->streams); i++) { GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i); if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue; if (es->depends_on_pid) continue; switch (es->stream_type) { case GF_M2TS_VIDEO_HEVC_TEMPORAL: es->depends_on_pid = 1; break; case GF_M2TS_VIDEO_SHVC: if (!nb_hevc_temp) es->depends_on_pid = 1; else es->depends_on_pid = 2; break; case GF_M2TS_VIDEO_SHVC_TEMPORAL: es->depends_on_pid = 3; break; case GF_M2TS_VIDEO_MHVC: if (!nb_hevc_temp) es->depends_on_pid = 1; else es->depends_on_pid = 2; break; case GF_M2TS_VIDEO_MHVC_TEMPORAL: if (!nb_hevc_temp) es->depends_on_pid = 2; else es->depends_on_pid = 3; break; } } } if (nb_es) { u32 i; //translate hierarchy descriptors indexes into PIDs - check whether the PMT-index rules are the same for HEVC for (i=0; i<gf_list_count(pmt->program->streams); i++) { GF_M2TS_PES *an_es = NULL; GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i); if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue; if (!es->depends_on_pid) continue; //fixeme we are not always assured that hierarchy_layer_index matches the stream index... 
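			/* depends_on_pid is not yet a PID at this point: it holds an index into
			   pmt->program->streams (hierarchy_embedded_layer_index + 1, or the implied
			   index set just above), stream 0 of a program being the PMT section filter.
			   The lookup below resolves that index to the dependent stream and rewrites
			   depends_on_pid with that stream's real PID, or 0 (non-scalable) when no
			   such stream exists. */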
//+1 is because our first stream is the PMT an_es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, es->depends_on_pid); if (an_es) { es->depends_on_pid = an_es->pid; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS] Wrong dependency index in hierarchy descriptor, assuming non-scalable stream\n")); es->depends_on_pid = 0; } } evt_type = (status&GF_M2TS_TABLE_FOUND) ? GF_M2TS_EVT_PMT_FOUND : GF_M2TS_EVT_PMT_UPDATE; if (ts->on_event) ts->on_event(ts, evt_type, pmt->program); } else { /* if we found no new ES it's simply a repeat of the PMT */ if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program); } } static void gf_m2ts_process_pat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_M2TS_Program *prog; GF_M2TS_SECTION_ES *pmt; u32 i, nb_progs, evt_type; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PAT_REPEAT, NULL); return; } nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PAT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; if (!(status&GF_M2TS_TABLE_UPDATE) && gf_list_count(ts->programs)) { if (ts->pat->demux_restarted) { ts->pat->demux_restarted = 0; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Multiple different PAT on single TS found, ignoring new PAT declaration (table id %d - extended table id %d)\n", table_id, ex_table_id)); } return; } nb_progs = data_size / 4; for (i=0; i<nb_progs; i++) { u16 number, pid; number = (data[0]<<8) | data[1]; pid = (data[2]&0x1f)<<8 | data[3]; data += 4; if (number==0) { if (!ts->nit) { ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); } } else { GF_SAFEALLOC(prog, GF_M2TS_Program); if (!prog) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate program for pid %d\n", pid)); return; } prog->streams = gf_list_new(); prog->pmt_pid = pid; prog->number = number; prog->ts = ts; gf_list_add(ts->programs, prog); GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES); if (!pmt) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate pmt filter for pid %d\n", pid)); return; } pmt->flags = GF_M2TS_ES_IS_SECTION; gf_list_add(prog->streams, pmt); pmt->pid = prog->pmt_pid; pmt->program = prog; ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt; pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0); } } evt_type = (status&GF_M2TS_TABLE_UPDATE) ? 
GF_M2TS_EVT_PAT_UPDATE : GF_M2TS_EVT_PAT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } static void gf_m2ts_process_cat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 evt_type; /* GF_M2TS_Program *prog; GF_M2TS_SECTION_ES *pmt; u32 i, nb_progs; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; */ /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_CAT_REPEAT, NULL); return; } /* nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("CAT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; nb_progs = data_size / 4; for (i=0; i<nb_progs; i++) { u16 number, pid; number = (data[0]<<8) | data[1]; pid = (data[2]&0x1f)<<8 | data[3]; data += 4; if (number==0) { if (!ts->nit) { ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); } } else { GF_SAFEALLOC(prog, GF_M2TS_Program); prog->streams = gf_list_new(); prog->pmt_pid = pid; prog->number = number; gf_list_add(ts->programs, prog); GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES); pmt->flags = GF_M2TS_ES_IS_SECTION; gf_list_add(prog->streams, pmt); pmt->pid = prog->pmt_pid; pmt->program = prog; ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt; pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0); } } */ evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_CAT_UPDATE : GF_M2TS_EVT_CAT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } u64 gf_m2ts_get_pts(unsigned char *data) { u64 pts; u32 val; pts = (u64)((data[0] >> 1) & 0x07) << 30; val = (data[1] << 8) | data[2]; pts |= (u64)(val >> 1) << 15; val = (data[3] << 8) | data[4]; pts |= (u64)(val >> 1); return pts; } void gf_m2ts_pes_header(GF_M2TS_PES *pes, unsigned char *data, u32 data_size, GF_M2TS_PESHeader *pesh) { u32 has_pts, has_dts; u32 len_check; memset(pesh, 0, sizeof(GF_M2TS_PESHeader)); len_check = 0; pesh->id = data[0]; pesh->pck_len = (data[1]<<8) | data[2]; /* 2bits scrambling_control = gf_bs_read_int(bs,2); priority = gf_bs_read_int(bs,1); */ pesh->data_alignment = (data[3] & 0x4) ? 1 : 0; /* copyright = gf_bs_read_int(bs,1); original = gf_bs_read_int(bs,1); */ has_pts = (data[4]&0x80); has_dts = has_pts ? 
(data[4]&0x40) : 0; /* ESCR_flag = gf_bs_read_int(bs,1); ES_rate_flag = gf_bs_read_int(bs,1); DSM_flag = gf_bs_read_int(bs,1); additional_copy_flag = gf_bs_read_int(bs,1); prev_crc_flag = gf_bs_read_int(bs,1); extension_flag = gf_bs_read_int(bs,1); */ pesh->hdr_data_len = data[5]; data += 6; if (has_pts) { pesh->PTS = gf_m2ts_get_pts(data); data+=5; len_check += 5; } if (has_dts) { pesh->DTS = gf_m2ts_get_pts(data); //data+=5; len_check += 5; } else { pesh->DTS = pesh->PTS; } if (len_check < pesh->hdr_data_len) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Skipping %d bytes in pes header\n", pes->pid, pesh->hdr_data_len - len_check)); } else if (len_check > pesh->hdr_data_len) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong pes_header_data_length field %d bytes - read %d\n", pes->pid, pesh->hdr_data_len, len_check)); } if ((pesh->PTS<90000) && ((s32)pesh->DTS<0)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong DTS %d negative for PTS %d - forcing to 0\n", pes->pid, pesh->DTS, pesh->PTS)); pesh->DTS=0; } } static void gf_m2ts_store_temi(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes) { GF_BitStream *bs = gf_bs_new(pes->temi_tc_desc, pes->temi_tc_desc_len, GF_BITSTREAM_READ); u32 has_timestamp = gf_bs_read_int(bs, 2); Bool has_ntp = (Bool) gf_bs_read_int(bs, 1); /*u32 has_ptp = */gf_bs_read_int(bs, 1); /*u32 has_timecode = */gf_bs_read_int(bs, 2); memset(&pes->temi_tc, 0, sizeof(GF_M2TS_TemiTimecodeDescriptor)); pes->temi_tc.force_reload = gf_bs_read_int(bs, 1); pes->temi_tc.is_paused = gf_bs_read_int(bs, 1); pes->temi_tc.is_discontinuity = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 7); pes->temi_tc.timeline_id = gf_bs_read_int(bs, 8); if (has_timestamp) { pes->temi_tc.media_timescale = gf_bs_read_u32(bs); if (has_timestamp==2) pes->temi_tc.media_timestamp = gf_bs_read_u64(bs); else pes->temi_tc.media_timestamp = gf_bs_read_u32(bs); } if (has_ntp) { pes->temi_tc.ntp = gf_bs_read_u64(bs); } gf_bs_del(bs); pes->temi_tc_desc_len = 0; pes->temi_pending = 1; } void gf_m2ts_flush_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes) { GF_M2TS_PESHeader pesh; if (!ts) return; /*we need at least a full, valid start code and PES header !!*/ if ((pes->pck_data_len >= 4) && !pes->pck_data[0] && !pes->pck_data[1] && (pes->pck_data[2] == 0x1)) { u32 len; Bool has_pes_header = GF_TRUE; u32 stream_id = pes->pck_data[3]; Bool same_pts = GF_FALSE; switch (stream_id) { case GF_M2_STREAMID_PROGRAM_STREAM_MAP: case GF_M2_STREAMID_PADDING: case GF_M2_STREAMID_PRIVATE_2: case GF_M2_STREAMID_ECM: case GF_M2_STREAMID_EMM: case GF_M2_STREAMID_PROGRAM_STREAM_DIRECTORY: case GF_M2_STREAMID_DSMCC: case GF_M2_STREAMID_H222_TYPE_E: has_pes_header = GF_FALSE; break; } if (has_pes_header) { /*OK read header*/ gf_m2ts_pes_header(pes, pes->pck_data + 3, pes->pck_data_len - 3, &pesh); /*send PES timing*/ if (ts->notify_pes_timing) { GF_M2TS_PES_PCK pck; memset(&pck, 0, sizeof(GF_M2TS_PES_PCK)); pck.PTS = pesh.PTS; pck.DTS = pesh.DTS; pck.stream = pes; if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP; pes->pes_end_packet_number = ts->pck_number; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PES_TIMING, &pck); } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Got PES header DTS %d PTS %d\n", pes->pid, pesh.DTS, pesh.PTS)); if (pesh.PTS) { if (pesh.PTS == pes->PTS) { same_pts = GF_TRUE; GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same PTS "LLU" for two consecutive PES packets \n", pes->pid, pes->PTS)); } #ifndef GPAC_DISABLE_LOG /*FIXME - this test should only 
be done for non bi-directionnally coded media else if (pesh.PTS < pes->PTS) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - PTS "LLU" less than previous packet PTS "LLU"\n", pes->pid, pesh.PTS, pes->PTS) ); } */ #endif pes->PTS = pesh.PTS; #ifndef GPAC_DISABLE_LOG { if (pes->DTS && (pesh.DTS == pes->DTS)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same DTS "LLU" for two consecutive PES packets \n", pes->pid, pes->DTS)); } if (pesh.DTS < pes->DTS) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - DTS "LLU" less than previous DTS "LLU"\n", pes->pid, pesh.DTS, pes->DTS)); } } #endif pes->DTS = pesh.DTS; } /*no PTSs were coded, same time*/ else if (!pesh.hdr_data_len) { same_pts = GF_TRUE; } /*3-byte start-code + 6 bytes header + hdr extensions*/ len = 9 + pesh.hdr_data_len; } else { /*3-byte start-code + 1 byte streamid*/ len = 4; memset(&pesh, 0, sizeof(pesh)); } if ((u8) pes->pck_data[3]==0xfa) { GF_M2TS_SL_PCK sl_pck; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] SL Packet in PES for %d - ES ID %d\n", pes->pid, pes->mpeg4_es_id)); if (pes->pck_data_len > len) { sl_pck.data = (char *)pes->pck_data + len; sl_pck.data_len = pes->pck_data_len - len; sl_pck.stream = (GF_M2TS_ES *)pes; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Bad SL Packet size: (%d indicated < %d header)\n", pes->pid, pes->pck_data_len, len)); } } else if (pes->reframe) { u32 remain = 0; u32 offset = len; if (pesh.pck_len && (pesh.pck_len-3-pesh.hdr_data_len != pes->pck_data_len-len)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES payload size %d but received %d bytes\n", pes->pid, (u32) ( pesh.pck_len-3-pesh.hdr_data_len), pes->pck_data_len-len)); } //copy over the remaining of previous PES payload before start of this PES payload if (pes->prev_data_len) { if (pes->prev_data_len < len) { offset = len - pes->prev_data_len; memcpy(pes->pck_data + offset, pes->prev_data, pes->prev_data_len); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES reassembly buffer overflow (%d bytes not processed from previous PES) - discarding prev data\n", pes->pid, pes->prev_data_len )); } } if (!pes->temi_pending && pes->temi_tc_desc_len) { gf_m2ts_store_temi(ts, pes); } if (pes->temi_pending) { pes->temi_pending = 0; pes->temi_tc.pes_pts = pes->PTS; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_TIMECODE, &pes->temi_tc); } if (! ts->seek_mode) remain = pes->reframe(ts, pes, same_pts, pes->pck_data+offset, pes->pck_data_len-offset, &pesh); //CLEANUP alloc stuff if (pes->prev_data) gf_free(pes->prev_data); pes->prev_data = NULL; pes->prev_data_len = 0; if (remain) { pes->prev_data = gf_malloc(sizeof(char)*remain); assert(pes->pck_data_len >= remain); memcpy(pes->prev_data, pes->pck_data + pes->pck_data_len - remain, remain); pes->prev_data_len = remain; } } } else if (pes->pck_data_len) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Bad PES Header, discarding packet (maybe stream is encrypted ?)\n", pes->pid)); } pes->pck_data_len = 0; pes->pes_len = 0; pes->rap = 0; } static void gf_m2ts_process_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size, GF_M2TS_AdaptationField *paf) { u8 expect_cc; Bool disc=0; Bool flush_pes = 0; /*duplicated packet, NOT A DISCONTINUITY, we should discard the packet - however we may encounter this configuration in DASH at segment boundaries. 
If payload start is set, ignore duplication*/ if (hdr->continuity_counter==pes->cc) { if (!hdr->payload_start || (hdr->adaptation_field!=3) ) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Duplicated Packet found (CC %d) - skipping\n", pes->pid, pes->cc)); return; } } else { expect_cc = (pes->cc<0) ? hdr->continuity_counter : (pes->cc + 1) & 0xf; if (expect_cc != hdr->continuity_counter) disc = 1; } pes->cc = hdr->continuity_counter; if (disc) { if (pes->flags & GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY) { pes->flags &= ~GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY; disc = 0; } if (disc) { if (hdr->payload_start) { if (pes->pck_data_len) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - may have lost end of previous PES\n", pes->pid, expect_cc, hdr->continuity_counter)); } } else { if (pes->pck_data_len) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - trashing PES packet\n", pes->pid, expect_cc, hdr->continuity_counter)); } pes->pck_data_len = 0; pes->pes_len = 0; pes->cc = -1; return; } } } if (!pes->reframe) return; if (hdr->payload_start) { flush_pes = 1; pes->pes_start_packet_number = ts->pck_number; pes->before_last_pcr_value = pes->program->before_last_pcr_value; pes->before_last_pcr_value_pck_number = pes->program->before_last_pcr_value_pck_number; pes->last_pcr_value = pes->program->last_pcr_value; pes->last_pcr_value_pck_number = pes->program->last_pcr_value_pck_number; } else if (pes->pes_len && (pes->pck_data_len + data_size == pes->pes_len + 6)) { /* 6 = startcode+stream_id+length*/ /*reassemble pes*/ if (pes->pck_data_len + data_size > pes->pck_alloc_len) { pes->pck_alloc_len = pes->pck_data_len + data_size; pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len); } memcpy(pes->pck_data+pes->pck_data_len, data, data_size); pes->pck_data_len += data_size; /*force discard*/ data_size = 0; flush_pes = 1; } /*PES first fragment: flush previous packet*/ if (flush_pes && pes->pck_data_len) { gf_m2ts_flush_pes(ts, pes); if (!data_size) return; } /*we need to wait for first packet of PES*/ if (!pes->pck_data_len && !hdr->payload_start) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Waiting for PES header, trashing data\n", hdr->pid)); return; } /*reassemble*/ if (pes->pck_data_len + data_size > pes->pck_alloc_len ) { pes->pck_alloc_len = pes->pck_data_len + data_size; pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len); } memcpy(pes->pck_data + pes->pck_data_len, data, data_size); pes->pck_data_len += data_size; if (paf && paf->random_access_indicator) pes->rap = 1; if (hdr->payload_start && !pes->pes_len && (pes->pck_data_len>=6)) { pes->pes_len = (pes->pck_data[4]<<8) | pes->pck_data[5]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Got PES packet len %d\n", pes->pid, pes->pes_len)); if (pes->pes_len + 6 == pes->pck_data_len) { gf_m2ts_flush_pes(ts, pes); } } } static void gf_m2ts_get_adaptation_field(GF_M2TS_Demuxer *ts, GF_M2TS_AdaptationField *paf, unsigned char *data, u32 size, u32 pid) { unsigned char *af_extension; paf->discontinuity_indicator = (data[0] & 0x80) ? 1 : 0; paf->random_access_indicator = (data[0] & 0x40) ? 1 : 0; paf->priority_indicator = (data[0] & 0x20) ? 1 : 0; paf->PCR_flag = (data[0] & 0x10) ? 1 : 0; paf->OPCR_flag = (data[0] & 0x8) ? 1 : 0; paf->splicing_point_flag = (data[0] & 0x4) ? 1 : 0; paf->transport_private_data_flag = (data[0] & 0x2) ? 
1 : 0; paf->adaptation_field_extension_flag = (data[0] & 0x1) ? 1 : 0; af_extension = data + 1; if (paf->PCR_flag == 1) { u32 base = ((u32)data[1] << 24) | ((u32)data[2] << 16) | ((u32)data[3] << 8) | (u32)data[4]; u64 PCR = (u64) base; paf->PCR_base = (PCR << 1) | (data[5] >> 7); paf->PCR_ext = ((data[5] & 1) << 8) | data[6]; af_extension += 6; } if (paf->adaptation_field_extension_flag) { u32 afext_bytes; Bool ltw_flag, pwr_flag, seamless_flag, af_desc_not_present; if (paf->OPCR_flag) { af_extension += 6; } if (paf->splicing_point_flag) { af_extension += 1; } if (paf->transport_private_data_flag) { u32 priv_bytes = af_extension[0]; af_extension += 1 + priv_bytes; } afext_bytes = af_extension[0]; ltw_flag = af_extension[1] & 0x80 ? 1 : 0; pwr_flag = af_extension[1] & 0x40 ? 1 : 0; seamless_flag = af_extension[1] & 0x20 ? 1 : 0; af_desc_not_present = af_extension[1] & 0x10 ? 1 : 0; af_extension += 2; if (!afext_bytes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid)); return; } afext_bytes-=1; if (ltw_flag) { af_extension += 2; if (afext_bytes<2) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid)); return; } afext_bytes-=2; } if (pwr_flag) { af_extension += 3; if (afext_bytes<3) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid)); return; } afext_bytes-=3; } if (seamless_flag) { af_extension += 3; if (afext_bytes<3) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid)); return; } afext_bytes-=3; } if (! af_desc_not_present) { while (afext_bytes) { GF_BitStream *bs; char *desc; u8 desc_tag = af_extension[0]; u8 desc_len = af_extension[1]; if (!desc_len || (u32) desc_len+2 > afext_bytes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Descriptor found (tag %d) size is %d but only %d bytes available\n", pid, desc_tag, desc_len, afext_bytes)); break; } desc = (char *) af_extension+2; bs = gf_bs_new(desc, desc_len, GF_BITSTREAM_READ); switch (desc_tag) { case GF_M2TS_AFDESC_LOCATION_DESCRIPTOR: { Bool use_base_temi_url; char URL[255]; GF_M2TS_TemiLocationDescriptor temi_loc; memset(&temi_loc, 0, sizeof(GF_M2TS_TemiLocationDescriptor) ); temi_loc.reload_external = gf_bs_read_int(bs, 1); temi_loc.is_announce = gf_bs_read_int(bs, 1); temi_loc.is_splicing = gf_bs_read_int(bs, 1); use_base_temi_url = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 5); //reserved temi_loc.timeline_id = gf_bs_read_int(bs, 7); if (!use_base_temi_url) { char *_url = URL; u8 scheme = gf_bs_read_int(bs, 8); u8 url_len = gf_bs_read_int(bs, 8); switch (scheme) { case 1: strcpy(URL, "http://"); _url = URL+7; break; case 2: strcpy(URL, "https://"); _url = URL+8; break; } gf_bs_read_data(bs, _url, url_len); _url[url_len] = 0; } temi_loc.external_URL = URL; GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Location descriptor found - URL %s\n", pid, URL)); if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_LOCATION, &temi_loc); } break; case GF_M2TS_AFDESC_TIMELINE_DESCRIPTOR: if (ts->ess[pid] && (ts->ess[pid]->flags & GF_M2TS_ES_IS_PES)) { GF_M2TS_PES *pes = (GF_M2TS_PES *) ts->ess[pid]; if (pes->temi_tc_desc_len) gf_m2ts_store_temi(ts, pes); if (pes->temi_tc_desc_alloc_size < desc_len) { pes->temi_tc_desc = gf_realloc(pes->temi_tc_desc, desc_len); pes->temi_tc_desc_alloc_size = desc_len; } memcpy(pes->temi_tc_desc, desc, desc_len); pes->temi_tc_desc_len = desc_len; GF_LOG(GF_LOG_DEBUG, 
GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Timeline descriptor found\n", pid)); } break; } gf_bs_del(bs); af_extension += 2+desc_len; afext_bytes -= 2+desc_len; } } } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Adaptation Field found: Discontinuity %d - RAP %d - PCR: "LLD"\n", pid, paf->discontinuity_indicator, paf->random_access_indicator, paf->PCR_flag ? paf->PCR_base * 300 + paf->PCR_ext : 0)); } static GF_Err gf_m2ts_process_packet(GF_M2TS_Demuxer *ts, unsigned char *data) { GF_M2TS_ES *es; GF_M2TS_Header hdr; GF_M2TS_AdaptationField af, *paf; u32 payload_size, af_size; u32 pos = 0; ts->pck_number++; /* read TS packet header*/ hdr.sync = data[0]; if (hdr.sync != 0x47) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d does not start with sync marker\n", ts->pck_number)); return GF_CORRUPTED_DATA; } hdr.error = (data[1] & 0x80) ? 1 : 0; hdr.payload_start = (data[1] & 0x40) ? 1 : 0; hdr.priority = (data[1] & 0x20) ? 1 : 0; hdr.pid = ( (data[1]&0x1f) << 8) | data[2]; hdr.scrambling_ctrl = (data[3] >> 6) & 0x3; hdr.adaptation_field = (data[3] >> 4) & 0x3; hdr.continuity_counter = data[3] & 0xf; if (hdr.error) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d has error (PID could be %d)\n", ts->pck_number, hdr.pid)); return GF_CORRUPTED_DATA; } //#if DEBUG_TS_PACKET GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d PID %d CC %d Encrypted %d\n", ts->pck_number, hdr.pid, hdr.continuity_counter, hdr.scrambling_ctrl)); //#endif if (hdr.scrambling_ctrl) { //TODO add decyphering GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d is scrambled - not supported\n", ts->pck_number, hdr.pid)); return GF_NOT_SUPPORTED; } paf = NULL; payload_size = 184; pos = 4; switch (hdr.adaptation_field) { /*adaptation+data*/ case 3: af_size = data[4]; if (af_size>183) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF field larger than 183 !\n", ts->pck_number)); //error return GF_CORRUPTED_DATA; } paf = &af; memset(paf, 0, sizeof(GF_M2TS_AdaptationField)); //this will stop you when processing invalid (yet existing) mpeg2ts streams in debug assert( af_size<=183); if (af_size>183) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d Detected wrong adaption field size %u when control value is 3\n", ts->pck_number, af_size)); if (af_size) gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid); pos += 1+af_size; payload_size = 183 - af_size; break; /*adaptation only - still process in case of PCR*/ case 2: af_size = data[4]; if (af_size != 183) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF size is %d when it must be 183 for AF type 2\n", ts->pck_number, af_size)); return GF_CORRUPTED_DATA; } paf = &af; memset(paf, 0, sizeof(GF_M2TS_AdaptationField)); gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid); payload_size = 0; /*no payload and no PCR, return*/ if (!paf->PCR_flag) return GF_OK; break; /*reserved*/ case 0: return GF_OK; default: break; } data += pos; /*PAT*/ if (hdr.pid == GF_M2TS_PID_PAT) { gf_m2ts_gather_section(ts, ts->pat, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_CAT) { gf_m2ts_gather_section(ts, ts->cat, NULL, &hdr, data, payload_size); return GF_OK; } es = ts->ess[hdr.pid]; if (paf && paf->PCR_flag) { if (!es) { u32 i, j; for(i=0; i<gf_list_count(ts->programs); i++) { GF_M2TS_PES *first_pes = NULL; GF_M2TS_Program *program = (GF_M2TS_Program *)gf_list_get(ts->programs,i); if(program->pcr_pid != 
hdr.pid) continue; for (j=0; j<gf_list_count(program->streams); j++) { GF_M2TS_PES *pes = (GF_M2TS_PES *) gf_list_get(program->streams, j); if (pes->flags & GF_M2TS_INHERIT_PCR) { ts->ess[hdr.pid] = (GF_M2TS_ES *) pes; pes->flags |= GF_M2TS_FAKE_PCR; break; } if (pes->flags & GF_M2TS_ES_IS_PES) { first_pes = pes; } } //non found, use the first media stream as a PCR destination - Q: is it legal to have PCR only streams not declared in PMT ? if (!es && first_pes) { es = (GF_M2TS_ES *) first_pes; first_pes->flags |= GF_M2TS_FAKE_PCR; } break; } if (!es) es = ts->ess[hdr.pid]; } if (es) { GF_M2TS_PES_PCK pck; s64 prev_diff_in_us; Bool discontinuity; s32 cc = -1; if (es->flags & GF_M2TS_FAKE_PCR) { cc = es->program->pcr_cc; es->program->pcr_cc = hdr.continuity_counter; } else if (es->flags & GF_M2TS_ES_IS_PES) cc = ((GF_M2TS_PES*)es)->cc; else if (((GF_M2TS_SECTION_ES*)es)->sec) cc = ((GF_M2TS_SECTION_ES*)es)->sec->cc; discontinuity = paf->discontinuity_indicator; if ((cc>=0) && es->program->before_last_pcr_value) { //no increment of CC if AF only packet if (hdr.adaptation_field == 2) { if (hdr.continuity_counter != cc) { discontinuity = GF_TRUE; } } else if (hdr.continuity_counter != ((cc + 1) & 0xF)) { discontinuity = GF_TRUE; } } memset(&pck, 0, sizeof(GF_M2TS_PES_PCK)); prev_diff_in_us = (s64) (es->program->last_pcr_value /27- es->program->before_last_pcr_value/27); es->program->before_last_pcr_value = es->program->last_pcr_value; es->program->before_last_pcr_value_pck_number = es->program->last_pcr_value_pck_number; es->program->last_pcr_value_pck_number = ts->pck_number; es->program->last_pcr_value = paf->PCR_base * 300 + paf->PCR_ext; if (!es->program->last_pcr_value) es->program->last_pcr_value = 1; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" ("LLU" at 90kHz) - PCR diff is %d us\n", hdr.pid, es->program->last_pcr_value, es->program->last_pcr_value/300, (s32) (es->program->last_pcr_value - es->program->before_last_pcr_value)/27 )); pck.PTS = es->program->last_pcr_value; pck.stream = (GF_M2TS_PES *)es; //try to ignore all discontinuities that are less than 200 ms (seen in some HLS setup ...) 
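			/* Units used by the discontinuity checks below: the PCR is a 27 MHz clock,
			   rebuilt above as PCR_base (33 bits at 90 kHz) * 300 + PCR_ext (0..299),
			   so dividing a PCR difference by 27 yields microseconds - the 200000
			   thresholds below are 200 ms expressed in that unit. */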
if (discontinuity) { s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27; u64 diff = ABS(diff_in_us - prev_diff_in_us); if ((diff_in_us<0) && (diff_in_us >= -200000)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, with discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us)); } //ignore PCR discontinuity indicator if PCR found is larger than previously received PCR and diffence between PCR before and after discontinuity indicator is smaller than 50ms else if ((diff_in_us > 0) && (diff < 200000)) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled but diff is small (diff %d us - PCR diff %d vs prev PCR diff %d) - ignore it\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); } else if (paf->discontinuity_indicator) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity not signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } } else if ( (es->program->last_pcr_value < es->program->before_last_pcr_value) ) { s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27; //if less than 200 ms before PCR loop at the last PCR, this is a PCR loop if (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value < 5400000 /*2*2700000*/) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR loop found from "LLU" to "LLU" \n", hdr.pid, es->program->before_last_pcr_value, es->program->last_pcr_value)); } else if ((diff_in_us<0) && (diff_in_us >= -200000)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, without discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us)); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" is less than previously received PCR "LLU" (PCR diff %g sec) but no discontinuity signaled\n", hdr.pid, es->program->last_pcr_value, es->program->before_last_pcr_value, (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value + es->program->last_pcr_value) / 27000000.0)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } } if (pck.flags & GF_M2TS_PES_PCK_DISCONTINUITY) { gf_m2ts_reset_parsers_for_program(ts, es->program); } if (ts->on_event) { ts->on_event(ts, GF_M2TS_EVT_PES_PCR, &pck); } } } /*check for DVB reserved PIDs*/ if (!es) { if (hdr.pid == GF_M2TS_PID_SDT_BAT_ST) { gf_m2ts_gather_section(ts, ts->sdt, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_NIT_ST) { /*ignore them, unused at application level*/ gf_m2ts_gather_section(ts, ts->nit, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_EIT_ST_CIT) { /* ignore EIT messages for the moment */ gf_m2ts_gather_section(ts, ts->eit, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_TDT_TOT_ST) { gf_m2ts_gather_section(ts, ts->tdt_tot, NULL, &hdr, data, payload_size); } else { /* ignore packet */ } } else if (es->flags & GF_M2TS_ES_IS_SECTION) { /* The stream uses sections to carry its payload */ GF_M2TS_SECTION_ES 
*ses = (GF_M2TS_SECTION_ES *)es; if (ses->sec) gf_m2ts_gather_section(ts, ses->sec, ses, &hdr, data, payload_size); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; /* regular stream using PES packets */ if (pes->reframe && payload_size) gf_m2ts_process_pes(ts, pes, &hdr, data, payload_size, paf); } return GF_OK; } GF_EXPORT GF_Err gf_m2ts_process_data(GF_M2TS_Demuxer *ts, u8 *data, u32 data_size) { GF_Err e=GF_OK; u32 pos, pck_size; Bool is_align = 1; if (ts->buffer_size) { //we are sync, copy remaining bytes if ( (ts->buffer[0]==0x47) && (ts->buffer_size<200)) { u32 pck_size = ts->prefix_present ? 192 : 188; if (ts->alloc_size < 200) { ts->alloc_size = 200; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer + ts->buffer_size, data, pck_size - ts->buffer_size); e |= gf_m2ts_process_packet(ts, (unsigned char *)ts->buffer); data += (pck_size - ts->buffer_size); data_size = data_size - (pck_size - ts->buffer_size); } //not sync, copy over the complete buffer else { if (ts->alloc_size < ts->buffer_size+data_size) { ts->alloc_size = ts->buffer_size+data_size; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer + ts->buffer_size, data, sizeof(char)*data_size); ts->buffer_size += data_size; is_align = 0; data = ts->buffer; data_size = ts->buffer_size; } } /*sync input data*/ pos = gf_m2ts_sync(ts, data, data_size, is_align); if (pos==data_size) { if (is_align) { if (ts->alloc_size<data_size) { ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*data_size); ts->alloc_size = data_size; } memcpy(ts->buffer, data, sizeof(char)*data_size); ts->buffer_size = data_size; } return GF_OK; } pck_size = ts->prefix_present ? 192 : 188; for (;;) { /*wait for a complete packet*/ if (data_size < pos + pck_size) { ts->buffer_size = data_size - pos; data += pos; if (!ts->buffer_size) { return e; } assert(ts->buffer_size<pck_size); if (is_align) { u32 s = ts->buffer_size; if (s<200) s = 200; if (ts->alloc_size < s) { ts->alloc_size = s; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer, data, sizeof(char)*ts->buffer_size); } else { memmove(ts->buffer, data, sizeof(char)*ts->buffer_size); } return e; } /*process*/ e |= gf_m2ts_process_packet(ts, (unsigned char *)data + pos); pos += pck_size; } return e; } //unused #if 0 GF_ESD *gf_m2ts_get_esd(GF_M2TS_ES *es) { GF_ESD *esd; u32 k, esd_count; esd = NULL; if (es->program->pmt_iod && es->program->pmt_iod->ESDescriptors) { esd_count = gf_list_count(es->program->pmt_iod->ESDescriptors); for (k = 0; k < esd_count; k++) { GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(es->program->pmt_iod->ESDescriptors, k); if (esd_tmp->ESID != es->mpeg4_es_id) continue; esd = esd_tmp; break; } } if (!esd && es->program->additional_ods) { u32 od_count, od_index; od_count = gf_list_count(es->program->additional_ods); for (od_index = 0; od_index < od_count; od_index++) { GF_ObjectDescriptor *od = (GF_ObjectDescriptor *)gf_list_get(es->program->additional_ods, od_index); esd_count = gf_list_count(od->ESDescriptors); for (k = 0; k < esd_count; k++) { GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(od->ESDescriptors, k); if (esd_tmp->ESID != es->mpeg4_es_id) continue; esd = esd_tmp; break; } } } return esd; } void gf_m2ts_set_segment_switch(GF_M2TS_Demuxer *ts) { u32 i; for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i]; if (!es) continue; es->flags |= GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY; } } #endif GF_EXPORT void 
gf_m2ts_reset_parsers_for_program(GF_M2TS_Demuxer *ts, GF_M2TS_Program *prog) { u32 i; for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i]; if (!es) continue; if (prog && (es->program != prog) ) continue; if (es->flags & GF_M2TS_ES_IS_SECTION) { GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es; gf_m2ts_section_filter_reset(ses->sec); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; if (!pes || (pes->pid==pes->program->pmt_pid)) continue; pes->cc = -1; pes->frame_state = 0; pes->pck_data_len = 0; if (pes->prev_data) gf_free(pes->prev_data); pes->prev_data = NULL; pes->prev_data_len = 0; pes->PTS = pes->DTS = 0; // pes->prev_PTS = 0; // pes->first_dts = 0; pes->pes_len = pes->pes_end_packet_number = pes->pes_start_packet_number = 0; if (pes->buf) gf_free(pes->buf); pes->buf = NULL; if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc); pes->temi_tc_desc = NULL; pes->temi_tc_desc_len = pes->temi_tc_desc_alloc_size = 0; pes->before_last_pcr_value = pes->before_last_pcr_value_pck_number = 0; pes->last_pcr_value = pes->last_pcr_value_pck_number = 0; if (pes->program->pcr_pid==pes->pid) { pes->program->last_pcr_value = pes->program->last_pcr_value_pck_number = 0; pes->program->before_last_pcr_value = pes->program->before_last_pcr_value_pck_number = 0; } } } } GF_EXPORT void gf_m2ts_reset_parsers(GF_M2TS_Demuxer *ts) { gf_m2ts_reset_parsers_for_program(ts, NULL); ts->pck_number = 0; gf_m2ts_section_filter_reset(ts->cat); gf_m2ts_section_filter_reset(ts->pat); gf_m2ts_section_filter_reset(ts->sdt); gf_m2ts_section_filter_reset(ts->nit); gf_m2ts_section_filter_reset(ts->eit); gf_m2ts_section_filter_reset(ts->tdt_tot); } #if 0 //unused u32 gf_m2ts_pes_get_framing_mode(GF_M2TS_PES *pes) { if (pes->flags & GF_M2TS_ES_IS_SECTION) { if (pes->flags & GF_M2TS_ES_IS_SL) { if ( ((GF_M2TS_SECTION_ES *)pes)->sec->process_section == NULL) return GF_M2TS_PES_FRAMING_DEFAULT; } return GF_M2TS_PES_FRAMING_SKIP_NO_RESET; } if (!pes->reframe ) return GF_M2TS_PES_FRAMING_SKIP_NO_RESET; if (pes->reframe == gf_m2ts_reframe_default) return GF_M2TS_PES_FRAMING_RAW; if (pes->reframe == gf_m2ts_reframe_reset) return GF_M2TS_PES_FRAMING_SKIP; return GF_M2TS_PES_FRAMING_DEFAULT; } #endif GF_EXPORT GF_Err gf_m2ts_set_pes_framing(GF_M2TS_PES *pes, u32 mode) { if (!pes) return GF_BAD_PARAM; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Setting pes framing mode of PID %d to %d\n", pes->pid, mode) ); /*ignore request for section PIDs*/ if (pes->flags & GF_M2TS_ES_IS_SECTION) { if (pes->flags & GF_M2TS_ES_IS_SL) { if (mode==GF_M2TS_PES_FRAMING_DEFAULT) { ((GF_M2TS_SECTION_ES *)pes)->sec->process_section = gf_m2ts_process_mpeg4section; } else { ((GF_M2TS_SECTION_ES *)pes)->sec->process_section = NULL; } } return GF_OK; } if (pes->pid==pes->program->pmt_pid) return GF_BAD_PARAM; //if component reuse, disable previous pes if ((mode > GF_M2TS_PES_FRAMING_SKIP) && (pes->program->ts->ess[pes->pid] != (GF_M2TS_ES *) pes)) { GF_M2TS_PES *o_pes = (GF_M2TS_PES *) pes->program->ts->ess[pes->pid]; if (o_pes->flags & GF_M2TS_ES_IS_PES) gf_m2ts_set_pes_framing(o_pes, GF_M2TS_PES_FRAMING_SKIP); GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] Reassinging PID %d from program %d to program %d\n", pes->pid, o_pes->program->number, pes->program->number) ); pes->program->ts->ess[pes->pid] = (GF_M2TS_ES *) pes; } switch (mode) { case GF_M2TS_PES_FRAMING_RAW: pes->reframe = gf_m2ts_reframe_default; break; case GF_M2TS_PES_FRAMING_SKIP: pes->reframe = gf_m2ts_reframe_reset; break; case 
GF_M2TS_PES_FRAMING_SKIP_NO_RESET: pes->reframe = NULL; break; case GF_M2TS_PES_FRAMING_DEFAULT: default: switch (pes->stream_type) { case GF_M2TS_VIDEO_MPEG1: case GF_M2TS_VIDEO_MPEG2: case GF_M2TS_VIDEO_H264: case GF_M2TS_VIDEO_SVC: case GF_M2TS_VIDEO_HEVC: case GF_M2TS_VIDEO_HEVC_TEMPORAL: case GF_M2TS_VIDEO_HEVC_MCTS: case GF_M2TS_VIDEO_SHVC: case GF_M2TS_VIDEO_SHVC_TEMPORAL: case GF_M2TS_VIDEO_MHVC: case GF_M2TS_VIDEO_MHVC_TEMPORAL: case GF_M2TS_AUDIO_MPEG1: case GF_M2TS_AUDIO_MPEG2: case GF_M2TS_AUDIO_AAC: case GF_M2TS_AUDIO_LATM_AAC: case GF_M2TS_AUDIO_AC3: case GF_M2TS_AUDIO_EC3: //for all our supported codec types, use a reframer filter pes->reframe = gf_m2ts_reframe_default; break; case GF_M2TS_PRIVATE_DATA: /* TODO: handle DVB subtitle streams */ break; case GF_M2TS_METADATA_ID3_HLS: //TODO pes->reframe = gf_m2ts_reframe_id3_pes; break; default: pes->reframe = gf_m2ts_reframe_default; break; } break; } return GF_OK; } GF_EXPORT GF_M2TS_Demuxer *gf_m2ts_demux_new() { GF_M2TS_Demuxer *ts; GF_SAFEALLOC(ts, GF_M2TS_Demuxer); if (!ts) return NULL; ts->programs = gf_list_new(); ts->SDTs = gf_list_new(); ts->pat = gf_m2ts_section_filter_new(gf_m2ts_process_pat, 0); ts->cat = gf_m2ts_section_filter_new(gf_m2ts_process_cat, 0); ts->sdt = gf_m2ts_section_filter_new(gf_m2ts_process_sdt, 1); ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); ts->eit = gf_m2ts_section_filter_new(NULL/*gf_m2ts_process_eit*/, 1); ts->tdt_tot = gf_m2ts_section_filter_new(gf_m2ts_process_tdt_tot, 1); #ifdef GPAC_ENABLE_MPE gf_dvb_mpe_init(ts); #endif ts->nb_prog_pmt_received = 0; ts->ChannelAppList = gf_list_new(); return ts; } GF_EXPORT void gf_m2ts_demux_dmscc_init(GF_M2TS_Demuxer *ts) { char temp_dir[GF_MAX_PATH]; u32 length; GF_Err e; ts->dsmcc_controler = gf_list_new(); ts->process_dmscc = 1; strcpy(temp_dir, gf_get_default_cache_directory() ); length = (u32) strlen(temp_dir); if(temp_dir[length-1] == GF_PATH_SEPARATOR) { temp_dir[length-1] = 0; } ts->dsmcc_root_dir = (char*)gf_calloc(strlen(temp_dir)+strlen("CarouselData")+2,sizeof(char)); sprintf(ts->dsmcc_root_dir,"%s%cCarouselData",temp_dir,GF_PATH_SEPARATOR); e = gf_mkdir(ts->dsmcc_root_dir); if(e) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[Process DSMCC] Error during the creation of the directory %s \n",ts->dsmcc_root_dir)); } } GF_EXPORT void gf_m2ts_demux_del(GF_M2TS_Demuxer *ts) { u32 i; if (ts->pat) gf_m2ts_section_filter_del(ts->pat); if (ts->cat) gf_m2ts_section_filter_del(ts->cat); if (ts->sdt) gf_m2ts_section_filter_del(ts->sdt); if (ts->nit) gf_m2ts_section_filter_del(ts->nit); if (ts->eit) gf_m2ts_section_filter_del(ts->eit); if (ts->tdt_tot) gf_m2ts_section_filter_del(ts->tdt_tot); for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { //bacause of pure PCR streams, en ES might be reassigned on 2 PIDs, one for the ES and one for the PCR if (ts->ess[i] && (ts->ess[i]->pid==i)) gf_m2ts_es_del(ts->ess[i], ts); } if (ts->buffer) gf_free(ts->buffer); while (gf_list_count(ts->programs)) { GF_M2TS_Program *p = (GF_M2TS_Program *)gf_list_last(ts->programs); gf_list_rem_last(ts->programs); gf_list_del(p->streams); /*reset OD list*/ if (p->additional_ods) { gf_odf_desc_list_del(p->additional_ods); gf_list_del(p->additional_ods); } if (p->pmt_iod) gf_odf_desc_del((GF_Descriptor *)p->pmt_iod); if (p->metadata_pointer_descriptor) gf_m2ts_metadata_pointer_descriptor_del(p->metadata_pointer_descriptor); gf_free(p); } gf_list_del(ts->programs); if (ts->TDT_time) gf_free(ts->TDT_time); gf_m2ts_reset_sdt(ts); if (ts->tdt_tot) gf_list_del(ts->SDTs); #ifdef 
GPAC_ENABLE_MPE gf_dvb_mpe_shutdown(ts); #endif if (ts->dsmcc_controler) { if (gf_list_count(ts->dsmcc_controler)) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_DSMCC_OVERLORD* dsmcc_overlord = (GF_M2TS_DSMCC_OVERLORD*)gf_list_get(ts->dsmcc_controler,0); gf_cleanup_dir(dsmcc_overlord->root_dir); gf_rmdir(dsmcc_overlord->root_dir); gf_m2ts_delete_dsmcc_overlord(dsmcc_overlord); if(ts->dsmcc_root_dir) { gf_free(ts->dsmcc_root_dir); } #endif } gf_list_del(ts->dsmcc_controler); } while(gf_list_count(ts->ChannelAppList)) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_CHANNEL_APPLICATION_INFO* ChanAppInfo = (GF_M2TS_CHANNEL_APPLICATION_INFO*)gf_list_get(ts->ChannelAppList,0); gf_m2ts_delete_channel_application_info(ChanAppInfo); gf_list_rem(ts->ChannelAppList,0); #endif } gf_list_del(ts->ChannelAppList); if (ts->dsmcc_root_dir) gf_free(ts->dsmcc_root_dir); gf_free(ts); } #if 0//unused void gf_m2ts_print_info(GF_M2TS_Demuxer *ts) { #ifdef GPAC_ENABLE_MPE gf_m2ts_print_mpe_info(ts); #endif } #endif #define M2TS_PROBE_SIZE 188000 static Bool gf_m2ts_probe_buffer(char *buf, u32 size) { GF_Err e; GF_M2TS_Demuxer *ts; u32 lt; lt = gf_log_get_tool_level(GF_LOG_CONTAINER); gf_log_set_tool_level(GF_LOG_CONTAINER, GF_LOG_QUIET); ts = gf_m2ts_demux_new(); e = gf_m2ts_process_data(ts, buf, size); if (!ts->pck_number) e = GF_BAD_PARAM; gf_m2ts_demux_del(ts); gf_log_set_tool_level(GF_LOG_CONTAINER, lt); if (e) return GF_FALSE; return GF_TRUE; } GF_EXPORT Bool gf_m2ts_probe_file(const char *fileName) { char buf[M2TS_PROBE_SIZE]; u32 size; FILE *t; if (!strncmp(fileName, "gmem://", 7)) { u8 *mem_address; if (gf_blob_get_data(fileName, &mem_address, &size) != GF_OK) { return GF_FALSE; } if (size>M2TS_PROBE_SIZE) size = M2TS_PROBE_SIZE; memcpy(buf, mem_address, size); } else { t = gf_fopen(fileName, "rb"); if (!t) return 0; size = (u32) fread(buf, 1, M2TS_PROBE_SIZE, t); gf_fclose(t); if ((s32) size <= 0) return 0; } return gf_m2ts_probe_buffer(buf, size); } GF_EXPORT Bool gf_m2ts_probe_data(const u8 *data, u32 size) { size /= 188; size *= 188; return gf_m2ts_probe_buffer((char *) data, size); } static void rewrite_pts_dts(unsigned char *ptr, u64 TS) { ptr[0] &= 0xf1; ptr[0] |= (unsigned char)((TS&0x1c0000000ULL)>>29); ptr[1] = (unsigned char)((TS&0x03fc00000ULL)>>22); ptr[2] &= 0x1; ptr[2] |= (unsigned char)((TS&0x0003f8000ULL)>>14); ptr[3] = (unsigned char)((TS&0x000007f80ULL)>>7); ptr[4] &= 0x1; ptr[4] |= (unsigned char)((TS&0x00000007fULL)<<1); assert(((u64)(ptr[0]&0xe)<<29) + ((u64)ptr[1]<<22) + ((u64)(ptr[2]&0xfe)<<14) + ((u64)ptr[3]<<7) + ((ptr[4]&0xfe)>>1) == TS); } #define ADJUST_TIMESTAMP(_TS) \ if (_TS < (u64) -ts_shift) _TS = pcr_mod + _TS + ts_shift; \ else _TS = _TS + ts_shift; \ while (_TS > pcr_mod) _TS -= pcr_mod; \ GF_EXPORT GF_Err gf_m2ts_restamp(u8 *buffer, u32 size, s64 ts_shift, u8 *is_pes) { u32 done = 0; u64 pcr_mod; // if (!ts_shift) return GF_OK; pcr_mod = 0x80000000; pcr_mod*=4; while (done + 188 <= size) { u8 *pesh; u8 *pck; u64 pcr_base=0, pcr_ext=0; u16 pid; u8 adaptation_field, adaptation_field_length; pck = (u8*) buffer+done; if (pck[0]!=0x47) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Invalid sync byte %X\n", pck[0])); return GF_NON_COMPLIANT_BITSTREAM; } pid = ((pck[1] & 0x1f) <<8 ) + pck[2]; adaptation_field_length = 0; adaptation_field = (pck[3] >> 4) & 0x3; if ((adaptation_field==2) || (adaptation_field==3)) { adaptation_field_length = pck[4]; if ( pck[5]&0x10 /*PCR_flag*/) { pcr_base = (((u64)pck[6])<<25) + (pck[7]<<17) + (pck[8]<<9) + (pck[9]<<1) + (pck[10]>>7); pcr_ext = 
((pck[10]&1)<<8) + pck[11]; ADJUST_TIMESTAMP(pcr_base); pck[6] = (unsigned char)(0xff&(pcr_base>>25)); pck[7] = (unsigned char)(0xff&(pcr_base>>17)); pck[8] = (unsigned char)(0xff&(pcr_base>>9)); pck[9] = (unsigned char)(0xff&(pcr_base>>1)); pck[10] = (unsigned char)(((0x1&pcr_base)<<7) | 0x7e | ((0x100&pcr_ext)>>8)); if (pcr_ext != ((pck[10]&1)<<8) + pck[11]) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Sanity check failed for PCR restamping\n")); return GF_IO_ERR; } pck[11] = (unsigned char)(0xff&pcr_ext); } /*add adaptation_field_length field*/ adaptation_field_length++; } if (!is_pes[pid] || !(pck[1]&0x40)) { done+=188; continue; } pesh = &pck[4+adaptation_field_length]; if ((pesh[0]==0x00) && (pesh[1]==0x00) && (pesh[2]==0x01)) { Bool has_pts, has_dts; if ((pesh[6]&0xc0)!=0x80) { done+=188; continue; } has_pts = (pesh[7]&0x80); has_dts = has_pts ? (pesh[7]&0x40) : 0; if (has_pts) { u64 PTS; if (((pesh[9]&0xe0)>>4)!=0x2) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES header, PTS decoding: '0010' expected\n", pid)); done+=188; continue; } PTS = gf_m2ts_get_pts(pesh + 9); ADJUST_TIMESTAMP(PTS); rewrite_pts_dts(pesh+9, PTS); } if (has_dts) { u64 DTS = gf_m2ts_get_pts(pesh + 14); ADJUST_TIMESTAMP(DTS); rewrite_pts_dts(pesh+14, DTS); } } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES not beginning with start code\n", pid)); } done+=188; } return GF_OK; } #endif /*GPAC_DISABLE_MPEG2TS*/
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2005-2012 * * This file is part of GPAC / MPEG2-TS sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/mpegts.h> #ifndef GPAC_DISABLE_MPEG2TS #include <string.h> #include <gpac/constants.h> #include <gpac/internal/media_dev.h> #include <gpac/download.h> #ifndef GPAC_DISABLE_STREAMING #include <gpac/internal/ietf_dev.h> #endif #ifdef GPAC_CONFIG_LINUX #include <unistd.h> #endif #ifdef GPAC_ENABLE_MPE #include <gpac/dvb_mpe.h> #endif #ifdef GPAC_ENABLE_DSMCC #include <gpac/ait.h> #endif #define DEBUG_TS_PACKET 0 GF_EXPORT const char *gf_m2ts_get_stream_name(u32 streamType) { switch (streamType) { case GF_M2TS_VIDEO_MPEG1: return "MPEG-1 Video"; case GF_M2TS_VIDEO_MPEG2: return "MPEG-2 Video"; case GF_M2TS_AUDIO_MPEG1: return "MPEG-1 Audio"; case GF_M2TS_AUDIO_MPEG2: return "MPEG-2 Audio"; case GF_M2TS_PRIVATE_SECTION: return "Private Section"; case GF_M2TS_PRIVATE_DATA: return "Private Data"; case GF_M2TS_AUDIO_AAC: return "AAC Audio"; case GF_M2TS_VIDEO_MPEG4: return "MPEG-4 Video"; case GF_M2TS_VIDEO_H264: return "MPEG-4/H264 Video"; case GF_M2TS_VIDEO_SVC: return "H264-SVC Video"; case GF_M2TS_VIDEO_HEVC: return "HEVC Video"; case GF_M2TS_VIDEO_SHVC: return "SHVC Video"; case GF_M2TS_VIDEO_SHVC_TEMPORAL: return "SHVC Video Temporal Sublayer"; case GF_M2TS_VIDEO_MHVC: return "MHVC Video"; case GF_M2TS_VIDEO_MHVC_TEMPORAL: return "MHVC Video Temporal Sublayer"; case GF_M2TS_AUDIO_AC3: return "Dolby AC3 Audio"; case GF_M2TS_AUDIO_DTS: return "Dolby DTS Audio"; case GF_M2TS_SUBTITLE_DVB: return "DVB Subtitle"; case GF_M2TS_SYSTEMS_MPEG4_PES: return "MPEG-4 SL (PES)"; case GF_M2TS_SYSTEMS_MPEG4_SECTIONS: return "MPEG-4 SL (Section)"; case GF_M2TS_MPE_SECTIONS: return "MPE (Section)"; case GF_M2TS_METADATA_PES: return "Metadata (PES)"; case GF_M2TS_METADATA_ID3_HLS: return "ID3/HLS Metadata (PES)"; default: return "Unknown"; } } static u32 gf_m2ts_reframe_default(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr) { GF_M2TS_PES_PCK pck; pck.flags = 0; if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP; if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START; pck.DTS = pes->DTS; pck.PTS = pes->PTS; pck.data = (char *)data; pck.data_len = data_len; pck.stream = pes; ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck); /*we consumed all data*/ return 0; } static u32 gf_m2ts_reframe_reset(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr) { if (pes->pck_data) { gf_free(pes->pck_data); pes->pck_data = NULL; } pes->pck_data_len = pes->pck_alloc_len = 0; if (pes->prev_data) { gf_free(pes->prev_data); pes->prev_data = NULL; } pes->prev_data_len = 0; pes->pes_len = 0; pes->prev_PTS = 0; pes->reframe = NULL; pes->cc = -1; 
pes->temi_tc_desc_len = 0; return 0; } static void add_text(char **buffer, u32 *size, u32 *pos, char *msg, u32 msg_len) { if (!msg || !buffer) return; if (*pos+msg_len>*size) { *size = *pos+msg_len-*size+256; *buffer = (char *)gf_realloc(*buffer, *size); } strncpy((*buffer)+(*pos), msg, msg_len); *pos += msg_len; } static GF_Err id3_parse_tag(char *data, u32 length, char **output, u32 *output_size, u32 *output_pos) { GF_BitStream *bs; u32 pos; if ((data[0] != 'I') || (data[1] != 'D') || (data[2] != '3')) return GF_NOT_SUPPORTED; bs = gf_bs_new(data, length, GF_BITSTREAM_READ); gf_bs_skip_bytes(bs, 3); /*u8 major = */gf_bs_read_u8(bs); /*u8 minor = */gf_bs_read_u8(bs); /*u8 unsync = */gf_bs_read_int(bs, 1); /*u8 ext_hdr = */ gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 6); u32 size = gf_id3_read_size(bs); pos = (u32) gf_bs_get_position(bs); if (size != length-pos) size = length-pos; while (size && (gf_bs_available(bs)>=10) ) { u32 ftag = gf_bs_read_u32(bs); u32 fsize = gf_id3_read_size(bs); /*u16 fflags = */gf_bs_read_u16(bs); size -= 10; //TODO, handle more ID3 tags ? if (ftag==ID3V2_FRAME_TXXX) { u32 pos = (u32) gf_bs_get_position(bs); char *text = data+pos; add_text(output, output_size, output_pos, text, fsize); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] ID3 tag not handled, patch welcome\n", gf_4cc_to_str(ftag) ) ); } gf_bs_skip_bytes(bs, fsize); } gf_bs_del(bs); return GF_OK; } static u32 gf_m2ts_reframe_id3_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr) { char frame_header[256]; char *output_text = NULL; u32 output_len = 0; u32 pos = 0; GF_M2TS_PES_PCK pck; pck.flags = 0; if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP; if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START; pck.DTS = pes->DTS; pck.PTS = pes->PTS; sprintf(frame_header, LLU" --> NEXT\n", pes->PTS); add_text(&output_text, &output_len, &pos, frame_header, (u32)strlen(frame_header)); id3_parse_tag((char *)data, data_len, &output_text, &output_len, &pos); add_text(&output_text, &output_len, &pos, "\n\n", 2); pck.data = (char *)output_text; pck.data_len = pos; pck.stream = pes; ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck); gf_free(output_text); /*we consumed all data*/ return 0; } static u32 gf_m2ts_sync(GF_M2TS_Demuxer *ts, char *data, u32 size, Bool simple_check) { u32 i=0; /*if first byte is sync assume we're sync*/ if (simple_check && (data[i]==0x47)) return 0; while (i < size) { if (i+192 >= size) return size; if ((data[i]==0x47) && (data[i+188]==0x47)) break; if (i+192 >= size) return size; if ((data[i]==0x47) && (data[i+192]==0x47)) { ts->prefix_present = 1; break; } i++; } if (i) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] re-sync skipped %d bytes\n", i) ); } return i; } GF_EXPORT Bool gf_m2ts_crc32_check(u8 *data, u32 len) { u32 crc = gf_crc_32(data, len); u32 crc_val = GF_4CC((u8) data[len], (u8) data[len+1], (u8) data[len+2], (u8) data[len+3]); return (crc==crc_val) ? 
GF_TRUE : GF_FALSE; } static GF_M2TS_SectionFilter *gf_m2ts_section_filter_new(gf_m2ts_section_callback process_section_callback, Bool process_individual) { GF_M2TS_SectionFilter *sec; GF_SAFEALLOC(sec, GF_M2TS_SectionFilter); if (!sec) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] gf_m2ts_section_filter_new : OUT OF MEMORY\n")); return NULL; } sec->cc = -1; sec->process_section = process_section_callback; sec->process_individual = process_individual; return sec; } static void gf_m2ts_reset_sections(GF_List *sections) { u32 count; GF_M2TS_Section *section; //GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Deleting sections\n")); count = gf_list_count(sections); while (count) { section = gf_list_get(sections, 0); gf_list_rem(sections, 0); if (section->data) gf_free(section->data); gf_free(section); count--; } } static void gf_m2ts_section_filter_reset(GF_M2TS_SectionFilter *sf) { if (sf->section) { gf_free(sf->section); sf->section = NULL; } while (sf->table) { GF_M2TS_Table *t = sf->table; sf->table = t->next; gf_m2ts_reset_sections(t->sections); gf_list_del(t->sections); gf_free(t); } sf->cc = -1; sf->length = sf->received = 0; sf->demux_restarted = 1; } static void gf_m2ts_section_filter_del(GF_M2TS_SectionFilter *sf) { gf_m2ts_section_filter_reset(sf); gf_free(sf); } static void gf_m2ts_metadata_descriptor_del(GF_M2TS_MetadataDescriptor *metad) { if (metad) { if (metad->service_id_record) gf_free(metad->service_id_record); if (metad->decoder_config) gf_free(metad->decoder_config); if (metad->decoder_config_id) gf_free(metad->decoder_config_id); gf_free(metad); } } GF_EXPORT void gf_m2ts_es_del(GF_M2TS_ES *es, GF_M2TS_Demuxer *ts) { gf_list_del_item(es->program->streams, es); if (es->flags & GF_M2TS_ES_IS_SECTION) { GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es; if (ses->sec) gf_m2ts_section_filter_del(ses->sec); #ifdef GPAC_ENABLE_MPE if (es->flags & GF_M2TS_ES_IS_MPE) gf_dvb_mpe_section_del(es); #endif } else if (es->pid!=es->program->pmt_pid) { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; if ((pes->flags & GF_M2TS_INHERIT_PCR) && ts->ess[es->program->pcr_pid]==es) ts->ess[es->program->pcr_pid] = NULL; if (pes->pck_data) gf_free(pes->pck_data); if (pes->prev_data) gf_free(pes->prev_data); if (pes->buf) gf_free(pes->buf); if (pes->reassemble_buf) gf_free(pes->reassemble_buf); if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc); if (pes->metadata_descriptor) gf_m2ts_metadata_descriptor_del(pes->metadata_descriptor); } if (es->slcfg) gf_free(es->slcfg); gf_free(es); } static void gf_m2ts_reset_sdt(GF_M2TS_Demuxer *ts) { while (gf_list_count(ts->SDTs)) { GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_last(ts->SDTs); gf_list_rem_last(ts->SDTs); if (sdt->provider) gf_free(sdt->provider); if (sdt->service) gf_free(sdt->service); gf_free(sdt); } } GF_EXPORT GF_M2TS_SDT *gf_m2ts_get_sdt_info(GF_M2TS_Demuxer *ts, u32 program_id) { u32 i; for (i=0; i<gf_list_count(ts->SDTs); i++) { GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_get(ts->SDTs, i); if (sdt->service_id==program_id) return sdt; } return NULL; } static void gf_m2ts_section_complete(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses) { //seek mode, only process PAT and PMT if (ts->seek_mode && (sec->section[0] != GF_M2TS_TABLE_ID_PAT) && (sec->section[0] != GF_M2TS_TABLE_ID_PMT)) { /*clean-up (including broken sections)*/ if (sec->section) gf_free(sec->section); sec->section = NULL; sec->length = sec->received = 0; return; } if (!sec->process_section) { if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_AIT)) 
) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; //ts->on_event(ts, GF_M2TS_EVT_AIT_FOUND, &pck); on_ait_section(ts, GF_M2TS_EVT_AIT_FOUND, &pck); #endif } else if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_ENCAPSULATED_DATA || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_STREAM_DESCRIPTION || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_PRIVATE)) ) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; on_dsmcc_section(ts,GF_M2TS_EVT_DSMCC_FOUND,&pck); //ts->on_event(ts, GF_M2TS_EVT_DSMCC_FOUND, &pck); #endif } #ifdef GPAC_ENABLE_MPE else if (ts->on_mpe_event && ((ses && (ses->flags & GF_M2TS_EVT_DVB_MPE)) || (sec->section[0]==GF_M2TS_TABLE_ID_INT)) ) { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; ts->on_mpe_event(ts, GF_M2TS_EVT_DVB_MPE, &pck); } #endif else if (ts->on_event) { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck); } } else { Bool has_syntax_indicator; u8 table_id; u16 extended_table_id; u32 status, section_start, i; GF_M2TS_Table *t, *prev_t; unsigned char *data; Bool section_valid = 0; status = 0; /*parse header*/ data = (u8 *)sec->section; /*look for proper table*/ table_id = data[0]; if (ts->on_event) { switch (table_id) { case GF_M2TS_TABLE_ID_PAT: case GF_M2TS_TABLE_ID_SDT_ACTUAL: case GF_M2TS_TABLE_ID_PMT: case GF_M2TS_TABLE_ID_NIT_ACTUAL: case GF_M2TS_TABLE_ID_TDT: case GF_M2TS_TABLE_ID_TOT: { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck); } } } has_syntax_indicator = (data[1] & 0x80) ? 1 : 0; if (has_syntax_indicator) { extended_table_id = (data[3]<<8) | data[4]; } else { extended_table_id = 0; } prev_t = NULL; t = sec->table; while (t) { if ((t->table_id==table_id) && (t->ex_table_id == extended_table_id)) break; prev_t = t; t = t->next; } /*create table*/ if (!t) { GF_SAFEALLOC(t, GF_M2TS_Table); if (!t) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc table %d %d\n", table_id, extended_table_id)); return; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Creating table %d %d\n", table_id, extended_table_id)); t->table_id = table_id; t->ex_table_id = extended_table_id; t->last_version_number = 0xFF; t->sections = gf_list_new(); if (prev_t) prev_t->next = t; else sec->table = t; } if (has_syntax_indicator) { if (sec->length < 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section length %d less than CRC \n", sec->length)); } else { /*remove crc32*/ sec->length -= 4; if (gf_m2ts_crc32_check((char *)data, sec->length)) { s32 cur_sec_num; t->version_number = (data[5] >> 1) & 0x1f; if (t->last_section_number && t->section_number && (t->version_number != t->last_version_number)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] table transmission interrupted: previous table (v=%d) %d/%d sections - new table (v=%d) %d/%d sections\n", t->last_version_number, t->section_number, t->last_section_number, t->version_number, data[6] + 1, data[7] + 1) ); gf_m2ts_reset_sections(t->sections); t->section_number = 0; } t->current_next_indicator = (data[5] & 0x1) ? 
1 : 0; /*add one to section numbers to detect if we missed or not the first section in the table*/ cur_sec_num = data[6] + 1; t->last_section_number = data[7] + 1; section_start = 8; /*we missed something*/ if (!sec->process_individual && t->section_number + 1 != cur_sec_num) { /* TODO - Check how to handle sections when the first complete section does not have its sec num 0 */ section_valid = 0; if (t->is_init) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted table (lost section %d)\n", cur_sec_num ? cur_sec_num-1 : 31) ); } } else { section_valid = 1; t->section_number = cur_sec_num; } } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section (CRC32 failed)\n")); } } } else { section_valid = 1; section_start = 3; } /*process section*/ if (section_valid) { GF_M2TS_Section *section; GF_SAFEALLOC(section, GF_M2TS_Section); if (!section) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create section\n")); return; } section->data_size = sec->length - section_start; section->data = (unsigned char*)gf_malloc(sizeof(unsigned char)*section->data_size); memcpy(section->data, sec->section + section_start, sizeof(unsigned char)*section->data_size); gf_list_add(t->sections, section); if (t->section_number == 1) { status |= GF_M2TS_TABLE_START; if (t->last_version_number == t->version_number) { t->is_repeat = 1; } else { t->is_repeat = 0; } /*only update version number in the first section of the table*/ t->last_version_number = t->version_number; } if (t->is_init) { if (t->is_repeat) { status |= GF_M2TS_TABLE_REPEAT; } else { status |= GF_M2TS_TABLE_UPDATE; } } else { status |= GF_M2TS_TABLE_FOUND; } if (t->last_section_number == t->section_number) { u32 table_size; status |= GF_M2TS_TABLE_END; table_size = 0; for (i=0; i<gf_list_count(t->sections); i++) { GF_M2TS_Section *section = gf_list_get(t->sections, i); table_size += section->data_size; } if (t->is_repeat) { if (t->table_size != table_size) { status |= GF_M2TS_TABLE_UPDATE; GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Repeated section found with different sizes (old table %d bytes, new table %d bytes)\n", t->table_size, table_size) ); t->table_size = table_size; } } else { t->table_size = table_size; } t->is_init = 1; /*reset section number*/ t->section_number = 0; t->is_repeat = 0; } if (sec->process_individual) { /*send each section of the table and not the aggregated table*/ if (sec->process_section) sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status); gf_m2ts_reset_sections(t->sections); } else { if (status&GF_M2TS_TABLE_END) { if (sec->process_section) sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status); gf_m2ts_reset_sections(t->sections); } } } else { sec->cc = -1; t->section_number = 0; } } /*clean-up (including broken sections)*/ if (sec->section) gf_free(sec->section); sec->section = NULL; sec->length = sec->received = 0; } static Bool gf_m2ts_is_long_section(u8 table_id) { switch (table_id) { case GF_M2TS_TABLE_ID_MPEG4_BIFS: case GF_M2TS_TABLE_ID_MPEG4_OD: case GF_M2TS_TABLE_ID_INT: case GF_M2TS_TABLE_ID_EIT_ACTUAL_PF: case GF_M2TS_TABLE_ID_EIT_OTHER_PF: case GF_M2TS_TABLE_ID_ST: case GF_M2TS_TABLE_ID_SIT: case GF_M2TS_TABLE_ID_DSM_CC_PRIVATE: case GF_M2TS_TABLE_ID_MPE_FEC: case GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE: case GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE: return 1; default: if (table_id >= 
GF_M2TS_TABLE_ID_EIT_SCHEDULE_MIN && table_id <= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MAX) return 1; else return 0; } } static u32 gf_m2ts_get_section_length(char byte0, char byte1, char byte2) { u32 length; if (gf_m2ts_is_long_section(byte0)) { length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0xfff ); } else { length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0x3ff ); } return length; } static void gf_m2ts_gather_section(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size) { u32 payload_size = data_size; u8 expect_cc = (sec->cc<0) ? hdr->continuity_counter : (sec->cc + 1) & 0xf; Bool disc = (expect_cc == hdr->continuity_counter) ? 0 : 1; sec->cc = expect_cc; /*may happen if hdr->adaptation_field=2 no payload in TS packet*/ if (!data_size) return; if (hdr->payload_start) { u32 ptr_field; ptr_field = data[0]; if (ptr_field+1>data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid section start (@ptr_field=%d, @data_size=%d)\n", ptr_field, data_size) ); return; } /*end of previous section*/ if (!sec->length && sec->received) { /* the length of the section could not be determined from the previous TS packet because we had only 1 or 2 bytes */ if (sec->received == 1) sec->length = gf_m2ts_get_section_length(sec->section[0], data[1], data[2]); else /* (sec->received == 2) */ sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], data[1]); sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length); } if (sec->length && sec->received + ptr_field >= sec->length) { u32 len = sec->length - sec->received; memcpy(sec->section + sec->received, data+1, sizeof(char)*len); sec->received += len; if (ptr_field > len) GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid pointer field (@ptr_field=%d, @remaining=%d)\n", ptr_field, len) ); gf_m2ts_section_complete(ts, sec, ses); } data += ptr_field+1; data_size -= ptr_field+1; payload_size -= ptr_field+1; aggregated_section: if (sec->section) gf_free(sec->section); sec->length = sec->received = 0; sec->section = (char*)gf_malloc(sizeof(char)*data_size); memcpy(sec->section, data, sizeof(char)*data_size); sec->received = data_size; } else if (disc) { if (sec->section) gf_free(sec->section); sec->section = NULL; sec->received = sec->length = 0; return; } else if (!sec->section) { return; } else { if (sec->length && sec->received+data_size > sec->length) data_size = sec->length - sec->received; if (sec->length) { memcpy(sec->section + sec->received, data, sizeof(char)*data_size); } else { sec->section = (char*)gf_realloc(sec->section, sizeof(char)*(sec->received+data_size)); memcpy(sec->section + sec->received, data, sizeof(char)*data_size); } sec->received += data_size; } /*alloc final buffer*/ if (!sec->length && (sec->received >= 3)) { sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], sec->section[2]); sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length); if (sec->received > sec->length) { data_size -= sec->received - sec->length; sec->received = sec->length; } } if (!sec->length || sec->received < sec->length) return; /*OK done*/ gf_m2ts_section_complete(ts, sec, ses); if (payload_size > data_size) { data += data_size; /* detect padding after previous section */ if (data[0] != 0xFF) { data_size = payload_size - data_size; payload_size = data_size; goto aggregated_section; } } } static void gf_m2ts_process_sdt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 
table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 pos, evt_type; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SDT_REPEAT, NULL); return; } if (table_id != GF_M2TS_TABLE_ID_SDT_ACTUAL) { return; } gf_m2ts_reset_sdt(ts); nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] SDT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; //orig_net_id = (data[0] << 8) | data[1]; pos = 3; while (pos < data_size) { GF_M2TS_SDT *sdt; u32 descs_size, d_pos, ulen; GF_SAFEALLOC(sdt, GF_M2TS_SDT); if (!sdt) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create SDT\n")); return; } gf_list_add(ts->SDTs, sdt); sdt->service_id = (data[pos]<<8) + data[pos+1]; sdt->EIT_schedule = (data[pos+2] & 0x2) ? 1 : 0; sdt->EIT_present_following = (data[pos+2] & 0x1); sdt->running_status = (data[pos+3]>>5) & 0x7; sdt->free_CA_mode = (data[pos+3]>>4) & 0x1; descs_size = ((data[pos+3]&0xf)<<8) | data[pos+4]; pos += 5; d_pos = 0; while (d_pos < descs_size) { u8 d_tag = data[pos+d_pos]; u8 d_len = data[pos+d_pos+1]; switch (d_tag) { case GF_M2TS_DVB_SERVICE_DESCRIPTOR: if (sdt->provider) gf_free(sdt->provider); sdt->provider = NULL; if (sdt->service) gf_free(sdt->service); sdt->service = NULL; d_pos+=2; sdt->service_type = data[pos+d_pos]; ulen = data[pos+d_pos+1]; d_pos += 2; sdt->provider = (char*)gf_malloc(sizeof(char)*(ulen+1)); memcpy(sdt->provider, data+pos+d_pos, sizeof(char)*ulen); sdt->provider[ulen] = 0; d_pos += ulen; ulen = data[pos+d_pos]; d_pos += 1; sdt->service = (char*)gf_malloc(sizeof(char)*(ulen+1)); memcpy(sdt->service, data+pos+d_pos, sizeof(char)*ulen); sdt->service[ulen] = 0; d_pos += ulen; break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) not supported\n", d_tag)); d_pos += d_len; if (d_len == 0) d_pos = descs_size; break; } } pos += descs_size; } evt_type = GF_M2TS_EVT_SDT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } static void gf_m2ts_process_mpeg4section(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_M2TS_SL_PCK sl_pck; u32 nb_sections, i; GF_M2TS_Section *section; /*skip if already received*/ if (status & GF_M2TS_TABLE_REPEAT) if (!(es->flags & GF_M2TS_ES_SEND_REPEATED_SECTIONS)) return; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Sections for PID %d\n", es->pid) ); /*send all sections (eg SL-packets)*/ nb_sections = gf_list_count(sections); for (i=0; i<nb_sections; i++) { section = (GF_M2TS_Section *)gf_list_get(sections, i); sl_pck.data = (char *)section->data; sl_pck.data_len = section->data_size; sl_pck.stream = (GF_M2TS_ES *)es; sl_pck.version_number = version_number; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck); } } static void gf_m2ts_process_nit(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *nit_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] NIT table processing (not yet implemented)")); } static void gf_m2ts_process_tdt_tot(GF_M2TS_Demuxer *ts, 
GF_M2TS_SECTION_ES *tdt_tot_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { unsigned char *data; u32 data_size, nb_sections; u32 date, yp, mp, k; GF_M2TS_Section *section; GF_M2TS_TDT_TOT *time_table; const char *table_name; /*wait for the last section */ if ( !(status & GF_M2TS_TABLE_END) ) return; switch (table_id) { case GF_M2TS_TABLE_ID_TDT: table_name = "TDT"; break; case GF_M2TS_TABLE_ID_TOT: table_name = "TOT"; break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Unimplemented table_id %u for PID %u\n", table_id, GF_M2TS_PID_TDT_TOT_ST)); return; } nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] %s on multiple sections not supported\n", table_name)); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; /*TOT only contains 40 bits of UTC_time; TDT add descriptors and a CRC*/ if ((table_id==GF_M2TS_TABLE_ID_TDT) && (data_size != 5)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Corrupted TDT size\n", table_name)); } GF_SAFEALLOC(time_table, GF_M2TS_TDT_TOT); if (!time_table) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc DVB time table\n")); return; } /*UTC_time - see annex C of DVB-SI ETSI EN 300468*/ /* decodes an Modified Julian Date (MJD) into a Co-ordinated Universal Time (UTC) See annex C of DVB-SI ETSI EN 300468 */ date = data[0]*256 + data[1]; yp = (u32)((date - 15078.2)/365.25); mp = (u32)((date - 14956.1 - (u32)(yp * 365.25))/30.6001); time_table->day = (u32)(date - 14956 - (u32)(yp * 365.25) - (u32)(mp * 30.6001)); if (mp == 14 || mp == 15) k = 1; else k = 0; time_table->year = yp + k + 1900; time_table->month = mp - 1 - k*12; time_table->hour = 10*((data[2]&0xf0)>>4) + (data[2]&0x0f); time_table->minute = 10*((data[3]&0xf0)>>4) + (data[3]&0x0f); time_table->second = 10*((data[4]&0xf0)>>4) + (data[4]&0x0f); assert(time_table->hour<24 && time_table->minute<60 && time_table->second<60); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream UTC time is %u/%02u/%02u %02u:%02u:%02u\n", time_table->year, time_table->month, time_table->day, time_table->hour, time_table->minute, time_table->second)); switch (table_id) { case GF_M2TS_TABLE_ID_TDT: if (ts->TDT_time) gf_free(ts->TDT_time); ts->TDT_time = time_table; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TDT, time_table); break; case GF_M2TS_TABLE_ID_TOT: #if 0 { u32 pos, loop_len; loop_len = ((data[5]&0x0f) << 8) | (data[6] & 0xff); data += 7; pos = 0; while (pos < loop_len) { u8 tag = data[pos]; pos += 2; if (tag == GF_M2TS_DVB_LOCAL_TIME_OFFSET_DESCRIPTOR) { char tmp_time[10]; u16 offset_hours, offset_minutes; now->country_code[0] = data[pos]; now->country_code[1] = data[pos+1]; now->country_code[2] = data[pos+2]; now->country_region_id = data[pos+3]>>2; sprintf(tmp_time, "%02x", data[pos+4]); offset_hours = atoi(tmp_time); sprintf(tmp_time, "%02x", data[pos+5]); offset_minutes = atoi(tmp_time); now->local_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60; if (data[pos+3] & 1) now->local_time_offset_seconds *= -1; dvb_decode_mjd_to_unix_time(data+pos+6, &now->unix_next_toc); sprintf(tmp_time, "%02x", data[pos+11]); offset_hours = atoi(tmp_time); sprintf(tmp_time, "%02x", data[pos+12]); offset_minutes = atoi(tmp_time); now->next_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60; if (data[pos+3] & 1) now->next_time_offset_seconds *= -1; pos+= 13; } } /*TODO: 
check lengths are ok*/ if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table); } #endif /*check CRC32*/ if (ts->tdt_tot->length<4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (less than 4 bytes but CRC32 should be present\n", table_name)); goto error_exit; } if (!gf_m2ts_crc32_check(ts->tdt_tot->section, ts->tdt_tot->length-4)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (CRC32 failed)\n", table_name)); goto error_exit; } if (ts->TDT_time) gf_free(ts->TDT_time); ts->TDT_time = time_table; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table); break; default: assert(0); goto error_exit; } return; /*success*/ error_exit: gf_free(time_table); return; } static GF_M2TS_MetadataPointerDescriptor *gf_m2ts_read_metadata_pointer_descriptor(GF_BitStream *bs, u32 length) { u32 size; GF_M2TS_MetadataPointerDescriptor *d; GF_SAFEALLOC(d, GF_M2TS_MetadataPointerDescriptor); if (!d) return NULL; d->application_format = gf_bs_read_u16(bs); size = 2; if (d->application_format == 0xFFFF) { d->application_format_identifier = gf_bs_read_u32(bs); size += 4; } d->format = gf_bs_read_u8(bs); size += 1; if (d->format == 0xFF) { d->format_identifier = gf_bs_read_u32(bs); size += 4; } d->service_id = gf_bs_read_u8(bs); d->locator_record_flag = (gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE); d->carriage_flag = (enum metadata_carriage)gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 5); /*reserved */ size += 2; if (d->locator_record_flag) { d->locator_length = gf_bs_read_u8(bs); d->locator_data = (char *)gf_malloc(d->locator_length); size += 1 + d->locator_length; gf_bs_read_data(bs, d->locator_data, d->locator_length); } if (d->carriage_flag != 3) { d->program_number = gf_bs_read_u16(bs); size += 2; } if (d->carriage_flag == 1) { d->ts_location = gf_bs_read_u16(bs); d->ts_id = gf_bs_read_u16(bs); size += 4; } if (length-size > 0) { d->data_size = length-size; d->data = (char *)gf_malloc(d->data_size); gf_bs_read_data(bs, d->data, d->data_size); } return d; } static void gf_m2ts_metadata_pointer_descriptor_del(GF_M2TS_MetadataPointerDescriptor *metapd) { if (metapd) { if (metapd->locator_data) gf_free(metapd->locator_data); if (metapd->data) gf_free(metapd->data); gf_free(metapd); } } static GF_M2TS_MetadataDescriptor *gf_m2ts_read_metadata_descriptor(GF_BitStream *bs, u32 length) { u32 size; GF_M2TS_MetadataDescriptor *d; GF_SAFEALLOC(d, GF_M2TS_MetadataDescriptor); if (!d) return NULL; d->application_format = gf_bs_read_u16(bs); size = 2; if (d->application_format == 0xFFFF) { d->application_format_identifier = gf_bs_read_u32(bs); size += 4; } d->format = gf_bs_read_u8(bs); size += 1; if (d->format == 0xFF) { d->format_identifier = gf_bs_read_u32(bs); size += 4; } d->service_id = gf_bs_read_u8(bs); d->decoder_config_flags = gf_bs_read_int(bs, 3); d->dsmcc_flag = (gf_bs_read_int(bs, 1) ? 
GF_TRUE : GF_FALSE); gf_bs_read_int(bs, 4); /* reserved */ size += 2; if (d->dsmcc_flag) { d->service_id_record_length = gf_bs_read_u8(bs); d->service_id_record = (char *)gf_malloc(d->service_id_record_length); size += 1 + d->service_id_record_length; gf_bs_read_data(bs, d->service_id_record, d->service_id_record_length); } if (d->decoder_config_flags == 1) { d->decoder_config_length = gf_bs_read_u8(bs); d->decoder_config = (char *)gf_malloc(d->decoder_config_length); size += 1 + d->decoder_config_length; gf_bs_read_data(bs, d->decoder_config, d->decoder_config_length); } if (d->decoder_config_flags == 3) { d->decoder_config_id_length = gf_bs_read_u8(bs); d->decoder_config_id = (char *)gf_malloc(d->decoder_config_id_length); size += 1 + d->decoder_config_id_length; gf_bs_read_data(bs, d->decoder_config_id, d->decoder_config_id_length); } if (d->decoder_config_flags == 4) { d->decoder_config_service_id = gf_bs_read_u8(bs); size++; } return d; } static void gf_m2ts_process_pmt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *pmt, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 info_length, pos, desc_len, evt_type, nb_es,i; u32 nb_sections; u32 data_size; u32 nb_hevc, nb_hevc_temp, nb_shvc, nb_shvc_temp, nb_mhvc, nb_mhvc_temp; unsigned char *data; GF_M2TS_Section *section; GF_Err e = GF_OK; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; nb_es = 0; /*skip if already received but no update detected (eg same data) */ if ((status&GF_M2TS_TABLE_REPEAT) && !(status&GF_M2TS_TABLE_UPDATE)) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program); return; } if (pmt->sec->demux_restarted) { pmt->sec->demux_restarted = 0; return; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PMT Found or updated\n")); nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PMT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; pmt->program->pcr_pid = ((data[0] & 0x1f) << 8) | data[1]; info_length = ((data[2]&0xf)<<8) | data[3]; if (info_length + 4 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT first loop, %d bytes avail but first loop size %d\n", data_size, info_length)); return; } else if (info_length != 0) { /* ...Read Descriptors ... */ u8 tag, len; u32 first_loop_len = 0; tag = data[4]; len = data[5]; while (info_length > first_loop_len) { if (tag == GF_M2TS_MPEG4_IOD_DESCRIPTOR) { if ((len>2) && (len - 2 <= info_length)) { u32 size; GF_BitStream *iod_bs; iod_bs = gf_bs_new((char *)data+8, len-2, GF_BITSTREAM_READ); if (pmt->program->pmt_iod) gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod); e = gf_odf_parse_descriptor(iod_bs , (GF_Descriptor **) &pmt->program->pmt_iod, &size); gf_bs_del(iod_bs ); if (e==GF_OK) { /*remember program number for service/program selection*/ if (pmt->program->pmt_iod) pmt->program->pmt_iod->ServiceID = pmt->program->number; /*if empty IOD (freebox case), discard it and use dynamic declaration of object*/ if (!gf_list_count(pmt->program->pmt_iod->ESDescriptors)) { gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod); pmt->program->pmt_iod = NULL; } } } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken IOD! 
len %d less than 2 bytes to declare IOD\n", len)); } } else if (tag == GF_M2TS_METADATA_POINTER_DESCRIPTOR) { GF_BitStream *metadatapd_bs; GF_M2TS_MetadataPointerDescriptor *metapd; metadatapd_bs = gf_bs_new((char *)data+6, len, GF_BITSTREAM_READ); metapd = gf_m2ts_read_metadata_pointer_descriptor(metadatapd_bs, len); gf_bs_del(metadatapd_bs); if (metapd->application_format_identifier == GF_M2TS_META_ID3 && metapd->format_identifier == GF_M2TS_META_ID3 && metapd->carriage_flag == METADATA_CARRIAGE_SAME_TS) { /*HLS ID3 Metadata */ pmt->program->metadata_pointer_descriptor = metapd; } else { /* don't know what to do with it for now, delete */ gf_m2ts_metadata_pointer_descriptor_del(metapd); } } else { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) and others not supported\n", tag)); } first_loop_len += 2 + len; } } if (data_size <= 4 + info_length) return; data += 4 + info_length; data_size -= 4 + info_length; pos = 0; /* count de number of program related PMT received */ for(i=0; i<gf_list_count(ts->programs); i++) { GF_M2TS_Program *prog = (GF_M2TS_Program *)gf_list_get(ts->programs,i); if(prog->pmt_pid == pmt->pid) { break; } } nb_hevc = nb_hevc_temp = nb_shvc = nb_shvc_temp = nb_mhvc = nb_mhvc_temp = 0; while (pos<data_size) { GF_M2TS_PES *pes = NULL; GF_M2TS_SECTION_ES *ses = NULL; GF_M2TS_ES *es = NULL; Bool inherit_pcr = 0; u32 pid, stream_type, reg_desc_format; if (pos + 5 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT! size %d but position %d and need at least 5 bytes to declare es\n", data_size, pos)); break; } stream_type = data[0]; pid = ((data[1] & 0x1f) << 8) | data[2]; desc_len = ((data[3] & 0xf) << 8) | data[4]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("stream_type :%d \n",stream_type)); switch (stream_type) { /* PES */ case GF_M2TS_VIDEO_MPEG1: case GF_M2TS_VIDEO_MPEG2: case GF_M2TS_VIDEO_DCII: case GF_M2TS_VIDEO_MPEG4: case GF_M2TS_SYSTEMS_MPEG4_PES: case GF_M2TS_VIDEO_H264: case GF_M2TS_VIDEO_SVC: case GF_M2TS_VIDEO_MVCD: case GF_M2TS_VIDEO_HEVC: case GF_M2TS_VIDEO_HEVC_MCTS: case GF_M2TS_VIDEO_HEVC_TEMPORAL: case GF_M2TS_VIDEO_SHVC: case GF_M2TS_VIDEO_SHVC_TEMPORAL: case GF_M2TS_VIDEO_MHVC: case GF_M2TS_VIDEO_MHVC_TEMPORAL: inherit_pcr = 1; case GF_M2TS_AUDIO_MPEG1: case GF_M2TS_AUDIO_MPEG2: case GF_M2TS_AUDIO_AAC: case GF_M2TS_AUDIO_LATM_AAC: case GF_M2TS_AUDIO_AC3: case GF_M2TS_AUDIO_DTS: case GF_M2TS_MHAS_MAIN: case GF_M2TS_MHAS_AUX: case GF_M2TS_SUBTITLE_DVB: case GF_M2TS_METADATA_PES: GF_SAFEALLOC(pes, GF_M2TS_PES); if (!pes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } pes->cc = -1; pes->flags = GF_M2TS_ES_IS_PES; if (inherit_pcr) pes->flags |= GF_M2TS_INHERIT_PCR; es = (GF_M2TS_ES *)pes; break; case GF_M2TS_PRIVATE_DATA: GF_SAFEALLOC(pes, GF_M2TS_PES); if (!pes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } pes->cc = -1; pes->flags = GF_M2TS_ES_IS_PES; es = (GF_M2TS_ES *)pes; break; /* Sections */ case GF_M2TS_SYSTEMS_MPEG4_SECTIONS: GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES); if (!ses) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } es = (GF_M2TS_ES *)ses; es->flags |= GF_M2TS_ES_IS_SECTION; /* carriage of ISO_IEC_14496 data in sections */ if (stream_type == GF_M2TS_SYSTEMS_MPEG4_SECTIONS) { /*MPEG-4 sections need to be fully checked: if one section is lost, this means we lost one SL packet in the AU so we must wait for the complete 
section again*/ ses->sec = gf_m2ts_section_filter_new(gf_m2ts_process_mpeg4section, 0); /*create OD container*/ if (!pmt->program->additional_ods) { pmt->program->additional_ods = gf_list_new(); ts->has_4on2 = 1; } } break; case GF_M2TS_13818_6_ANNEX_A: case GF_M2TS_13818_6_ANNEX_B: case GF_M2TS_13818_6_ANNEX_C: case GF_M2TS_13818_6_ANNEX_D: case GF_M2TS_PRIVATE_SECTION: case GF_M2TS_QUALITY_SEC: case GF_M2TS_MORE_SEC: GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES); if (!ses) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } es = (GF_M2TS_ES *)ses; es->flags |= GF_M2TS_ES_IS_SECTION; es->pid = pid; es->service_id = pmt->program->number; if (stream_type == GF_M2TS_PRIVATE_SECTION) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("AIT sections on pid %d\n", pid)); } else if (stream_type == GF_M2TS_QUALITY_SEC) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Quality metadata sections on pid %d\n", pid)); } else if (stream_type == GF_M2TS_MORE_SEC) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("MORE sections on pid %d\n", pid)); } else { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type DSM CC user private sections on pid %d \n", pid)); } /* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */ ses->sec = gf_m2ts_section_filter_new(NULL, 1); //ses->sec->service_id = pmt->program->number; break; case GF_M2TS_MPE_SECTIONS: if (! ts->prefix_present) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type MPE found : pid = %d \n", pid)); #ifdef GPAC_ENABLE_MPE es = gf_dvb_mpe_section_new(); if (es->flags & GF_M2TS_ES_IS_SECTION) { /* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */ ((GF_M2TS_SECTION_ES*)es)->sec = gf_m2ts_section_filter_new(NULL, 1); } #endif break; } default: GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); //GF_LOG(/*GF_LOG_WARNING*/GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); break; } if (es) { es->stream_type = (stream_type==GF_M2TS_PRIVATE_DATA) ? 0 : stream_type; es->program = pmt->program; es->pid = pid; es->component_tag = -1; } pos += 5; data += 5; while (desc_len) { if (pos + 2 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! size %d but position %d and need at least 2 bytes to parse descritpor\n", data_size, pos)); break; } u8 tag = data[0]; u32 len = data[1]; if (pos + 2 + len > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! 
size %d, desc size %d but position %d\n", data_size, len, pos)); break; } if (es) { switch (tag) { case GF_M2TS_ISO_639_LANGUAGE_DESCRIPTOR: if (pes && (len>=3) ) pes->lang = GF_4CC(' ', data[2], data[3], data[4]); break; case GF_M2TS_MPEG4_SL_DESCRIPTOR: if (len>=2) { es->mpeg4_es_id = ( (u32) data[2] & 0x1f) << 8 | data[3]; es->flags |= GF_M2TS_ES_IS_SL; } break; case GF_M2TS_REGISTRATION_DESCRIPTOR: if (len>=4) { reg_desc_format = GF_4CC(data[2], data[3], data[4], data[5]); /*cf http://www.smpte-ra.org/mpegreg/mpegreg.html*/ switch (reg_desc_format) { case GF_M2TS_RA_STREAM_AC3: es->stream_type = GF_M2TS_AUDIO_AC3; break; case GF_M2TS_RA_STREAM_VC1: es->stream_type = GF_M2TS_VIDEO_VC1; break; case GF_M2TS_RA_STREAM_GPAC: if (len==8) { es->stream_type = GF_4CC(data[6], data[7], data[8], data[9]); es->flags |= GF_M2TS_GPAC_CODEC_ID; break; } default: GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Unknown registration descriptor %s\n", gf_4cc_to_str(reg_desc_format) )); break; } } break; case GF_M2TS_DVB_EAC3_DESCRIPTOR: es->stream_type = GF_M2TS_AUDIO_EC3; break; case GF_M2TS_DVB_DATA_BROADCAST_ID_DESCRIPTOR: if (len>=2) { u32 id = data[2]<<8 | data[3]; if ((id == 0xB) && ses && !ses->sec) { ses->sec = gf_m2ts_section_filter_new(NULL, 1); } } break; case GF_M2TS_DVB_SUBTITLING_DESCRIPTOR: if (pes && (len>=8)) { pes->sub.language[0] = data[2]; pes->sub.language[1] = data[3]; pes->sub.language[2] = data[4]; pes->sub.type = data[5]; pes->sub.composition_page_id = (data[6]<<8) | data[7]; pes->sub.ancillary_page_id = (data[8]<<8) | data[9]; } es->stream_type = GF_M2TS_DVB_SUBTITLE; break; case GF_M2TS_DVB_STREAM_IDENTIFIER_DESCRIPTOR: if (len>=1) { es->component_tag = data[2]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Component Tag: %d on Program %d\n", es->component_tag, es->program->number)); } break; case GF_M2TS_DVB_TELETEXT_DESCRIPTOR: es->stream_type = GF_M2TS_DVB_TELETEXT; break; case GF_M2TS_DVB_VBI_DATA_DESCRIPTOR: es->stream_type = GF_M2TS_DVB_VBI; break; case GF_M2TS_HIERARCHY_DESCRIPTOR: if (pes && (len>=4)) { u8 hierarchy_embedded_layer_index; GF_BitStream *hbs = gf_bs_new((const char *)data, data_size, GF_BITSTREAM_READ); /*u32 skip = */gf_bs_read_int(hbs, 16); /*u8 res1 = */gf_bs_read_int(hbs, 1); /*u8 temp_scal = */gf_bs_read_int(hbs, 1); /*u8 spatial_scal = */gf_bs_read_int(hbs, 1); /*u8 quality_scal = */gf_bs_read_int(hbs, 1); /*u8 hierarchy_type = */gf_bs_read_int(hbs, 4); /*u8 res2 = */gf_bs_read_int(hbs, 2); /*u8 hierarchy_layer_index = */gf_bs_read_int(hbs, 6); /*u8 tref_not_present = */gf_bs_read_int(hbs, 1); /*u8 res3 = */gf_bs_read_int(hbs, 1); hierarchy_embedded_layer_index = gf_bs_read_int(hbs, 6); /*u8 res4 = */gf_bs_read_int(hbs, 2); /*u8 hierarchy_channel = */gf_bs_read_int(hbs, 6); gf_bs_del(hbs); pes->depends_on_pid = 1+hierarchy_embedded_layer_index; } break; case GF_M2TS_METADATA_DESCRIPTOR: { GF_BitStream *metadatad_bs; GF_M2TS_MetadataDescriptor *metad; metadatad_bs = gf_bs_new((char *)data+2, len, GF_BITSTREAM_READ); metad = gf_m2ts_read_metadata_descriptor(metadatad_bs, len); gf_bs_del(metadatad_bs); if (metad->application_format_identifier == GF_M2TS_META_ID3 && metad->format_identifier == GF_M2TS_META_ID3) { /*HLS ID3 Metadata */ if (pes) { pes->metadata_descriptor = metad; pes->stream_type = GF_M2TS_METADATA_ID3_HLS; } } else { /* don't know what to do with it for now, delete */ gf_m2ts_metadata_descriptor_del(metad); } } break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] skipping descriptor (0x%x) not supported\n", tag)); break; } } data += 
len+2; pos += len+2; if (desc_len < len+2) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid PMT es descriptor size for PID %d\n", pid ) ); break; } desc_len-=len+2; } if (es && !es->stream_type) { gf_free(es); es = NULL; GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Private Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); } if (!es) continue; if (ts->ess[pid]) { //this is component reuse across programs, overwrite the previously declared stream ... if (status & GF_M2TS_TABLE_FOUND) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d reused across programs %d and %d, not completely supported\n", pid, ts->ess[pid]->program->number, es->program->number ) ); //add stream to program but don't reassign the pid table until the stream is playing (>GF_M2TS_PES_FRAMING_SKIP) gf_list_add(pmt->program->streams, es); if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP); nb_es++; //skip assignment below es = NULL; } /*watchout for pmt update - FIXME this likely won't work in most cases*/ else { GF_M2TS_ES *o_es = ts->ess[es->pid]; if ((o_es->stream_type == es->stream_type) && ((o_es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK) == (es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK)) && (o_es->mpeg4_es_id == es->mpeg4_es_id) && ((o_es->flags & GF_M2TS_ES_IS_SECTION) || ((GF_M2TS_PES *)o_es)->lang == ((GF_M2TS_PES *)es)->lang) ) { gf_free(es); es = NULL; } else { gf_m2ts_es_del(o_es, ts); ts->ess[es->pid] = NULL; } } } if (es) { ts->ess[es->pid] = es; gf_list_add(pmt->program->streams, es); if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP); nb_es++; if (es->stream_type == GF_M2TS_VIDEO_HEVC) nb_hevc++; else if (es->stream_type == GF_M2TS_VIDEO_HEVC_TEMPORAL) nb_hevc_temp++; else if (es->stream_type == GF_M2TS_VIDEO_SHVC) nb_shvc++; else if (es->stream_type == GF_M2TS_VIDEO_SHVC_TEMPORAL) nb_shvc_temp++; else if (es->stream_type == GF_M2TS_VIDEO_MHVC) nb_mhvc++; else if (es->stream_type == GF_M2TS_VIDEO_MHVC_TEMPORAL) nb_mhvc_temp++; } } //Table 2-139, implied hierarchy indexes if (nb_hevc_temp + nb_shvc + nb_shvc_temp + nb_mhvc+ nb_mhvc_temp) { for (i=0; i<gf_list_count(pmt->program->streams); i++) { GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i); if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue; if (es->depends_on_pid) continue; switch (es->stream_type) { case GF_M2TS_VIDEO_HEVC_TEMPORAL: es->depends_on_pid = 1; break; case GF_M2TS_VIDEO_SHVC: if (!nb_hevc_temp) es->depends_on_pid = 1; else es->depends_on_pid = 2; break; case GF_M2TS_VIDEO_SHVC_TEMPORAL: es->depends_on_pid = 3; break; case GF_M2TS_VIDEO_MHVC: if (!nb_hevc_temp) es->depends_on_pid = 1; else es->depends_on_pid = 2; break; case GF_M2TS_VIDEO_MHVC_TEMPORAL: if (!nb_hevc_temp) es->depends_on_pid = 2; else es->depends_on_pid = 3; break; } } } if (nb_es) { u32 i; //translate hierarchy descriptors indexes into PIDs - check whether the PMT-index rules are the same for HEVC for (i=0; i<gf_list_count(pmt->program->streams); i++) { GF_M2TS_PES *an_es = NULL; GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i); if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue; if (!es->depends_on_pid) continue; //fixeme we are not always assured that hierarchy_layer_index matches the stream index... 
//+1 is because our first stream is the PMT an_es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, es->depends_on_pid); if (an_es) { es->depends_on_pid = an_es->pid; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS] Wrong dependency index in hierarchy descriptor, assuming non-scalable stream\n")); es->depends_on_pid = 0; } } evt_type = (status&GF_M2TS_TABLE_FOUND) ? GF_M2TS_EVT_PMT_FOUND : GF_M2TS_EVT_PMT_UPDATE; if (ts->on_event) ts->on_event(ts, evt_type, pmt->program); } else { /* if we found no new ES it's simply a repeat of the PMT */ if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program); } } static void gf_m2ts_process_pat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_M2TS_Program *prog; GF_M2TS_SECTION_ES *pmt; u32 i, nb_progs, evt_type; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PAT_REPEAT, NULL); return; } nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PAT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; if (!(status&GF_M2TS_TABLE_UPDATE) && gf_list_count(ts->programs)) { if (ts->pat->demux_restarted) { ts->pat->demux_restarted = 0; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Multiple different PAT on single TS found, ignoring new PAT declaration (table id %d - extended table id %d)\n", table_id, ex_table_id)); } return; } nb_progs = data_size / 4; for (i=0; i<nb_progs; i++) { u16 number, pid; number = (data[0]<<8) | data[1]; pid = (data[2]&0x1f)<<8 | data[3]; data += 4; if (number==0) { if (!ts->nit) { ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); } } else if (!pid) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PAT found reserved PID 0, ignoring\n", pid)); } else if (! ts->ess[pid]) { GF_SAFEALLOC(prog, GF_M2TS_Program); if (!prog) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate program for pid %d\n", pid)); return; } prog->streams = gf_list_new(); prog->pmt_pid = pid; prog->number = number; prog->ts = ts; gf_list_add(ts->programs, prog); GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES); if (!pmt) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate pmt filter for pid %d\n", pid)); return; } pmt->flags = GF_M2TS_ES_IS_SECTION; gf_list_add(prog->streams, pmt); pmt->pid = prog->pmt_pid; pmt->program = prog; ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt; pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0); } } evt_type = (status&GF_M2TS_TABLE_UPDATE) ? 
GF_M2TS_EVT_PAT_UPDATE : GF_M2TS_EVT_PAT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } static void gf_m2ts_process_cat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 evt_type; /* GF_M2TS_Program *prog; GF_M2TS_SECTION_ES *pmt; u32 i, nb_progs; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; */ /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_CAT_REPEAT, NULL); return; } /* nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("CAT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; nb_progs = data_size / 4; for (i=0; i<nb_progs; i++) { u16 number, pid; number = (data[0]<<8) | data[1]; pid = (data[2]&0x1f)<<8 | data[3]; data += 4; if (number==0) { if (!ts->nit) { ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); } } else { GF_SAFEALLOC(prog, GF_M2TS_Program); prog->streams = gf_list_new(); prog->pmt_pid = pid; prog->number = number; gf_list_add(ts->programs, prog); GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES); pmt->flags = GF_M2TS_ES_IS_SECTION; gf_list_add(prog->streams, pmt); pmt->pid = prog->pmt_pid; pmt->program = prog; ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt; pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0); } } */ evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_CAT_UPDATE : GF_M2TS_EVT_CAT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } u64 gf_m2ts_get_pts(unsigned char *data) { u64 pts; u32 val; pts = (u64)((data[0] >> 1) & 0x07) << 30; val = (data[1] << 8) | data[2]; pts |= (u64)(val >> 1) << 15; val = (data[3] << 8) | data[4]; pts |= (u64)(val >> 1); return pts; } void gf_m2ts_pes_header(GF_M2TS_PES *pes, unsigned char *data, u32 data_size, GF_M2TS_PESHeader *pesh) { u32 has_pts, has_dts; u32 len_check; memset(pesh, 0, sizeof(GF_M2TS_PESHeader)); len_check = 0; pesh->id = data[0]; pesh->pck_len = (data[1]<<8) | data[2]; /* 2bits scrambling_control = gf_bs_read_int(bs,2); priority = gf_bs_read_int(bs,1); */ pesh->data_alignment = (data[3] & 0x4) ? 1 : 0; /* copyright = gf_bs_read_int(bs,1); original = gf_bs_read_int(bs,1); */ has_pts = (data[4]&0x80); has_dts = has_pts ? 
(data[4]&0x40) : 0; /* ESCR_flag = gf_bs_read_int(bs,1); ES_rate_flag = gf_bs_read_int(bs,1); DSM_flag = gf_bs_read_int(bs,1); additional_copy_flag = gf_bs_read_int(bs,1); prev_crc_flag = gf_bs_read_int(bs,1); extension_flag = gf_bs_read_int(bs,1); */ pesh->hdr_data_len = data[5]; data += 6; if (has_pts) { pesh->PTS = gf_m2ts_get_pts(data); data+=5; len_check += 5; } if (has_dts) { pesh->DTS = gf_m2ts_get_pts(data); //data+=5; len_check += 5; } else { pesh->DTS = pesh->PTS; } if (len_check < pesh->hdr_data_len) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Skipping %d bytes in pes header\n", pes->pid, pesh->hdr_data_len - len_check)); } else if (len_check > pesh->hdr_data_len) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong pes_header_data_length field %d bytes - read %d\n", pes->pid, pesh->hdr_data_len, len_check)); } if ((pesh->PTS<90000) && ((s32)pesh->DTS<0)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong DTS %d negative for PTS %d - forcing to 0\n", pes->pid, pesh->DTS, pesh->PTS)); pesh->DTS=0; } } static void gf_m2ts_store_temi(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes) { GF_BitStream *bs = gf_bs_new(pes->temi_tc_desc, pes->temi_tc_desc_len, GF_BITSTREAM_READ); u32 has_timestamp = gf_bs_read_int(bs, 2); Bool has_ntp = (Bool) gf_bs_read_int(bs, 1); /*u32 has_ptp = */gf_bs_read_int(bs, 1); /*u32 has_timecode = */gf_bs_read_int(bs, 2); memset(&pes->temi_tc, 0, sizeof(GF_M2TS_TemiTimecodeDescriptor)); pes->temi_tc.force_reload = gf_bs_read_int(bs, 1); pes->temi_tc.is_paused = gf_bs_read_int(bs, 1); pes->temi_tc.is_discontinuity = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 7); pes->temi_tc.timeline_id = gf_bs_read_int(bs, 8); if (has_timestamp) { pes->temi_tc.media_timescale = gf_bs_read_u32(bs); if (has_timestamp==2) pes->temi_tc.media_timestamp = gf_bs_read_u64(bs); else pes->temi_tc.media_timestamp = gf_bs_read_u32(bs); } if (has_ntp) { pes->temi_tc.ntp = gf_bs_read_u64(bs); } gf_bs_del(bs); pes->temi_tc_desc_len = 0; pes->temi_pending = 1; } void gf_m2ts_flush_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes) { GF_M2TS_PESHeader pesh; if (!ts) return; /*we need at least a full, valid start code and PES header !!*/ if ((pes->pck_data_len >= 4) && !pes->pck_data[0] && !pes->pck_data[1] && (pes->pck_data[2] == 0x1)) { u32 len; Bool has_pes_header = GF_TRUE; u32 stream_id = pes->pck_data[3]; Bool same_pts = GF_FALSE; switch (stream_id) { case GF_M2_STREAMID_PROGRAM_STREAM_MAP: case GF_M2_STREAMID_PADDING: case GF_M2_STREAMID_PRIVATE_2: case GF_M2_STREAMID_ECM: case GF_M2_STREAMID_EMM: case GF_M2_STREAMID_PROGRAM_STREAM_DIRECTORY: case GF_M2_STREAMID_DSMCC: case GF_M2_STREAMID_H222_TYPE_E: has_pes_header = GF_FALSE; break; } if (has_pes_header) { /*OK read header*/ gf_m2ts_pes_header(pes, pes->pck_data + 3, pes->pck_data_len - 3, &pesh); /*send PES timing*/ if (ts->notify_pes_timing) { GF_M2TS_PES_PCK pck; memset(&pck, 0, sizeof(GF_M2TS_PES_PCK)); pck.PTS = pesh.PTS; pck.DTS = pesh.DTS; pck.stream = pes; if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP; pes->pes_end_packet_number = ts->pck_number; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PES_TIMING, &pck); } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Got PES header DTS %d PTS %d\n", pes->pid, pesh.DTS, pesh.PTS)); if (pesh.PTS) { if (pesh.PTS == pes->PTS) { same_pts = GF_TRUE; GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same PTS "LLU" for two consecutive PES packets \n", pes->pid, pes->PTS)); } #ifndef GPAC_DISABLE_LOG /*FIXME - this test should only 
be done for non bi-directionnally coded media else if (pesh.PTS < pes->PTS) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - PTS "LLU" less than previous packet PTS "LLU"\n", pes->pid, pesh.PTS, pes->PTS) ); } */ #endif pes->PTS = pesh.PTS; #ifndef GPAC_DISABLE_LOG { if (pes->DTS && (pesh.DTS == pes->DTS)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same DTS "LLU" for two consecutive PES packets \n", pes->pid, pes->DTS)); } if (pesh.DTS < pes->DTS) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - DTS "LLU" less than previous DTS "LLU"\n", pes->pid, pesh.DTS, pes->DTS)); } } #endif pes->DTS = pesh.DTS; } /*no PTSs were coded, same time*/ else if (!pesh.hdr_data_len) { same_pts = GF_TRUE; } /*3-byte start-code + 6 bytes header + hdr extensions*/ len = 9 + pesh.hdr_data_len; } else { /*3-byte start-code + 1 byte streamid*/ len = 4; memset(&pesh, 0, sizeof(pesh)); } if ((u8) pes->pck_data[3]==0xfa) { GF_M2TS_SL_PCK sl_pck; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] SL Packet in PES for %d - ES ID %d\n", pes->pid, pes->mpeg4_es_id)); if (pes->pck_data_len > len) { sl_pck.data = (char *)pes->pck_data + len; sl_pck.data_len = pes->pck_data_len - len; sl_pck.stream = (GF_M2TS_ES *)pes; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Bad SL Packet size: (%d indicated < %d header)\n", pes->pid, pes->pck_data_len, len)); } } else if (pes->reframe) { u32 remain = 0; u32 offset = len; if (pesh.pck_len && (pesh.pck_len-3-pesh.hdr_data_len != pes->pck_data_len-len)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES payload size %d but received %d bytes\n", pes->pid, (u32) ( pesh.pck_len-3-pesh.hdr_data_len), pes->pck_data_len-len)); } //copy over the remaining of previous PES payload before start of this PES payload if (pes->prev_data_len) { if (pes->prev_data_len < len) { offset = len - pes->prev_data_len; memcpy(pes->pck_data + offset, pes->prev_data, pes->prev_data_len); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES reassembly buffer overflow (%d bytes not processed from previous PES) - discarding prev data\n", pes->pid, pes->prev_data_len )); } } if (!pes->temi_pending && pes->temi_tc_desc_len) { gf_m2ts_store_temi(ts, pes); } if (pes->temi_pending) { pes->temi_pending = 0; pes->temi_tc.pes_pts = pes->PTS; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_TIMECODE, &pes->temi_tc); } if (! ts->seek_mode) remain = pes->reframe(ts, pes, same_pts, pes->pck_data+offset, pes->pck_data_len-offset, &pesh); //CLEANUP alloc stuff if (pes->prev_data) gf_free(pes->prev_data); pes->prev_data = NULL; pes->prev_data_len = 0; if (remain) { pes->prev_data = gf_malloc(sizeof(char)*remain); assert(pes->pck_data_len >= remain); memcpy(pes->prev_data, pes->pck_data + pes->pck_data_len - remain, remain); pes->prev_data_len = remain; } } } else if (pes->pck_data_len) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Bad PES Header, discarding packet (maybe stream is encrypted ?)\n", pes->pid)); } pes->pck_data_len = 0; pes->pes_len = 0; pes->rap = 0; } static void gf_m2ts_process_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size, GF_M2TS_AdaptationField *paf) { u8 expect_cc; Bool disc=0; Bool flush_pes = 0; /*duplicated packet, NOT A DISCONTINUITY, we should discard the packet - however we may encounter this configuration in DASH at segment boundaries. 
If payload start is set, ignore duplication*/ if (hdr->continuity_counter==pes->cc) { if (!hdr->payload_start || (hdr->adaptation_field!=3) ) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Duplicated Packet found (CC %d) - skipping\n", pes->pid, pes->cc)); return; } } else { expect_cc = (pes->cc<0) ? hdr->continuity_counter : (pes->cc + 1) & 0xf; if (expect_cc != hdr->continuity_counter) disc = 1; } pes->cc = hdr->continuity_counter; if (disc) { if (pes->flags & GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY) { pes->flags &= ~GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY; disc = 0; } if (disc) { if (hdr->payload_start) { if (pes->pck_data_len) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - may have lost end of previous PES\n", pes->pid, expect_cc, hdr->continuity_counter)); } } else { if (pes->pck_data_len) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - trashing PES packet\n", pes->pid, expect_cc, hdr->continuity_counter)); } pes->pck_data_len = 0; pes->pes_len = 0; pes->cc = -1; return; } } } if (!pes->reframe) return; if (hdr->payload_start) { flush_pes = 1; pes->pes_start_packet_number = ts->pck_number; pes->before_last_pcr_value = pes->program->before_last_pcr_value; pes->before_last_pcr_value_pck_number = pes->program->before_last_pcr_value_pck_number; pes->last_pcr_value = pes->program->last_pcr_value; pes->last_pcr_value_pck_number = pes->program->last_pcr_value_pck_number; } else if (pes->pes_len && (pes->pck_data_len + data_size == pes->pes_len + 6)) { /* 6 = startcode+stream_id+length*/ /*reassemble pes*/ if (pes->pck_data_len + data_size > pes->pck_alloc_len) { pes->pck_alloc_len = pes->pck_data_len + data_size; pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len); } memcpy(pes->pck_data+pes->pck_data_len, data, data_size); pes->pck_data_len += data_size; /*force discard*/ data_size = 0; flush_pes = 1; } /*PES first fragment: flush previous packet*/ if (flush_pes && pes->pck_data_len) { gf_m2ts_flush_pes(ts, pes); if (!data_size) return; } /*we need to wait for first packet of PES*/ if (!pes->pck_data_len && !hdr->payload_start) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Waiting for PES header, trashing data\n", hdr->pid)); return; } /*reassemble*/ if (pes->pck_data_len + data_size > pes->pck_alloc_len ) { pes->pck_alloc_len = pes->pck_data_len + data_size; pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len); } memcpy(pes->pck_data + pes->pck_data_len, data, data_size); pes->pck_data_len += data_size; if (paf && paf->random_access_indicator) pes->rap = 1; if (hdr->payload_start && !pes->pes_len && (pes->pck_data_len>=6)) { pes->pes_len = (pes->pck_data[4]<<8) | pes->pck_data[5]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Got PES packet len %d\n", pes->pid, pes->pes_len)); if (pes->pes_len + 6 == pes->pck_data_len) { gf_m2ts_flush_pes(ts, pes); } } } static void gf_m2ts_get_adaptation_field(GF_M2TS_Demuxer *ts, GF_M2TS_AdaptationField *paf, unsigned char *data, u32 size, u32 pid) { unsigned char *af_extension; paf->discontinuity_indicator = (data[0] & 0x80) ? 1 : 0; paf->random_access_indicator = (data[0] & 0x40) ? 1 : 0; paf->priority_indicator = (data[0] & 0x20) ? 1 : 0; paf->PCR_flag = (data[0] & 0x10) ? 1 : 0; paf->OPCR_flag = (data[0] & 0x8) ? 1 : 0; paf->splicing_point_flag = (data[0] & 0x4) ? 1 : 0; paf->transport_private_data_flag = (data[0] & 0x2) ? 
1 : 0; paf->adaptation_field_extension_flag = (data[0] & 0x1) ? 1 : 0; af_extension = data + 1; if (paf->PCR_flag == 1) { u32 base = ((u32)data[1] << 24) | ((u32)data[2] << 16) | ((u32)data[3] << 8) | (u32)data[4]; u64 PCR = (u64) base; paf->PCR_base = (PCR << 1) | (data[5] >> 7); paf->PCR_ext = ((data[5] & 1) << 8) | data[6]; af_extension += 6; } if (paf->adaptation_field_extension_flag) { u32 afext_bytes; Bool ltw_flag, pwr_flag, seamless_flag, af_desc_not_present; if (paf->OPCR_flag) { af_extension += 6; } if (paf->splicing_point_flag) { af_extension += 1; } if (paf->transport_private_data_flag) { u32 priv_bytes = af_extension[0]; af_extension += 1 + priv_bytes; } afext_bytes = af_extension[0]; ltw_flag = af_extension[1] & 0x80 ? 1 : 0; pwr_flag = af_extension[1] & 0x40 ? 1 : 0; seamless_flag = af_extension[1] & 0x20 ? 1 : 0; af_desc_not_present = af_extension[1] & 0x10 ? 1 : 0; af_extension += 2; if (!afext_bytes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid)); return; } afext_bytes-=1; if (ltw_flag) { af_extension += 2; if (afext_bytes<2) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid)); return; } afext_bytes-=2; } if (pwr_flag) { af_extension += 3; if (afext_bytes<3) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid)); return; } afext_bytes-=3; } if (seamless_flag) { af_extension += 3; if (afext_bytes<3) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid)); return; } afext_bytes-=3; } if (! af_desc_not_present) { while (afext_bytes) { GF_BitStream *bs; char *desc; u8 desc_tag = af_extension[0]; u8 desc_len = af_extension[1]; if (!desc_len || (u32) desc_len+2 > afext_bytes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Descriptor found (tag %d) size is %d but only %d bytes available\n", pid, desc_tag, desc_len, afext_bytes)); break; } desc = (char *) af_extension+2; bs = gf_bs_new(desc, desc_len, GF_BITSTREAM_READ); switch (desc_tag) { case GF_M2TS_AFDESC_LOCATION_DESCRIPTOR: { Bool use_base_temi_url; char URL[255]; GF_M2TS_TemiLocationDescriptor temi_loc; memset(&temi_loc, 0, sizeof(GF_M2TS_TemiLocationDescriptor) ); temi_loc.reload_external = gf_bs_read_int(bs, 1); temi_loc.is_announce = gf_bs_read_int(bs, 1); temi_loc.is_splicing = gf_bs_read_int(bs, 1); use_base_temi_url = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 5); //reserved temi_loc.timeline_id = gf_bs_read_int(bs, 7); if (!use_base_temi_url) { char *_url = URL; u8 scheme = gf_bs_read_int(bs, 8); u8 url_len = gf_bs_read_int(bs, 8); switch (scheme) { case 1: strcpy(URL, "http://"); _url = URL+7; break; case 2: strcpy(URL, "https://"); _url = URL+8; break; } gf_bs_read_data(bs, _url, url_len); _url[url_len] = 0; } temi_loc.external_URL = URL; GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Location descriptor found - URL %s\n", pid, URL)); if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_LOCATION, &temi_loc); } break; case GF_M2TS_AFDESC_TIMELINE_DESCRIPTOR: if (ts->ess[pid] && (ts->ess[pid]->flags & GF_M2TS_ES_IS_PES)) { GF_M2TS_PES *pes = (GF_M2TS_PES *) ts->ess[pid]; if (pes->temi_tc_desc_len) gf_m2ts_store_temi(ts, pes); if (pes->temi_tc_desc_alloc_size < desc_len) { pes->temi_tc_desc = gf_realloc(pes->temi_tc_desc, desc_len); pes->temi_tc_desc_alloc_size = desc_len; } memcpy(pes->temi_tc_desc, desc, desc_len); pes->temi_tc_desc_len = desc_len; GF_LOG(GF_LOG_DEBUG, 
GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Timeline descriptor found\n", pid)); } break; } gf_bs_del(bs); af_extension += 2+desc_len; afext_bytes -= 2+desc_len; } } } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Adaptation Field found: Discontinuity %d - RAP %d - PCR: "LLD"\n", pid, paf->discontinuity_indicator, paf->random_access_indicator, paf->PCR_flag ? paf->PCR_base * 300 + paf->PCR_ext : 0)); } static GF_Err gf_m2ts_process_packet(GF_M2TS_Demuxer *ts, unsigned char *data) { GF_M2TS_ES *es; GF_M2TS_Header hdr; GF_M2TS_AdaptationField af, *paf; u32 payload_size, af_size; u32 pos = 0; ts->pck_number++; /* read TS packet header*/ hdr.sync = data[0]; if (hdr.sync != 0x47) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d does not start with sync marker\n", ts->pck_number)); return GF_CORRUPTED_DATA; } hdr.error = (data[1] & 0x80) ? 1 : 0; hdr.payload_start = (data[1] & 0x40) ? 1 : 0; hdr.priority = (data[1] & 0x20) ? 1 : 0; hdr.pid = ( (data[1]&0x1f) << 8) | data[2]; hdr.scrambling_ctrl = (data[3] >> 6) & 0x3; hdr.adaptation_field = (data[3] >> 4) & 0x3; hdr.continuity_counter = data[3] & 0xf; if (hdr.error) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d has error (PID could be %d)\n", ts->pck_number, hdr.pid)); return GF_CORRUPTED_DATA; } //#if DEBUG_TS_PACKET GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d PID %d CC %d Encrypted %d\n", ts->pck_number, hdr.pid, hdr.continuity_counter, hdr.scrambling_ctrl)); //#endif if (hdr.scrambling_ctrl) { //TODO add decyphering GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d is scrambled - not supported\n", ts->pck_number, hdr.pid)); return GF_NOT_SUPPORTED; } paf = NULL; payload_size = 184; pos = 4; switch (hdr.adaptation_field) { /*adaptation+data*/ case 3: af_size = data[4]; if (af_size>183) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF field larger than 183 !\n", ts->pck_number)); //error return GF_CORRUPTED_DATA; } paf = &af; memset(paf, 0, sizeof(GF_M2TS_AdaptationField)); //this will stop you when processing invalid (yet existing) mpeg2ts streams in debug assert( af_size<=183); if (af_size>183) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d Detected wrong adaption field size %u when control value is 3\n", ts->pck_number, af_size)); if (af_size) gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid); pos += 1+af_size; payload_size = 183 - af_size; break; /*adaptation only - still process in case of PCR*/ case 2: af_size = data[4]; if (af_size != 183) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF size is %d when it must be 183 for AF type 2\n", ts->pck_number, af_size)); return GF_CORRUPTED_DATA; } paf = &af; memset(paf, 0, sizeof(GF_M2TS_AdaptationField)); gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid); payload_size = 0; /*no payload and no PCR, return*/ if (!paf->PCR_flag) return GF_OK; break; /*reserved*/ case 0: return GF_OK; default: break; } data += pos; /*PAT*/ if (hdr.pid == GF_M2TS_PID_PAT) { gf_m2ts_gather_section(ts, ts->pat, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_CAT) { gf_m2ts_gather_section(ts, ts->cat, NULL, &hdr, data, payload_size); return GF_OK; } es = ts->ess[hdr.pid]; if (paf && paf->PCR_flag) { if (!es) { u32 i, j; for(i=0; i<gf_list_count(ts->programs); i++) { GF_M2TS_PES *first_pes = NULL; GF_M2TS_Program *program = (GF_M2TS_Program *)gf_list_get(ts->programs,i); if(program->pcr_pid != 
hdr.pid) continue; for (j=0; j<gf_list_count(program->streams); j++) { GF_M2TS_PES *pes = (GF_M2TS_PES *) gf_list_get(program->streams, j); if (pes->flags & GF_M2TS_INHERIT_PCR) { ts->ess[hdr.pid] = (GF_M2TS_ES *) pes; pes->flags |= GF_M2TS_FAKE_PCR; break; } if (pes->flags & GF_M2TS_ES_IS_PES) { first_pes = pes; } } //non found, use the first media stream as a PCR destination - Q: is it legal to have PCR only streams not declared in PMT ? if (!es && first_pes) { es = (GF_M2TS_ES *) first_pes; first_pes->flags |= GF_M2TS_FAKE_PCR; } break; } if (!es) es = ts->ess[hdr.pid]; } if (es) { GF_M2TS_PES_PCK pck; s64 prev_diff_in_us; Bool discontinuity; s32 cc = -1; if (es->flags & GF_M2TS_FAKE_PCR) { cc = es->program->pcr_cc; es->program->pcr_cc = hdr.continuity_counter; } else if (es->flags & GF_M2TS_ES_IS_PES) cc = ((GF_M2TS_PES*)es)->cc; else if (((GF_M2TS_SECTION_ES*)es)->sec) cc = ((GF_M2TS_SECTION_ES*)es)->sec->cc; discontinuity = paf->discontinuity_indicator; if ((cc>=0) && es->program->before_last_pcr_value) { //no increment of CC if AF only packet if (hdr.adaptation_field == 2) { if (hdr.continuity_counter != cc) { discontinuity = GF_TRUE; } } else if (hdr.continuity_counter != ((cc + 1) & 0xF)) { discontinuity = GF_TRUE; } } memset(&pck, 0, sizeof(GF_M2TS_PES_PCK)); prev_diff_in_us = (s64) (es->program->last_pcr_value /27- es->program->before_last_pcr_value/27); es->program->before_last_pcr_value = es->program->last_pcr_value; es->program->before_last_pcr_value_pck_number = es->program->last_pcr_value_pck_number; es->program->last_pcr_value_pck_number = ts->pck_number; es->program->last_pcr_value = paf->PCR_base * 300 + paf->PCR_ext; if (!es->program->last_pcr_value) es->program->last_pcr_value = 1; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" ("LLU" at 90kHz) - PCR diff is %d us\n", hdr.pid, es->program->last_pcr_value, es->program->last_pcr_value/300, (s32) (es->program->last_pcr_value - es->program->before_last_pcr_value)/27 )); pck.PTS = es->program->last_pcr_value; pck.stream = (GF_M2TS_PES *)es; //try to ignore all discontinuities that are less than 200 ms (seen in some HLS setup ...) 
if (discontinuity) { s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27; u64 diff = ABS(diff_in_us - prev_diff_in_us); if ((diff_in_us<0) && (diff_in_us >= -200000)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, with discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us)); } //ignore PCR discontinuity indicator if PCR found is larger than previously received PCR and diffence between PCR before and after discontinuity indicator is smaller than 50ms else if ((diff_in_us > 0) && (diff < 200000)) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled but diff is small (diff %d us - PCR diff %d vs prev PCR diff %d) - ignore it\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); } else if (paf->discontinuity_indicator) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity not signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } } else if ( (es->program->last_pcr_value < es->program->before_last_pcr_value) ) { s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27; //if less than 200 ms before PCR loop at the last PCR, this is a PCR loop if (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value < 5400000 /*2*2700000*/) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR loop found from "LLU" to "LLU" \n", hdr.pid, es->program->before_last_pcr_value, es->program->last_pcr_value)); } else if ((diff_in_us<0) && (diff_in_us >= -200000)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, without discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us)); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" is less than previously received PCR "LLU" (PCR diff %g sec) but no discontinuity signaled\n", hdr.pid, es->program->last_pcr_value, es->program->before_last_pcr_value, (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value + es->program->last_pcr_value) / 27000000.0)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } } if (pck.flags & GF_M2TS_PES_PCK_DISCONTINUITY) { gf_m2ts_reset_parsers_for_program(ts, es->program); } if (ts->on_event) { ts->on_event(ts, GF_M2TS_EVT_PES_PCR, &pck); } } } /*check for DVB reserved PIDs*/ if (!es) { if (hdr.pid == GF_M2TS_PID_SDT_BAT_ST) { gf_m2ts_gather_section(ts, ts->sdt, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_NIT_ST) { /*ignore them, unused at application level*/ gf_m2ts_gather_section(ts, ts->nit, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_EIT_ST_CIT) { /* ignore EIT messages for the moment */ gf_m2ts_gather_section(ts, ts->eit, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_TDT_TOT_ST) { gf_m2ts_gather_section(ts, ts->tdt_tot, NULL, &hdr, data, payload_size); } else { /* ignore packet */ } } else if (es->flags & GF_M2TS_ES_IS_SECTION) { /* The stream uses sections to carry its payload */ GF_M2TS_SECTION_ES 
*ses = (GF_M2TS_SECTION_ES *)es; if (ses->sec) gf_m2ts_gather_section(ts, ses->sec, ses, &hdr, data, payload_size); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; /* regular stream using PES packets */ if (pes->reframe && payload_size) gf_m2ts_process_pes(ts, pes, &hdr, data, payload_size, paf); } return GF_OK; } GF_EXPORT GF_Err gf_m2ts_process_data(GF_M2TS_Demuxer *ts, u8 *data, u32 data_size) { GF_Err e=GF_OK; u32 pos, pck_size; Bool is_align = 1; if (ts->buffer_size) { //we are sync, copy remaining bytes if ( (ts->buffer[0]==0x47) && (ts->buffer_size<200)) { u32 pck_size = ts->prefix_present ? 192 : 188; if (ts->alloc_size < 200) { ts->alloc_size = 200; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer + ts->buffer_size, data, pck_size - ts->buffer_size); e |= gf_m2ts_process_packet(ts, (unsigned char *)ts->buffer); data += (pck_size - ts->buffer_size); data_size = data_size - (pck_size - ts->buffer_size); } //not sync, copy over the complete buffer else { if (ts->alloc_size < ts->buffer_size+data_size) { ts->alloc_size = ts->buffer_size+data_size; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer + ts->buffer_size, data, sizeof(char)*data_size); ts->buffer_size += data_size; is_align = 0; data = ts->buffer; data_size = ts->buffer_size; } } /*sync input data*/ pos = gf_m2ts_sync(ts, data, data_size, is_align); if (pos==data_size) { if (is_align) { if (ts->alloc_size<data_size) { ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*data_size); ts->alloc_size = data_size; } memcpy(ts->buffer, data, sizeof(char)*data_size); ts->buffer_size = data_size; } return GF_OK; } pck_size = ts->prefix_present ? 192 : 188; for (;;) { /*wait for a complete packet*/ if (data_size < pos + pck_size) { ts->buffer_size = data_size - pos; data += pos; if (!ts->buffer_size) { return e; } assert(ts->buffer_size<pck_size); if (is_align) { u32 s = ts->buffer_size; if (s<200) s = 200; if (ts->alloc_size < s) { ts->alloc_size = s; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer, data, sizeof(char)*ts->buffer_size); } else { memmove(ts->buffer, data, sizeof(char)*ts->buffer_size); } return e; } /*process*/ e |= gf_m2ts_process_packet(ts, (unsigned char *)data + pos); pos += pck_size; } return e; } //unused #if 0 GF_ESD *gf_m2ts_get_esd(GF_M2TS_ES *es) { GF_ESD *esd; u32 k, esd_count; esd = NULL; if (es->program->pmt_iod && es->program->pmt_iod->ESDescriptors) { esd_count = gf_list_count(es->program->pmt_iod->ESDescriptors); for (k = 0; k < esd_count; k++) { GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(es->program->pmt_iod->ESDescriptors, k); if (esd_tmp->ESID != es->mpeg4_es_id) continue; esd = esd_tmp; break; } } if (!esd && es->program->additional_ods) { u32 od_count, od_index; od_count = gf_list_count(es->program->additional_ods); for (od_index = 0; od_index < od_count; od_index++) { GF_ObjectDescriptor *od = (GF_ObjectDescriptor *)gf_list_get(es->program->additional_ods, od_index); esd_count = gf_list_count(od->ESDescriptors); for (k = 0; k < esd_count; k++) { GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(od->ESDescriptors, k); if (esd_tmp->ESID != es->mpeg4_es_id) continue; esd = esd_tmp; break; } } } return esd; } void gf_m2ts_set_segment_switch(GF_M2TS_Demuxer *ts) { u32 i; for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i]; if (!es) continue; es->flags |= GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY; } } #endif GF_EXPORT void 
gf_m2ts_reset_parsers_for_program(GF_M2TS_Demuxer *ts, GF_M2TS_Program *prog) { u32 i; for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i]; if (!es) continue; if (prog && (es->program != prog) ) continue; if (es->flags & GF_M2TS_ES_IS_SECTION) { GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es; gf_m2ts_section_filter_reset(ses->sec); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; if (!pes || (pes->pid==pes->program->pmt_pid)) continue; pes->cc = -1; pes->frame_state = 0; pes->pck_data_len = 0; if (pes->prev_data) gf_free(pes->prev_data); pes->prev_data = NULL; pes->prev_data_len = 0; pes->PTS = pes->DTS = 0; // pes->prev_PTS = 0; // pes->first_dts = 0; pes->pes_len = pes->pes_end_packet_number = pes->pes_start_packet_number = 0; if (pes->buf) gf_free(pes->buf); pes->buf = NULL; if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc); pes->temi_tc_desc = NULL; pes->temi_tc_desc_len = pes->temi_tc_desc_alloc_size = 0; pes->before_last_pcr_value = pes->before_last_pcr_value_pck_number = 0; pes->last_pcr_value = pes->last_pcr_value_pck_number = 0; if (pes->program->pcr_pid==pes->pid) { pes->program->last_pcr_value = pes->program->last_pcr_value_pck_number = 0; pes->program->before_last_pcr_value = pes->program->before_last_pcr_value_pck_number = 0; } } } } GF_EXPORT void gf_m2ts_reset_parsers(GF_M2TS_Demuxer *ts) { gf_m2ts_reset_parsers_for_program(ts, NULL); ts->pck_number = 0; gf_m2ts_section_filter_reset(ts->cat); gf_m2ts_section_filter_reset(ts->pat); gf_m2ts_section_filter_reset(ts->sdt); gf_m2ts_section_filter_reset(ts->nit); gf_m2ts_section_filter_reset(ts->eit); gf_m2ts_section_filter_reset(ts->tdt_tot); } #if 0 //unused u32 gf_m2ts_pes_get_framing_mode(GF_M2TS_PES *pes) { if (pes->flags & GF_M2TS_ES_IS_SECTION) { if (pes->flags & GF_M2TS_ES_IS_SL) { if ( ((GF_M2TS_SECTION_ES *)pes)->sec->process_section == NULL) return GF_M2TS_PES_FRAMING_DEFAULT; } return GF_M2TS_PES_FRAMING_SKIP_NO_RESET; } if (!pes->reframe ) return GF_M2TS_PES_FRAMING_SKIP_NO_RESET; if (pes->reframe == gf_m2ts_reframe_default) return GF_M2TS_PES_FRAMING_RAW; if (pes->reframe == gf_m2ts_reframe_reset) return GF_M2TS_PES_FRAMING_SKIP; return GF_M2TS_PES_FRAMING_DEFAULT; } #endif GF_EXPORT GF_Err gf_m2ts_set_pes_framing(GF_M2TS_PES *pes, u32 mode) { if (!pes) return GF_BAD_PARAM; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Setting pes framing mode of PID %d to %d\n", pes->pid, mode) ); /*ignore request for section PIDs*/ if (pes->flags & GF_M2TS_ES_IS_SECTION) { if (pes->flags & GF_M2TS_ES_IS_SL) { if (mode==GF_M2TS_PES_FRAMING_DEFAULT) { ((GF_M2TS_SECTION_ES *)pes)->sec->process_section = gf_m2ts_process_mpeg4section; } else { ((GF_M2TS_SECTION_ES *)pes)->sec->process_section = NULL; } } return GF_OK; } if (pes->pid==pes->program->pmt_pid) return GF_BAD_PARAM; //if component reuse, disable previous pes if ((mode > GF_M2TS_PES_FRAMING_SKIP) && (pes->program->ts->ess[pes->pid] != (GF_M2TS_ES *) pes)) { GF_M2TS_PES *o_pes = (GF_M2TS_PES *) pes->program->ts->ess[pes->pid]; if (o_pes->flags & GF_M2TS_ES_IS_PES) gf_m2ts_set_pes_framing(o_pes, GF_M2TS_PES_FRAMING_SKIP); GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] Reassinging PID %d from program %d to program %d\n", pes->pid, o_pes->program->number, pes->program->number) ); pes->program->ts->ess[pes->pid] = (GF_M2TS_ES *) pes; } switch (mode) { case GF_M2TS_PES_FRAMING_RAW: pes->reframe = gf_m2ts_reframe_default; break; case GF_M2TS_PES_FRAMING_SKIP: pes->reframe = gf_m2ts_reframe_reset; break; case 
GF_M2TS_PES_FRAMING_SKIP_NO_RESET: pes->reframe = NULL; break; case GF_M2TS_PES_FRAMING_DEFAULT: default: switch (pes->stream_type) { case GF_M2TS_VIDEO_MPEG1: case GF_M2TS_VIDEO_MPEG2: case GF_M2TS_VIDEO_H264: case GF_M2TS_VIDEO_SVC: case GF_M2TS_VIDEO_HEVC: case GF_M2TS_VIDEO_HEVC_TEMPORAL: case GF_M2TS_VIDEO_HEVC_MCTS: case GF_M2TS_VIDEO_SHVC: case GF_M2TS_VIDEO_SHVC_TEMPORAL: case GF_M2TS_VIDEO_MHVC: case GF_M2TS_VIDEO_MHVC_TEMPORAL: case GF_M2TS_AUDIO_MPEG1: case GF_M2TS_AUDIO_MPEG2: case GF_M2TS_AUDIO_AAC: case GF_M2TS_AUDIO_LATM_AAC: case GF_M2TS_AUDIO_AC3: case GF_M2TS_AUDIO_EC3: //for all our supported codec types, use a reframer filter pes->reframe = gf_m2ts_reframe_default; break; case GF_M2TS_PRIVATE_DATA: /* TODO: handle DVB subtitle streams */ break; case GF_M2TS_METADATA_ID3_HLS: //TODO pes->reframe = gf_m2ts_reframe_id3_pes; break; default: pes->reframe = gf_m2ts_reframe_default; break; } break; } return GF_OK; } GF_EXPORT GF_M2TS_Demuxer *gf_m2ts_demux_new() { GF_M2TS_Demuxer *ts; GF_SAFEALLOC(ts, GF_M2TS_Demuxer); if (!ts) return NULL; ts->programs = gf_list_new(); ts->SDTs = gf_list_new(); ts->pat = gf_m2ts_section_filter_new(gf_m2ts_process_pat, 0); ts->cat = gf_m2ts_section_filter_new(gf_m2ts_process_cat, 0); ts->sdt = gf_m2ts_section_filter_new(gf_m2ts_process_sdt, 1); ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); ts->eit = gf_m2ts_section_filter_new(NULL/*gf_m2ts_process_eit*/, 1); ts->tdt_tot = gf_m2ts_section_filter_new(gf_m2ts_process_tdt_tot, 1); #ifdef GPAC_ENABLE_MPE gf_dvb_mpe_init(ts); #endif ts->nb_prog_pmt_received = 0; ts->ChannelAppList = gf_list_new(); return ts; } GF_EXPORT void gf_m2ts_demux_dmscc_init(GF_M2TS_Demuxer *ts) { char temp_dir[GF_MAX_PATH]; u32 length; GF_Err e; ts->dsmcc_controler = gf_list_new(); ts->process_dmscc = 1; strcpy(temp_dir, gf_get_default_cache_directory() ); length = (u32) strlen(temp_dir); if(temp_dir[length-1] == GF_PATH_SEPARATOR) { temp_dir[length-1] = 0; } ts->dsmcc_root_dir = (char*)gf_calloc(strlen(temp_dir)+strlen("CarouselData")+2,sizeof(char)); sprintf(ts->dsmcc_root_dir,"%s%cCarouselData",temp_dir,GF_PATH_SEPARATOR); e = gf_mkdir(ts->dsmcc_root_dir); if(e) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[Process DSMCC] Error during the creation of the directory %s \n",ts->dsmcc_root_dir)); } } GF_EXPORT void gf_m2ts_demux_del(GF_M2TS_Demuxer *ts) { u32 i; if (ts->pat) gf_m2ts_section_filter_del(ts->pat); if (ts->cat) gf_m2ts_section_filter_del(ts->cat); if (ts->sdt) gf_m2ts_section_filter_del(ts->sdt); if (ts->nit) gf_m2ts_section_filter_del(ts->nit); if (ts->eit) gf_m2ts_section_filter_del(ts->eit); if (ts->tdt_tot) gf_m2ts_section_filter_del(ts->tdt_tot); for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { //bacause of pure PCR streams, en ES might be reassigned on 2 PIDs, one for the ES and one for the PCR if (ts->ess[i] && (ts->ess[i]->pid==i)) gf_m2ts_es_del(ts->ess[i], ts); } if (ts->buffer) gf_free(ts->buffer); while (gf_list_count(ts->programs)) { GF_M2TS_Program *p = (GF_M2TS_Program *)gf_list_last(ts->programs); gf_list_rem_last(ts->programs); gf_list_del(p->streams); /*reset OD list*/ if (p->additional_ods) { gf_odf_desc_list_del(p->additional_ods); gf_list_del(p->additional_ods); } if (p->pmt_iod) gf_odf_desc_del((GF_Descriptor *)p->pmt_iod); if (p->metadata_pointer_descriptor) gf_m2ts_metadata_pointer_descriptor_del(p->metadata_pointer_descriptor); gf_free(p); } gf_list_del(ts->programs); if (ts->TDT_time) gf_free(ts->TDT_time); gf_m2ts_reset_sdt(ts); if (ts->tdt_tot) gf_list_del(ts->SDTs); #ifdef 
GPAC_ENABLE_MPE gf_dvb_mpe_shutdown(ts); #endif if (ts->dsmcc_controler) { if (gf_list_count(ts->dsmcc_controler)) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_DSMCC_OVERLORD* dsmcc_overlord = (GF_M2TS_DSMCC_OVERLORD*)gf_list_get(ts->dsmcc_controler,0); gf_cleanup_dir(dsmcc_overlord->root_dir); gf_rmdir(dsmcc_overlord->root_dir); gf_m2ts_delete_dsmcc_overlord(dsmcc_overlord); if(ts->dsmcc_root_dir) { gf_free(ts->dsmcc_root_dir); } #endif } gf_list_del(ts->dsmcc_controler); } while(gf_list_count(ts->ChannelAppList)) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_CHANNEL_APPLICATION_INFO* ChanAppInfo = (GF_M2TS_CHANNEL_APPLICATION_INFO*)gf_list_get(ts->ChannelAppList,0); gf_m2ts_delete_channel_application_info(ChanAppInfo); gf_list_rem(ts->ChannelAppList,0); #endif } gf_list_del(ts->ChannelAppList); if (ts->dsmcc_root_dir) gf_free(ts->dsmcc_root_dir); gf_free(ts); } #if 0//unused void gf_m2ts_print_info(GF_M2TS_Demuxer *ts) { #ifdef GPAC_ENABLE_MPE gf_m2ts_print_mpe_info(ts); #endif } #endif #define M2TS_PROBE_SIZE 188000 static Bool gf_m2ts_probe_buffer(char *buf, u32 size) { GF_Err e; GF_M2TS_Demuxer *ts; u32 lt; lt = gf_log_get_tool_level(GF_LOG_CONTAINER); gf_log_set_tool_level(GF_LOG_CONTAINER, GF_LOG_QUIET); ts = gf_m2ts_demux_new(); e = gf_m2ts_process_data(ts, buf, size); if (!ts->pck_number) e = GF_BAD_PARAM; gf_m2ts_demux_del(ts); gf_log_set_tool_level(GF_LOG_CONTAINER, lt); if (e) return GF_FALSE; return GF_TRUE; } GF_EXPORT Bool gf_m2ts_probe_file(const char *fileName) { char buf[M2TS_PROBE_SIZE]; u32 size; FILE *t; if (!strncmp(fileName, "gmem://", 7)) { u8 *mem_address; if (gf_blob_get_data(fileName, &mem_address, &size) != GF_OK) { return GF_FALSE; } if (size>M2TS_PROBE_SIZE) size = M2TS_PROBE_SIZE; memcpy(buf, mem_address, size); } else { t = gf_fopen(fileName, "rb"); if (!t) return 0; size = (u32) fread(buf, 1, M2TS_PROBE_SIZE, t); gf_fclose(t); if ((s32) size <= 0) return 0; } return gf_m2ts_probe_buffer(buf, size); } GF_EXPORT Bool gf_m2ts_probe_data(const u8 *data, u32 size) { size /= 188; size *= 188; return gf_m2ts_probe_buffer((char *) data, size); } static void rewrite_pts_dts(unsigned char *ptr, u64 TS) { ptr[0] &= 0xf1; ptr[0] |= (unsigned char)((TS&0x1c0000000ULL)>>29); ptr[1] = (unsigned char)((TS&0x03fc00000ULL)>>22); ptr[2] &= 0x1; ptr[2] |= (unsigned char)((TS&0x0003f8000ULL)>>14); ptr[3] = (unsigned char)((TS&0x000007f80ULL)>>7); ptr[4] &= 0x1; ptr[4] |= (unsigned char)((TS&0x00000007fULL)<<1); assert(((u64)(ptr[0]&0xe)<<29) + ((u64)ptr[1]<<22) + ((u64)(ptr[2]&0xfe)<<14) + ((u64)ptr[3]<<7) + ((ptr[4]&0xfe)>>1) == TS); } #define ADJUST_TIMESTAMP(_TS) \ if (_TS < (u64) -ts_shift) _TS = pcr_mod + _TS + ts_shift; \ else _TS = _TS + ts_shift; \ while (_TS > pcr_mod) _TS -= pcr_mod; \ GF_EXPORT GF_Err gf_m2ts_restamp(u8 *buffer, u32 size, s64 ts_shift, u8 *is_pes) { u32 done = 0; u64 pcr_mod; // if (!ts_shift) return GF_OK; pcr_mod = 0x80000000; pcr_mod*=4; while (done + 188 <= size) { u8 *pesh; u8 *pck; u64 pcr_base=0, pcr_ext=0; u16 pid; u8 adaptation_field, adaptation_field_length; pck = (u8*) buffer+done; if (pck[0]!=0x47) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Invalid sync byte %X\n", pck[0])); return GF_NON_COMPLIANT_BITSTREAM; } pid = ((pck[1] & 0x1f) <<8 ) + pck[2]; adaptation_field_length = 0; adaptation_field = (pck[3] >> 4) & 0x3; if ((adaptation_field==2) || (adaptation_field==3)) { adaptation_field_length = pck[4]; if ( pck[5]&0x10 /*PCR_flag*/) { pcr_base = (((u64)pck[6])<<25) + (pck[7]<<17) + (pck[8]<<9) + (pck[9]<<1) + (pck[10]>>7); pcr_ext = 
((pck[10]&1)<<8) + pck[11]; ADJUST_TIMESTAMP(pcr_base); pck[6] = (unsigned char)(0xff&(pcr_base>>25)); pck[7] = (unsigned char)(0xff&(pcr_base>>17)); pck[8] = (unsigned char)(0xff&(pcr_base>>9)); pck[9] = (unsigned char)(0xff&(pcr_base>>1)); pck[10] = (unsigned char)(((0x1&pcr_base)<<7) | 0x7e | ((0x100&pcr_ext)>>8)); if (pcr_ext != ((pck[10]&1)<<8) + pck[11]) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Sanity check failed for PCR restamping\n")); return GF_IO_ERR; } pck[11] = (unsigned char)(0xff&pcr_ext); } /*add adaptation_field_length field*/ adaptation_field_length++; } if (!is_pes[pid] || !(pck[1]&0x40)) { done+=188; continue; } pesh = &pck[4+adaptation_field_length]; if ((pesh[0]==0x00) && (pesh[1]==0x00) && (pesh[2]==0x01)) { Bool has_pts, has_dts; if ((pesh[6]&0xc0)!=0x80) { done+=188; continue; } has_pts = (pesh[7]&0x80); has_dts = has_pts ? (pesh[7]&0x40) : 0; if (has_pts) { u64 PTS; if (((pesh[9]&0xe0)>>4)!=0x2) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES header, PTS decoding: '0010' expected\n", pid)); done+=188; continue; } PTS = gf_m2ts_get_pts(pesh + 9); ADJUST_TIMESTAMP(PTS); rewrite_pts_dts(pesh+9, PTS); } if (has_dts) { u64 DTS = gf_m2ts_get_pts(pesh + 14); ADJUST_TIMESTAMP(DTS); rewrite_pts_dts(pesh+14, DTS); } } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES not beginning with start code\n", pid)); } done+=188; } return GF_OK; } #endif /*GPAC_DISABLE_MPEG2TS*/
static void gf_m2ts_process_pat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_M2TS_Program *prog; GF_M2TS_SECTION_ES *pmt; u32 i, nb_progs, evt_type; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PAT_REPEAT, NULL); return; } nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PAT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; if (!(status&GF_M2TS_TABLE_UPDATE) && gf_list_count(ts->programs)) { if (ts->pat->demux_restarted) { ts->pat->demux_restarted = 0; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Multiple different PAT on single TS found, ignoring new PAT declaration (table id %d - extended table id %d)\n", table_id, ex_table_id)); } return; } nb_progs = data_size / 4; for (i=0; i<nb_progs; i++) { u16 number, pid; number = (data[0]<<8) | data[1]; pid = (data[2]&0x1f)<<8 | data[3]; data += 4; if (number==0) { if (!ts->nit) { ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); } } else { GF_SAFEALLOC(prog, GF_M2TS_Program); if (!prog) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate program for pid %d\n", pid)); return; } prog->streams = gf_list_new(); prog->pmt_pid = pid; prog->number = number; prog->ts = ts; gf_list_add(ts->programs, prog); GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES); if (!pmt) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate pmt filter for pid %d\n", pid)); return; } pmt->flags = GF_M2TS_ES_IS_SECTION; gf_list_add(prog->streams, pmt); pmt->pid = prog->pmt_pid; pmt->program = prog; ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt; pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0); } } evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_PAT_UPDATE : GF_M2TS_EVT_PAT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); }
static void gf_m2ts_process_pat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_M2TS_Program *prog; GF_M2TS_SECTION_ES *pmt; u32 i, nb_progs, evt_type; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PAT_REPEAT, NULL); return; } nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PAT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; if (!(status&GF_M2TS_TABLE_UPDATE) && gf_list_count(ts->programs)) { if (ts->pat->demux_restarted) { ts->pat->demux_restarted = 0; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Multiple different PAT on single TS found, ignoring new PAT declaration (table id %d - extended table id %d)\n", table_id, ex_table_id)); } return; } nb_progs = data_size / 4; for (i=0; i<nb_progs; i++) { u16 number, pid; number = (data[0]<<8) | data[1]; pid = (data[2]&0x1f)<<8 | data[3]; data += 4; if (number==0) { if (!ts->nit) { ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); } } else if (!pid) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PAT found reserved PID 0, ignoring\n", pid)); } else if (! ts->ess[pid]) { GF_SAFEALLOC(prog, GF_M2TS_Program); if (!prog) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate program for pid %d\n", pid)); return; } prog->streams = gf_list_new(); prog->pmt_pid = pid; prog->number = number; prog->ts = ts; gf_list_add(ts->programs, prog); GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES); if (!pmt) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate pmt filter for pid %d\n", pid)); return; } pmt->flags = GF_M2TS_ES_IS_SECTION; gf_list_add(prog->streams, pmt); pmt->pid = prog->pmt_pid; pmt->program = prog; ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt; pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0); } } evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_PAT_UPDATE : GF_M2TS_EVT_PAT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); }
{'added': [(1670, '\t\t} else if (!pid) {'), (1671, '\t\t\tGF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PAT found reserved PID 0, ignoring\\n", pid));'), (1672, '\t\t} else if (! ts->ess[pid]) {')], 'deleted': [(1670, '\t\t} else {')]}
3
1
2469
18945
https://github.com/gpac/gpac
CVE-2019-20628
['CWE-416']
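Note on the record above: the diff for CVE-2019-20628 (CWE-416, use-after-free) changes the bare else branch in gf_m2ts_process_pat() so that a PAT entry only creates a new program and PMT section filter when the declared PID is not the reserved value 0 and no elementary stream is already registered on that PID (ts->ess[pid] is empty); otherwise the entry is logged or silently skipped instead of overwriting a live stream entry. The following is a minimal standalone sketch of that guard pattern, not GPAC code: the names es_table, register_pmt_pid and MAX_PIDS are illustrative assumptions introduced here for the example only.

/*
 * Minimal sketch (assumed names, not the GPAC API): models the guard
 * added by the CVE-2019-20628 fix. A PAT entry is turned into a new
 * PMT slot only when the PID is non-zero and the slot is still free.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PIDS 8192               /* TS PIDs are 13-bit (0..8191) */

static void *es_table[MAX_PIDS];    /* stands in for ts->ess[] */

/* Returns 1 if a new PMT slot was created, 0 if the entry was skipped. */
static int register_pmt_pid(unsigned pid)
{
    if (pid == 0 || pid >= MAX_PIDS) {
        /* reserved or out-of-range PID declared by a broken PAT */
        fprintf(stderr, "broken PAT: invalid PID %u, ignoring\n", pid);
        return 0;
    }
    if (es_table[pid]) {
        /* PID already owned by an existing stream: re-registering it is
           what previously led to the use-after-free, so skip the entry. */
        return 0;
    }
    es_table[pid] = malloc(1);      /* placeholder for the PMT filter */
    return es_table[pid] != NULL;
}

int main(void)
{
    printf("%d\n", register_pmt_pid(0));    /* 0: reserved PID rejected  */
    printf("%d\n", register_pmt_pid(100));  /* 1: new PMT registered     */
    printf("%d\n", register_pmt_pid(100));  /* 0: duplicate PID skipped  */
    free(es_table[100]);
    return 0;
}

Usage note: in the real patch the same three-way check is expressed as "else if (!pid) { log } else if (!ts->ess[pid]) { allocate program + PMT filter }", as shown in the diff field of the record above.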
utils.c
avcodec_open2
/* * utils for libavcodec * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * utils. */ #include "config.h" #include "libavutil/attributes.h" #include "libavutil/avassert.h" #include "libavutil/avstring.h" #include "libavutil/bprint.h" #include "libavutil/channel_layout.h" #include "libavutil/crc.h" #include "libavutil/frame.h" #include "libavutil/hwcontext.h" #include "libavutil/internal.h" #include "libavutil/mathematics.h" #include "libavutil/mem_internal.h" #include "libavutil/pixdesc.h" #include "libavutil/imgutils.h" #include "libavutil/samplefmt.h" #include "libavutil/dict.h" #include "libavutil/thread.h" #include "avcodec.h" #include "decode.h" #include "hwaccel.h" #include "libavutil/opt.h" #include "mpegvideo.h" #include "thread.h" #include "frame_thread_encoder.h" #include "internal.h" #include "raw.h" #include "bytestream.h" #include "version.h" #include <stdlib.h> #include <stdarg.h> #include <stdatomic.h> #include <limits.h> #include <float.h> #if CONFIG_ICONV # include <iconv.h> #endif #include "libavutil/ffversion.h" const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION; static AVMutex codec_mutex = AV_MUTEX_INITIALIZER; void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size) { uint8_t **p = ptr; if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) { av_freep(p); *size = 0; return; } if (!ff_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE, 1)) memset(*p + min_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); } void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size) { uint8_t **p = ptr; if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) { av_freep(p); *size = 0; return; } if (!ff_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE, 1)) memset(*p, 0, min_size + AV_INPUT_BUFFER_PADDING_SIZE); } int av_codec_is_encoder(const AVCodec *codec) { return codec && (codec->encode_sub || codec->encode2 ||codec->send_frame); } int av_codec_is_decoder(const AVCodec *codec) { return codec && (codec->decode || codec->receive_frame); } int ff_set_dimensions(AVCodecContext *s, int width, int height) { int ret = av_image_check_size2(width, height, s->max_pixels, AV_PIX_FMT_NONE, 0, s); if (ret < 0) width = height = 0; s->coded_width = width; s->coded_height = height; s->width = AV_CEIL_RSHIFT(width, s->lowres); s->height = AV_CEIL_RSHIFT(height, s->lowres); return ret; } int ff_set_sar(AVCodecContext *avctx, AVRational sar) { int ret = av_image_check_sar(avctx->width, avctx->height, sar); if (ret < 0) { av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %d/%d\n", sar.num, sar.den); avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; return ret; } else { avctx->sample_aspect_ratio = sar; } return 0; } int 
ff_side_data_update_matrix_encoding(AVFrame *frame, enum AVMatrixEncoding matrix_encoding) { AVFrameSideData *side_data; enum AVMatrixEncoding *data; side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_MATRIXENCODING); if (!side_data) side_data = av_frame_new_side_data(frame, AV_FRAME_DATA_MATRIXENCODING, sizeof(enum AVMatrixEncoding)); if (!side_data) return AVERROR(ENOMEM); data = (enum AVMatrixEncoding*)side_data->data; *data = matrix_encoding; return 0; } void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS]) { int i; int w_align = 1; int h_align = 1; AVPixFmtDescriptor const *desc = av_pix_fmt_desc_get(s->pix_fmt); if (desc) { w_align = 1 << desc->log2_chroma_w; h_align = 1 << desc->log2_chroma_h; } switch (s->pix_fmt) { case AV_PIX_FMT_YUV420P: case AV_PIX_FMT_YUYV422: case AV_PIX_FMT_YVYU422: case AV_PIX_FMT_UYVY422: case AV_PIX_FMT_YUV422P: case AV_PIX_FMT_YUV440P: case AV_PIX_FMT_YUV444P: case AV_PIX_FMT_GBRP: case AV_PIX_FMT_GBRAP: case AV_PIX_FMT_GRAY8: case AV_PIX_FMT_GRAY16BE: case AV_PIX_FMT_GRAY16LE: case AV_PIX_FMT_YUVJ420P: case AV_PIX_FMT_YUVJ422P: case AV_PIX_FMT_YUVJ440P: case AV_PIX_FMT_YUVJ444P: case AV_PIX_FMT_YUVA420P: case AV_PIX_FMT_YUVA422P: case AV_PIX_FMT_YUVA444P: case AV_PIX_FMT_YUV420P9LE: case AV_PIX_FMT_YUV420P9BE: case AV_PIX_FMT_YUV420P10LE: case AV_PIX_FMT_YUV420P10BE: case AV_PIX_FMT_YUV420P12LE: case AV_PIX_FMT_YUV420P12BE: case AV_PIX_FMT_YUV420P14LE: case AV_PIX_FMT_YUV420P14BE: case AV_PIX_FMT_YUV420P16LE: case AV_PIX_FMT_YUV420P16BE: case AV_PIX_FMT_YUVA420P9LE: case AV_PIX_FMT_YUVA420P9BE: case AV_PIX_FMT_YUVA420P10LE: case AV_PIX_FMT_YUVA420P10BE: case AV_PIX_FMT_YUVA420P16LE: case AV_PIX_FMT_YUVA420P16BE: case AV_PIX_FMT_YUV422P9LE: case AV_PIX_FMT_YUV422P9BE: case AV_PIX_FMT_YUV422P10LE: case AV_PIX_FMT_YUV422P10BE: case AV_PIX_FMT_YUV422P12LE: case AV_PIX_FMT_YUV422P12BE: case AV_PIX_FMT_YUV422P14LE: case AV_PIX_FMT_YUV422P14BE: case AV_PIX_FMT_YUV422P16LE: case AV_PIX_FMT_YUV422P16BE: case AV_PIX_FMT_YUVA422P9LE: case AV_PIX_FMT_YUVA422P9BE: case AV_PIX_FMT_YUVA422P10LE: case AV_PIX_FMT_YUVA422P10BE: case AV_PIX_FMT_YUVA422P12LE: case AV_PIX_FMT_YUVA422P12BE: case AV_PIX_FMT_YUVA422P16LE: case AV_PIX_FMT_YUVA422P16BE: case AV_PIX_FMT_YUV440P10LE: case AV_PIX_FMT_YUV440P10BE: case AV_PIX_FMT_YUV440P12LE: case AV_PIX_FMT_YUV440P12BE: case AV_PIX_FMT_YUV444P9LE: case AV_PIX_FMT_YUV444P9BE: case AV_PIX_FMT_YUV444P10LE: case AV_PIX_FMT_YUV444P10BE: case AV_PIX_FMT_YUV444P12LE: case AV_PIX_FMT_YUV444P12BE: case AV_PIX_FMT_YUV444P14LE: case AV_PIX_FMT_YUV444P14BE: case AV_PIX_FMT_YUV444P16LE: case AV_PIX_FMT_YUV444P16BE: case AV_PIX_FMT_YUVA444P9LE: case AV_PIX_FMT_YUVA444P9BE: case AV_PIX_FMT_YUVA444P10LE: case AV_PIX_FMT_YUVA444P10BE: case AV_PIX_FMT_YUVA444P12LE: case AV_PIX_FMT_YUVA444P12BE: case AV_PIX_FMT_YUVA444P16LE: case AV_PIX_FMT_YUVA444P16BE: case AV_PIX_FMT_GBRP9LE: case AV_PIX_FMT_GBRP9BE: case AV_PIX_FMT_GBRP10LE: case AV_PIX_FMT_GBRP10BE: case AV_PIX_FMT_GBRP12LE: case AV_PIX_FMT_GBRP12BE: case AV_PIX_FMT_GBRP14LE: case AV_PIX_FMT_GBRP14BE: case AV_PIX_FMT_GBRP16LE: case AV_PIX_FMT_GBRP16BE: case AV_PIX_FMT_GBRAP12LE: case AV_PIX_FMT_GBRAP12BE: case AV_PIX_FMT_GBRAP16LE: case AV_PIX_FMT_GBRAP16BE: w_align = 16; //FIXME assume 16 pixel per macroblock h_align = 16 * 2; // interlaced needs 2 macroblocks height break; case AV_PIX_FMT_YUV411P: case AV_PIX_FMT_YUVJ411P: case AV_PIX_FMT_UYYVYY411: w_align = 32; h_align = 16 * 2; break; case AV_PIX_FMT_YUV410P: if 
(s->codec_id == AV_CODEC_ID_SVQ1) { w_align = 64; h_align = 64; } break; case AV_PIX_FMT_RGB555: if (s->codec_id == AV_CODEC_ID_RPZA) { w_align = 4; h_align = 4; } if (s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) { w_align = 8; h_align = 8; } break; case AV_PIX_FMT_PAL8: case AV_PIX_FMT_BGR8: case AV_PIX_FMT_RGB8: if (s->codec_id == AV_CODEC_ID_SMC || s->codec_id == AV_CODEC_ID_CINEPAK) { w_align = 4; h_align = 4; } if (s->codec_id == AV_CODEC_ID_JV || s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) { w_align = 8; h_align = 8; } break; case AV_PIX_FMT_BGR24: if ((s->codec_id == AV_CODEC_ID_MSZH) || (s->codec_id == AV_CODEC_ID_ZLIB)) { w_align = 4; h_align = 4; } break; case AV_PIX_FMT_RGB24: if (s->codec_id == AV_CODEC_ID_CINEPAK) { w_align = 4; h_align = 4; } break; default: break; } if (s->codec_id == AV_CODEC_ID_IFF_ILBM) { w_align = FFMAX(w_align, 8); } *width = FFALIGN(*width, w_align); *height = FFALIGN(*height, h_align); if (s->codec_id == AV_CODEC_ID_H264 || s->lowres || s->codec_id == AV_CODEC_ID_VP5 || s->codec_id == AV_CODEC_ID_VP6 || s->codec_id == AV_CODEC_ID_VP6F || s->codec_id == AV_CODEC_ID_VP6A ) { // some of the optimized chroma MC reads one line too much // which is also done in mpeg decoders with lowres > 0 *height += 2; // H.264 uses edge emulation for out of frame motion vectors, for this // it requires a temporary area large enough to hold a 21x21 block, // increasing witdth ensure that the temporary area is large enough, // the next rounded up width is 32 *width = FFMAX(*width, 32); } for (i = 0; i < 4; i++) linesize_align[i] = STRIDE_ALIGN; } void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt); int chroma_shift = desc->log2_chroma_w; int linesize_align[AV_NUM_DATA_POINTERS]; int align; avcodec_align_dimensions2(s, width, height, linesize_align); align = FFMAX(linesize_align[0], linesize_align[3]); linesize_align[1] <<= chroma_shift; linesize_align[2] <<= chroma_shift; align = FFMAX3(align, linesize_align[1], linesize_align[2]); *width = FFALIGN(*width, align); } int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos) { if (pos <= AVCHROMA_LOC_UNSPECIFIED || pos >= AVCHROMA_LOC_NB) return AVERROR(EINVAL); pos--; *xpos = (pos&1) * 128; *ypos = ((pos>>1)^(pos<4)) * 128; return 0; } enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos) { int pos, xout, yout; for (pos = AVCHROMA_LOC_UNSPECIFIED + 1; pos < AVCHROMA_LOC_NB; pos++) { if (avcodec_enum_to_chroma_pos(&xout, &yout, pos) == 0 && xout == xpos && yout == ypos) return pos; } return AVCHROMA_LOC_UNSPECIFIED; } int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, enum AVSampleFormat sample_fmt, const uint8_t *buf, int buf_size, int align) { int ch, planar, needed_size, ret = 0; needed_size = av_samples_get_buffer_size(NULL, nb_channels, frame->nb_samples, sample_fmt, align); if (buf_size < needed_size) return AVERROR(EINVAL); planar = av_sample_fmt_is_planar(sample_fmt); if (planar && nb_channels > AV_NUM_DATA_POINTERS) { if (!(frame->extended_data = av_mallocz_array(nb_channels, sizeof(*frame->extended_data)))) return AVERROR(ENOMEM); } else { frame->extended_data = frame->data; } if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0], (uint8_t *)(intptr_t)buf, nb_channels, frame->nb_samples, sample_fmt, align)) < 0) { if (frame->extended_data != frame->data) av_freep(&frame->extended_data); return ret; } if (frame->extended_data != frame->data) { for (ch = 0; ch < 
AV_NUM_DATA_POINTERS; ch++) frame->data[ch] = frame->extended_data[ch]; } return ret; } void ff_color_frame(AVFrame *frame, const int c[4]) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); int p, y, x; av_assert0(desc->flags & AV_PIX_FMT_FLAG_PLANAR); for (p = 0; p<desc->nb_components; p++) { uint8_t *dst = frame->data[p]; int is_chroma = p == 1 || p == 2; int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width; int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height; for (y = 0; y < height; y++) { if (desc->comp[0].depth >= 9) { for (x = 0; x<bytes; x++) ((uint16_t*)dst)[x] = c[p]; }else memset(dst, c[p], bytes); dst += frame->linesize[p]; } } } int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size) { int i; for (i = 0; i < count; i++) { int r = func(c, (char *)arg + i * size); if (ret) ret[i] = r; } emms_c(); return 0; } int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count) { int i; for (i = 0; i < count; i++) { int r = func(c, arg, i, 0); if (ret) ret[i] = r; } emms_c(); return 0; } enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags, unsigned int fourcc) { while (tags->pix_fmt >= 0) { if (tags->fourcc == fourcc) return tags->pix_fmt; tags++; } return AV_PIX_FMT_NONE; } #if FF_API_CODEC_GET_SET MAKE_ACCESSORS(AVCodecContext, codec, AVRational, pkt_timebase) MAKE_ACCESSORS(AVCodecContext, codec, const AVCodecDescriptor *, codec_descriptor) MAKE_ACCESSORS(AVCodecContext, codec, int, lowres) MAKE_ACCESSORS(AVCodecContext, codec, int, seek_preroll) MAKE_ACCESSORS(AVCodecContext, codec, uint16_t*, chroma_intra_matrix) unsigned av_codec_get_codec_properties(const AVCodecContext *codec) { return codec->properties; } int av_codec_get_max_lowres(const AVCodec *codec) { return codec->max_lowres; } #endif int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec){ return !!(codec->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM); } static int64_t get_bit_rate(AVCodecContext *ctx) { int64_t bit_rate; int bits_per_sample; switch (ctx->codec_type) { case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_SUBTITLE: case AVMEDIA_TYPE_ATTACHMENT: bit_rate = ctx->bit_rate; break; case AVMEDIA_TYPE_AUDIO: bits_per_sample = av_get_bits_per_sample(ctx->codec_id); bit_rate = bits_per_sample ? 
ctx->sample_rate * (int64_t)ctx->channels * bits_per_sample : ctx->bit_rate; break; default: bit_rate = 0; break; } return bit_rate; } static void ff_lock_avcodec(AVCodecContext *log_ctx, const AVCodec *codec) { if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init) ff_mutex_lock(&codec_mutex); } static void ff_unlock_avcodec(const AVCodec *codec) { if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init) ff_mutex_unlock(&codec_mutex); } int attribute_align_arg ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) { int ret = 0; ff_unlock_avcodec(codec); ret = avcodec_open2(avctx, codec, options); ff_lock_avcodec(avctx, codec); return ret; } int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) { int ret = 0; int codec_init_ok = 0; AVDictionary *tmp = NULL; const AVPixFmtDescriptor *pixdesc; if (avcodec_is_open(avctx)) return 0; if ((!codec && !avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n"); return AVERROR(EINVAL); } if ((codec && avctx->codec && codec != avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, " "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name); return AVERROR(EINVAL); } if (!codec) codec = avctx->codec; if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE) return AVERROR(EINVAL); if (options) av_dict_copy(&tmp, *options, 0); ff_lock_avcodec(avctx, codec); avctx->internal = av_mallocz(sizeof(*avctx->internal)); if (!avctx->internal) { ret = AVERROR(ENOMEM); goto end; } avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool)); if (!avctx->internal->pool) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->to_free = av_frame_alloc(); if (!avctx->internal->to_free) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->compat_decode_frame = av_frame_alloc(); if (!avctx->internal->compat_decode_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->buffer_frame = av_frame_alloc(); if (!avctx->internal->buffer_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->buffer_pkt = av_packet_alloc(); if (!avctx->internal->buffer_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->ds.in_pkt = av_packet_alloc(); if (!avctx->internal->ds.in_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->last_pkt_props = av_packet_alloc(); if (!avctx->internal->last_pkt_props) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->skip_samples_multiplier = 1; if (codec->priv_data_size > 0) { if (!avctx->priv_data) { avctx->priv_data = av_mallocz(codec->priv_data_size); if (!avctx->priv_data) { ret = AVERROR(ENOMEM); goto end; } if (codec->priv_class) { *(const AVClass **)avctx->priv_data = codec->priv_class; av_opt_set_defaults(avctx->priv_data); } } if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0) goto free_and_end; } else { avctx->priv_data = NULL; } if ((ret = av_opt_set_dict(avctx, &tmp)) < 0) goto free_and_end; if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) { av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist); ret = AVERROR(EINVAL); goto free_and_end; } // only call ff_set_dimensions() for non H.264/VP6F/DXV codecs so as not to overwrite previously setup dimensions if (!(avctx->coded_width && avctx->coded_height && 
avctx->width && avctx->height && (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) { if (avctx->coded_width && avctx->coded_height) ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); else if (avctx->width && avctx->height) ret = ff_set_dimensions(avctx, avctx->width, avctx->height); if (ret < 0) goto free_and_end; } if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height) && ( av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0 || av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) { av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n"); ff_set_dimensions(avctx, 0, 0); } if (avctx->width > 0 && avctx->height > 0) { if (av_image_check_sar(avctx->width, avctx->height, avctx->sample_aspect_ratio) < 0) { av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den); avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; } } /* if the decoder init function was already called previously, * free the already allocated subtitle_header before overwriting it */ if (av_codec_is_decoder(codec)) av_freep(&avctx->subtitle_header); if (avctx->channels > FF_SANE_NB_CHANNELS) { av_log(avctx, AV_LOG_ERROR, "Too many channels: %d\n", avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } avctx->codec = codec; if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) && avctx->codec_id == AV_CODEC_ID_NONE) { avctx->codec_type = codec->type; avctx->codec_id = codec->id; } if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) { av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n"); ret = AVERROR(EINVAL); goto free_and_end; } avctx->frame_number = 0; avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id); if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder"; AVCodec *codec2; av_log(avctx, AV_LOG_ERROR, "The %s '%s' is experimental but experimental codecs are not enabled, " "add '-strict %d' if you want to use it.\n", codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL); codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id); if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL)) av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n", codec_string, codec2->name); ret = AVERROR_EXPERIMENTAL; goto free_and_end; } if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && (!avctx->time_base.num || !avctx->time_base.den)) { avctx->time_base.num = 1; avctx->time_base.den = avctx->sample_rate; } if (!HAVE_THREADS) av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n"); if (CONFIG_FRAME_THREAD_ENCODER && av_codec_is_encoder(avctx->codec)) { ff_unlock_avcodec(codec); //we will instantiate a few encoders thus kick the counter to prevent false detection of a problem ret = ff_frame_thread_encoder_init(avctx, options ? 
*options : NULL); ff_lock_avcodec(avctx, codec); if (ret < 0) goto free_and_end; } if (av_codec_is_decoder(avctx->codec)) { ret = ff_decode_bsfs_init(avctx); if (ret < 0) goto free_and_end; } if (HAVE_THREADS && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) { ret = ff_thread_init(avctx); if (ret < 0) { goto free_and_end; } } if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS)) avctx->thread_count = 1; if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) { av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n", avctx->codec->max_lowres); avctx->lowres = avctx->codec->max_lowres; } if (av_codec_is_encoder(avctx->codec)) { int i; #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } FF_ENABLE_DEPRECATION_WARNINGS #endif if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) { av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n"); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->sample_fmts) { for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) { if (avctx->sample_fmt == avctx->codec->sample_fmts[i]) break; if (avctx->channels == 1 && av_get_planar_sample_fmt(avctx->sample_fmt) == av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) { avctx->sample_fmt = avctx->codec->sample_fmts[i]; break; } } if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) { char buf[128]; snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt); av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n", (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf)); ret = AVERROR(EINVAL); goto free_and_end; } } if (avctx->codec->pix_fmts) { for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++) if (avctx->pix_fmt == avctx->codec->pix_fmts[i]) break; if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG) && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) { char buf[128]; snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt); av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n", (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf)); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P) avctx->color_range = AVCOL_RANGE_JPEG; } if (avctx->codec->supported_samplerates) { for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++) if (avctx->sample_rate == avctx->codec->supported_samplerates[i]) break; if (avctx->codec->supported_samplerates[i] == 0) { av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", avctx->sample_rate); ret = AVERROR(EINVAL); goto free_and_end; } } if (avctx->sample_rate < 0) { av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", avctx->sample_rate); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->channel_layouts) { if (!avctx->channel_layout) { av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n"); } else { for (i = 0; avctx->codec->channel_layouts[i] != 0; i++) if (avctx->channel_layout == avctx->codec->channel_layouts[i]) break; if 
(avctx->codec->channel_layouts[i] == 0) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf); ret = AVERROR(EINVAL); goto free_and_end; } } } if (avctx->channel_layout && avctx->channels) { int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); if (channels != avctx->channels) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_ERROR, "Channel layout '%s' with %d channels does not match number of specified channels %d\n", buf, channels, avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } } else if (avctx->channel_layout) { avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout); } if (avctx->channels < 0) { av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n", avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) { pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt); if ( avctx->bits_per_raw_sample < 0 || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) { av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n", avctx->bits_per_raw_sample, pixdesc->comp[0].depth); avctx->bits_per_raw_sample = pixdesc->comp[0].depth; } if (avctx->width <= 0 || avctx->height <= 0) { av_log(avctx, AV_LOG_ERROR, "dimensions not set\n"); ret = AVERROR(EINVAL); goto free_and_end; } } if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) && avctx->bit_rate>0 && avctx->bit_rate<1000) { av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate); } if (!avctx->rc_initial_buffer_occupancy) avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4; if (avctx->ticks_per_frame && avctx->time_base.num && avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) { av_log(avctx, AV_LOG_ERROR, "ticks_per_frame %d too large for the timebase %d/%d.", avctx->ticks_per_frame, avctx->time_base.num, avctx->time_base.den); goto free_and_end; } if (avctx->hw_frames_ctx) { AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; if (frames_ctx->format != avctx->pix_fmt) { av_log(avctx, AV_LOG_ERROR, "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n"); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE && avctx->sw_pix_fmt != frames_ctx->sw_format) { av_log(avctx, AV_LOG_ERROR, "Mismatching AVCodecContext.sw_pix_fmt (%s) " "and AVHWFramesContext.sw_format (%s)\n", av_get_pix_fmt_name(avctx->sw_pix_fmt), av_get_pix_fmt_name(frames_ctx->sw_format)); ret = AVERROR(EINVAL); goto free_and_end; } avctx->sw_pix_fmt = frames_ctx->sw_format; } } avctx->pts_correction_num_faulty_pts = avctx->pts_correction_num_faulty_dts = 0; avctx->pts_correction_last_pts = avctx->pts_correction_last_dts = INT64_MIN; if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO) av_log(avctx, AV_LOG_WARNING, "gray decoding requested but not enabled at configuration time\n"); if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME) || avctx->internal->frame_thread_encoder)) { ret = avctx->codec->init(avctx); if (ret < 0) { goto free_and_end; } codec_init_ok = 1; } ret=0; if (av_codec_is_decoder(avctx->codec)) { if (!avctx->bit_rate) 
avctx->bit_rate = get_bit_rate(avctx); /* validate channel layout from the decoder */ if (avctx->channel_layout) { int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); if (!avctx->channels) avctx->channels = channels; else if (channels != avctx->channels) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_WARNING, "Channel layout '%s' with %d channels does not match specified number of channels %d: " "ignoring specified channel layout\n", buf, channels, avctx->channels); avctx->channel_layout = 0; } } if (avctx->channels && avctx->channels < 0 || avctx->channels > FF_SANE_NB_CHANNELS) { ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->bits_per_coded_sample < 0) { ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->sub_charenc) { if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) { av_log(avctx, AV_LOG_ERROR, "Character encoding is only " "supported with subtitles codecs\n"); ret = AVERROR(EINVAL); goto free_and_end; } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) { av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, " "subtitles character encoding will be ignored\n", avctx->codec_descriptor->name); avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING; } else { /* input character encoding is set for a text based subtitle * codec at this point */ if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC) avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER; if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) { #if CONFIG_ICONV iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc); if (cd == (iconv_t)-1) { ret = AVERROR(errno); av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context " "with input character encoding \"%s\"\n", avctx->sub_charenc); goto free_and_end; } iconv_close(cd); #else av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles " "conversion needs a libavcodec built with iconv support " "for this codec\n"); ret = AVERROR(ENOSYS); goto free_and_end; #endif } } } #if FF_API_AVCTX_TIMEBASE if (avctx->framerate.num > 0 && avctx->framerate.den > 0) avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); #endif } if (codec->priv_data_size > 0 && avctx->priv_data && codec->priv_class) { av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class); } end: ff_unlock_avcodec(codec); if (options) { av_dict_free(options); *options = tmp; } return ret; free_and_end: if (avctx->codec && (codec_init_ok || (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP))) avctx->codec->close(avctx); if (codec->priv_class && codec->priv_data_size) av_opt_free(avctx->priv_data); av_opt_free(avctx); #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS av_frame_free(&avctx->coded_frame); FF_ENABLE_DEPRECATION_WARNINGS #endif av_dict_free(&tmp); av_freep(&avctx->priv_data); if (avctx->internal) { av_frame_free(&avctx->internal->to_free); av_frame_free(&avctx->internal->compat_decode_frame); av_frame_free(&avctx->internal->buffer_frame); av_packet_free(&avctx->internal->buffer_pkt); av_packet_free(&avctx->internal->last_pkt_props); av_packet_free(&avctx->internal->ds.in_pkt); ff_decode_bsfs_uninit(avctx); av_freep(&avctx->internal->pool); } av_freep(&avctx->internal); avctx->codec = NULL; goto end; } void avsubtitle_free(AVSubtitle *sub) { int i; for (i = 0; i < sub->num_rects; i++) { av_freep(&sub->rects[i]->data[0]); av_freep(&sub->rects[i]->data[1]); av_freep(&sub->rects[i]->data[2]); 
av_freep(&sub->rects[i]->data[3]); av_freep(&sub->rects[i]->text); av_freep(&sub->rects[i]->ass); av_freep(&sub->rects[i]); } av_freep(&sub->rects); memset(sub, 0, sizeof(*sub)); } av_cold int avcodec_close(AVCodecContext *avctx) { int i; if (!avctx) return 0; if (avcodec_is_open(avctx)) { FramePool *pool = avctx->internal->pool; if (CONFIG_FRAME_THREAD_ENCODER && avctx->internal->frame_thread_encoder && avctx->thread_count > 1) { ff_frame_thread_encoder_free(avctx); } if (HAVE_THREADS && avctx->internal->thread_ctx) ff_thread_free(avctx); if (avctx->codec && avctx->codec->close) avctx->codec->close(avctx); avctx->internal->byte_buffer_size = 0; av_freep(&avctx->internal->byte_buffer); av_frame_free(&avctx->internal->to_free); av_frame_free(&avctx->internal->compat_decode_frame); av_frame_free(&avctx->internal->buffer_frame); av_packet_free(&avctx->internal->buffer_pkt); av_packet_free(&avctx->internal->last_pkt_props); av_packet_free(&avctx->internal->ds.in_pkt); for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) av_buffer_pool_uninit(&pool->pools[i]); av_freep(&avctx->internal->pool); if (avctx->hwaccel && avctx->hwaccel->uninit) avctx->hwaccel->uninit(avctx); av_freep(&avctx->internal->hwaccel_priv_data); ff_decode_bsfs_uninit(avctx); av_freep(&avctx->internal); } for (i = 0; i < avctx->nb_coded_side_data; i++) av_freep(&avctx->coded_side_data[i].data); av_freep(&avctx->coded_side_data); avctx->nb_coded_side_data = 0; av_buffer_unref(&avctx->hw_frames_ctx); av_buffer_unref(&avctx->hw_device_ctx); if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) av_opt_free(avctx->priv_data); av_opt_free(avctx); av_freep(&avctx->priv_data); if (av_codec_is_encoder(avctx->codec)) { av_freep(&avctx->extradata); #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS av_frame_free(&avctx->coded_frame); FF_ENABLE_DEPRECATION_WARNINGS #endif } avctx->codec = NULL; avctx->active_thread_type = 0; return 0; } const char *avcodec_get_name(enum AVCodecID id) { const AVCodecDescriptor *cd; AVCodec *codec; if (id == AV_CODEC_ID_NONE) return "none"; cd = avcodec_descriptor_get(id); if (cd) return cd->name; av_log(NULL, AV_LOG_WARNING, "Codec 0x%x is not in the full list.\n", id); codec = avcodec_find_decoder(id); if (codec) return codec->name; codec = avcodec_find_encoder(id); if (codec) return codec->name; return "unknown_codec"; } size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag) { int i, len, ret = 0; #define TAG_PRINT(x) \ (((x) >= '0' && (x) <= '9') || \ ((x) >= 'a' && (x) <= 'z') || ((x) >= 'A' && (x) <= 'Z') || \ ((x) == '.' || (x) == ' ' || (x) == '-' || (x) == '_')) for (i = 0; i < 4; i++) { len = snprintf(buf, buf_size, TAG_PRINT(codec_tag & 0xFF) ? "%c" : "[%d]", codec_tag & 0xFF); buf += len; buf_size = buf_size > len ? buf_size - len : 0; ret += len; codec_tag >>= 8; } return ret; } void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) { const char *codec_type; const char *codec_name; const char *profile = NULL; int64_t bitrate; int new_line = 0; AVRational display_aspect_ratio; const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : ", "; if (!buf || buf_size <= 0) return; codec_type = av_get_media_type_string(enc->codec_type); codec_name = avcodec_get_name(enc->codec_id); profile = avcodec_profile_name(enc->codec_id, enc->profile); snprintf(buf, buf_size, "%s: %s", codec_type ? 
codec_type : "unknown", codec_name); buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */ if (enc->codec && strcmp(enc->codec->name, codec_name)) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", enc->codec->name); if (profile) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", profile); if ( enc->codec_type == AVMEDIA_TYPE_VIDEO && av_log_get_level() >= AV_LOG_VERBOSE && enc->refs) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d reference frame%s", enc->refs, enc->refs > 1 ? "s" : ""); if (enc->codec_tag) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s / 0x%04X)", av_fourcc2str(enc->codec_tag), enc->codec_tag); switch (enc->codec_type) { case AVMEDIA_TYPE_VIDEO: { char detail[256] = "("; av_strlcat(buf, separator, buf_size); snprintf(buf + strlen(buf), buf_size - strlen(buf), "%s", enc->pix_fmt == AV_PIX_FMT_NONE ? "none" : av_get_pix_fmt_name(enc->pix_fmt)); if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE && enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth) av_strlcatf(detail, sizeof(detail), "%d bpc, ", enc->bits_per_raw_sample); if (enc->color_range != AVCOL_RANGE_UNSPECIFIED) av_strlcatf(detail, sizeof(detail), "%s, ", av_color_range_name(enc->color_range)); if (enc->colorspace != AVCOL_SPC_UNSPECIFIED || enc->color_primaries != AVCOL_PRI_UNSPECIFIED || enc->color_trc != AVCOL_TRC_UNSPECIFIED) { if (enc->colorspace != (int)enc->color_primaries || enc->colorspace != (int)enc->color_trc) { new_line = 1; av_strlcatf(detail, sizeof(detail), "%s/%s/%s, ", av_color_space_name(enc->colorspace), av_color_primaries_name(enc->color_primaries), av_color_transfer_name(enc->color_trc)); } else av_strlcatf(detail, sizeof(detail), "%s, ", av_get_colorspace_name(enc->colorspace)); } if (enc->field_order != AV_FIELD_UNKNOWN) { const char *field_order = "progressive"; if (enc->field_order == AV_FIELD_TT) field_order = "top first"; else if (enc->field_order == AV_FIELD_BB) field_order = "bottom first"; else if (enc->field_order == AV_FIELD_TB) field_order = "top coded first (swapped)"; else if (enc->field_order == AV_FIELD_BT) field_order = "bottom coded first (swapped)"; av_strlcatf(detail, sizeof(detail), "%s, ", field_order); } if (av_log_get_level() >= AV_LOG_VERBOSE && enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED) av_strlcatf(detail, sizeof(detail), "%s, ", av_chroma_location_name(enc->chroma_sample_location)); if (strlen(detail) > 1) { detail[strlen(detail) - 2] = 0; av_strlcatf(buf, buf_size, "%s)", detail); } } if (enc->width) { av_strlcat(buf, new_line ? 
separator : ", ", buf_size); snprintf(buf + strlen(buf), buf_size - strlen(buf), "%dx%d", enc->width, enc->height); if (av_log_get_level() >= AV_LOG_VERBOSE && (enc->width != enc->coded_width || enc->height != enc->coded_height)) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%dx%d)", enc->coded_width, enc->coded_height); if (enc->sample_aspect_ratio.num) { av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, enc->width * (int64_t)enc->sample_aspect_ratio.num, enc->height * (int64_t)enc->sample_aspect_ratio.den, 1024 * 1024); snprintf(buf + strlen(buf), buf_size - strlen(buf), " [SAR %d:%d DAR %d:%d]", enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den); } if (av_log_get_level() >= AV_LOG_DEBUG) { int g = av_gcd(enc->time_base.num, enc->time_base.den); snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d/%d", enc->time_base.num / g, enc->time_base.den / g); } } if (encode) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", q=%d-%d", enc->qmin, enc->qmax); } else { if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", Closed Captions"); if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", lossless"); } break; case AVMEDIA_TYPE_AUDIO: av_strlcat(buf, separator, buf_size); if (enc->sample_rate) { snprintf(buf + strlen(buf), buf_size - strlen(buf), "%d Hz, ", enc->sample_rate); } av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout); if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %s", av_get_sample_fmt_name(enc->sample_fmt)); } if ( enc->bits_per_raw_sample > 0 && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%d bit)", enc->bits_per_raw_sample); if (av_log_get_level() >= AV_LOG_VERBOSE) { if (enc->initial_padding) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", delay %d", enc->initial_padding); if (enc->trailing_padding) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", padding %d", enc->trailing_padding); } break; case AVMEDIA_TYPE_DATA: if (av_log_get_level() >= AV_LOG_DEBUG) { int g = av_gcd(enc->time_base.num, enc->time_base.den); if (g) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d/%d", enc->time_base.num / g, enc->time_base.den / g); } break; case AVMEDIA_TYPE_SUBTITLE: if (enc->width) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %dx%d", enc->width, enc->height); break; default: return; } if (encode) { if (enc->flags & AV_CODEC_FLAG_PASS1) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", pass 1"); if (enc->flags & AV_CODEC_FLAG_PASS2) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", pass 2"); } bitrate = get_bit_rate(enc); if (bitrate != 0) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %"PRId64" kb/s", bitrate / 1000); } else if (enc->rc_max_rate > 0) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", max. 
%"PRId64" kb/s", enc->rc_max_rate / 1000); } } const char *av_get_profile_name(const AVCodec *codec, int profile) { const AVProfile *p; if (profile == FF_PROFILE_UNKNOWN || !codec->profiles) return NULL; for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++) if (p->profile == profile) return p->name; return NULL; } const char *avcodec_profile_name(enum AVCodecID codec_id, int profile) { const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id); const AVProfile *p; if (profile == FF_PROFILE_UNKNOWN || !desc || !desc->profiles) return NULL; for (p = desc->profiles; p->profile != FF_PROFILE_UNKNOWN; p++) if (p->profile == profile) return p->name; return NULL; } unsigned avcodec_version(void) { av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563); av_assert0(AV_CODEC_ID_ADPCM_G722==69660); av_assert0(AV_CODEC_ID_SRT==94216); av_assert0(LIBAVCODEC_VERSION_MICRO >= 100); return LIBAVCODEC_VERSION_INT; } const char *avcodec_configuration(void) { return FFMPEG_CONFIGURATION; } const char *avcodec_license(void) { #define LICENSE_PREFIX "libavcodec license: " return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; } int av_get_exact_bits_per_sample(enum AVCodecID codec_id) { switch (codec_id) { case AV_CODEC_ID_8SVX_EXP: case AV_CODEC_ID_8SVX_FIB: case AV_CODEC_ID_ADPCM_CT: case AV_CODEC_ID_ADPCM_IMA_APC: case AV_CODEC_ID_ADPCM_IMA_EA_SEAD: case AV_CODEC_ID_ADPCM_IMA_OKI: case AV_CODEC_ID_ADPCM_IMA_WS: case AV_CODEC_ID_ADPCM_G722: case AV_CODEC_ID_ADPCM_YAMAHA: case AV_CODEC_ID_ADPCM_AICA: return 4; case AV_CODEC_ID_DSD_LSBF: case AV_CODEC_ID_DSD_MSBF: case AV_CODEC_ID_DSD_LSBF_PLANAR: case AV_CODEC_ID_DSD_MSBF_PLANAR: case AV_CODEC_ID_PCM_ALAW: case AV_CODEC_ID_PCM_MULAW: case AV_CODEC_ID_PCM_VIDC: case AV_CODEC_ID_PCM_S8: case AV_CODEC_ID_PCM_S8_PLANAR: case AV_CODEC_ID_PCM_U8: case AV_CODEC_ID_PCM_ZORK: case AV_CODEC_ID_SDX2_DPCM: return 8; case AV_CODEC_ID_PCM_S16BE: case AV_CODEC_ID_PCM_S16BE_PLANAR: case AV_CODEC_ID_PCM_S16LE: case AV_CODEC_ID_PCM_S16LE_PLANAR: case AV_CODEC_ID_PCM_U16BE: case AV_CODEC_ID_PCM_U16LE: return 16; case AV_CODEC_ID_PCM_S24DAUD: case AV_CODEC_ID_PCM_S24BE: case AV_CODEC_ID_PCM_S24LE: case AV_CODEC_ID_PCM_S24LE_PLANAR: case AV_CODEC_ID_PCM_U24BE: case AV_CODEC_ID_PCM_U24LE: return 24; case AV_CODEC_ID_PCM_S32BE: case AV_CODEC_ID_PCM_S32LE: case AV_CODEC_ID_PCM_S32LE_PLANAR: case AV_CODEC_ID_PCM_U32BE: case AV_CODEC_ID_PCM_U32LE: case AV_CODEC_ID_PCM_F32BE: case AV_CODEC_ID_PCM_F32LE: case AV_CODEC_ID_PCM_F24LE: case AV_CODEC_ID_PCM_F16LE: return 32; case AV_CODEC_ID_PCM_F64BE: case AV_CODEC_ID_PCM_F64LE: case AV_CODEC_ID_PCM_S64BE: case AV_CODEC_ID_PCM_S64LE: return 64; default: return 0; } } enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be) { static const enum AVCodecID map[AV_SAMPLE_FMT_NB][2] = { [AV_SAMPLE_FMT_U8 ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, [AV_SAMPLE_FMT_S16 ] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, [AV_SAMPLE_FMT_S32 ] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, [AV_SAMPLE_FMT_FLT ] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, [AV_SAMPLE_FMT_DBL ] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, [AV_SAMPLE_FMT_U8P ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, [AV_SAMPLE_FMT_S16P] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, [AV_SAMPLE_FMT_S32P] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, [AV_SAMPLE_FMT_S64P] = { AV_CODEC_ID_PCM_S64LE, AV_CODEC_ID_PCM_S64BE }, [AV_SAMPLE_FMT_FLTP] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, [AV_SAMPLE_FMT_DBLP] = { 
AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, }; if (fmt < 0 || fmt >= AV_SAMPLE_FMT_NB) return AV_CODEC_ID_NONE; if (be < 0 || be > 1) be = AV_NE(1, 0); return map[fmt][be]; } int av_get_bits_per_sample(enum AVCodecID codec_id) { switch (codec_id) { case AV_CODEC_ID_ADPCM_SBPRO_2: return 2; case AV_CODEC_ID_ADPCM_SBPRO_3: return 3; case AV_CODEC_ID_ADPCM_SBPRO_4: case AV_CODEC_ID_ADPCM_IMA_WAV: case AV_CODEC_ID_ADPCM_IMA_QT: case AV_CODEC_ID_ADPCM_SWF: case AV_CODEC_ID_ADPCM_MS: return 4; default: return av_get_exact_bits_per_sample(codec_id); } } static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba, uint32_t tag, int bits_per_coded_sample, int64_t bitrate, uint8_t * extradata, int frame_size, int frame_bytes) { int bps = av_get_exact_bits_per_sample(id); int framecount = (ba > 0 && frame_bytes / ba > 0) ? frame_bytes / ba : 1; /* codecs with an exact constant bits per sample */ if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768) return (frame_bytes * 8LL) / (bps * ch); bps = bits_per_coded_sample; /* codecs with a fixed packet duration */ switch (id) { case AV_CODEC_ID_ADPCM_ADX: return 32; case AV_CODEC_ID_ADPCM_IMA_QT: return 64; case AV_CODEC_ID_ADPCM_EA_XAS: return 128; case AV_CODEC_ID_AMR_NB: case AV_CODEC_ID_EVRC: case AV_CODEC_ID_GSM: case AV_CODEC_ID_QCELP: case AV_CODEC_ID_RA_288: return 160; case AV_CODEC_ID_AMR_WB: case AV_CODEC_ID_GSM_MS: return 320; case AV_CODEC_ID_MP1: return 384; case AV_CODEC_ID_ATRAC1: return 512; case AV_CODEC_ID_ATRAC9: case AV_CODEC_ID_ATRAC3: return 1024 * framecount; case AV_CODEC_ID_ATRAC3P: return 2048; case AV_CODEC_ID_MP2: case AV_CODEC_ID_MUSEPACK7: return 1152; case AV_CODEC_ID_AC3: return 1536; } if (sr > 0) { /* calc from sample rate */ if (id == AV_CODEC_ID_TTA) return 256 * sr / 245; else if (id == AV_CODEC_ID_DST) return 588 * sr / 44100; if (ch > 0) { /* calc from sample rate and channels */ if (id == AV_CODEC_ID_BINKAUDIO_DCT) return (480 << (sr / 22050)) / ch; } if (id == AV_CODEC_ID_MP3) return sr <= 24000 ? 
576 : 1152; } if (ba > 0) { /* calc from block_align */ if (id == AV_CODEC_ID_SIPR) { switch (ba) { case 20: return 160; case 19: return 144; case 29: return 288; case 37: return 480; } } else if (id == AV_CODEC_ID_ILBC) { switch (ba) { case 38: return 160; case 50: return 240; } } } if (frame_bytes > 0) { /* calc from frame_bytes only */ if (id == AV_CODEC_ID_TRUESPEECH) return 240 * (frame_bytes / 32); if (id == AV_CODEC_ID_NELLYMOSER) return 256 * (frame_bytes / 64); if (id == AV_CODEC_ID_RA_144) return 160 * (frame_bytes / 20); if (bps > 0) { /* calc from frame_bytes and bits_per_coded_sample */ if (id == AV_CODEC_ID_ADPCM_G726 || id == AV_CODEC_ID_ADPCM_G726LE) return frame_bytes * 8 / bps; } if (ch > 0 && ch < INT_MAX/16) { /* calc from frame_bytes and channels */ switch (id) { case AV_CODEC_ID_ADPCM_AFC: return frame_bytes / (9 * ch) * 16; case AV_CODEC_ID_ADPCM_PSX: case AV_CODEC_ID_ADPCM_DTK: return frame_bytes / (16 * ch) * 28; case AV_CODEC_ID_ADPCM_4XM: case AV_CODEC_ID_ADPCM_IMA_DAT4: case AV_CODEC_ID_ADPCM_IMA_ISS: return (frame_bytes - 4 * ch) * 2 / ch; case AV_CODEC_ID_ADPCM_IMA_SMJPEG: return (frame_bytes - 4) * 2 / ch; case AV_CODEC_ID_ADPCM_IMA_AMV: return (frame_bytes - 8) * 2 / ch; case AV_CODEC_ID_ADPCM_THP: case AV_CODEC_ID_ADPCM_THP_LE: if (extradata) return frame_bytes * 14 / (8 * ch); break; case AV_CODEC_ID_ADPCM_XA: return (frame_bytes / 128) * 224 / ch; case AV_CODEC_ID_INTERPLAY_DPCM: return (frame_bytes - 6 - ch) / ch; case AV_CODEC_ID_ROQ_DPCM: return (frame_bytes - 8) / ch; case AV_CODEC_ID_XAN_DPCM: return (frame_bytes - 2 * ch) / ch; case AV_CODEC_ID_MACE3: return 3 * frame_bytes / ch; case AV_CODEC_ID_MACE6: return 6 * frame_bytes / ch; case AV_CODEC_ID_PCM_LXF: return 2 * (frame_bytes / (5 * ch)); case AV_CODEC_ID_IAC: case AV_CODEC_ID_IMC: return 4 * frame_bytes / ch; } if (tag) { /* calc from frame_bytes, channels, and codec_tag */ if (id == AV_CODEC_ID_SOL_DPCM) { if (tag == 3) return frame_bytes / ch; else return frame_bytes * 2 / ch; } } if (ba > 0) { /* calc from frame_bytes, channels, and block_align */ int blocks = frame_bytes / ba; switch (id) { case AV_CODEC_ID_ADPCM_IMA_WAV: if (bps < 2 || bps > 5) return 0; return blocks * (1 + (ba - 4 * ch) / (bps * ch) * 8); case AV_CODEC_ID_ADPCM_IMA_DK3: return blocks * (((ba - 16) * 2 / 3 * 4) / ch); case AV_CODEC_ID_ADPCM_IMA_DK4: return blocks * (1 + (ba - 4 * ch) * 2 / ch); case AV_CODEC_ID_ADPCM_IMA_RAD: return blocks * ((ba - 4 * ch) * 2 / ch); case AV_CODEC_ID_ADPCM_MS: return blocks * (2 + (ba - 7 * ch) * 2 / ch); case AV_CODEC_ID_ADPCM_MTAF: return blocks * (ba - 16) * 2 / ch; } } if (bps > 0) { /* calc from frame_bytes, channels, and bits_per_coded_sample */ switch (id) { case AV_CODEC_ID_PCM_DVD: if(bps<4 || frame_bytes<3) return 0; return 2 * ((frame_bytes - 3) / ((bps * 2 / 8) * ch)); case AV_CODEC_ID_PCM_BLURAY: if(bps<4 || frame_bytes<4) return 0; return (frame_bytes - 4) / ((FFALIGN(ch, 2) * bps) / 8); case AV_CODEC_ID_S302M: return 2 * (frame_bytes / ((bps + 4) / 4)) / ch; } } } } /* Fall back on using frame_size */ if (frame_size > 1 && frame_bytes) return frame_size; //For WMA we currently have no other means to calculate duration thus we //do it here by assuming CBR, which is true for all known cases. 
if (bitrate > 0 && frame_bytes > 0 && sr > 0 && ba > 1) { if (id == AV_CODEC_ID_WMAV1 || id == AV_CODEC_ID_WMAV2) return (frame_bytes * 8LL * sr) / bitrate; } return 0; } int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) { return get_audio_frame_duration(avctx->codec_id, avctx->sample_rate, avctx->channels, avctx->block_align, avctx->codec_tag, avctx->bits_per_coded_sample, avctx->bit_rate, avctx->extradata, avctx->frame_size, frame_bytes); } int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes) { return get_audio_frame_duration(par->codec_id, par->sample_rate, par->channels, par->block_align, par->codec_tag, par->bits_per_coded_sample, par->bit_rate, par->extradata, par->frame_size, frame_bytes); } #if !HAVE_THREADS int ff_thread_init(AVCodecContext *s) { return -1; } #endif unsigned int av_xiphlacing(unsigned char *s, unsigned int v) { unsigned int n = 0; while (v >= 0xff) { *s++ = 0xff; v -= 0xff; n++; } *s = v; n++; return n; } int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b) { int i; for (i = 0; i < size && !(tab[i][0] == a && tab[i][1] == b); i++) ; return i; } const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index) { int i; if (!codec->hw_configs || index < 0) return NULL; for (i = 0; i <= index; i++) if (!codec->hw_configs[i]) return NULL; return &codec->hw_configs[index]->public; } #if FF_API_USER_VISIBLE_AVHWACCEL AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel) { return NULL; } void av_register_hwaccel(AVHWAccel *hwaccel) { } #endif #if FF_API_LOCKMGR int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)) { return 0; } #endif unsigned int avpriv_toupper4(unsigned int x) { return av_toupper(x & 0xFF) + (av_toupper((x >> 8) & 0xFF) << 8) + (av_toupper((x >> 16) & 0xFF) << 16) + ((unsigned)av_toupper((x >> 24) & 0xFF) << 24); } int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src) { int ret; dst->owner[0] = src->owner[0]; dst->owner[1] = src->owner[1]; ret = av_frame_ref(dst->f, src->f); if (ret < 0) return ret; av_assert0(!dst->progress); if (src->progress && !(dst->progress = av_buffer_ref(src->progress))) { ff_thread_release_buffer(dst->owner[0], dst); return AVERROR(ENOMEM); } return 0; } #if !HAVE_THREADS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) { return ff_get_format(avctx, fmt); } int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags) { f->owner[0] = f->owner[1] = avctx; return ff_get_buffer(avctx, f->f, flags); } void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f) { if (f->f) av_frame_unref(f->f); } void ff_thread_finish_setup(AVCodecContext *avctx) { } void ff_thread_report_progress(ThreadFrame *f, int progress, int field) { } void ff_thread_await_progress(ThreadFrame *f, int progress, int field) { } int ff_thread_can_start_frame(AVCodecContext *avctx) { return 1; } int ff_alloc_entries(AVCodecContext *avctx, int count) { return 0; } void ff_reset_entries(AVCodecContext *avctx) { } void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift) { } void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n) { } #endif int avcodec_is_open(AVCodecContext *s) { return !!s->internal; } int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf) { int ret; char *str; ret = av_bprint_finalize(buf, &str); if (ret < 0) return ret; if (!av_bprint_is_complete(buf)) { av_free(str); return AVERROR(ENOMEM); } avctx->extradata = 
str; /* Note: the string is NUL terminated (so extradata can be read as a * string), but the ending character is not accounted in the size (in * binary formats you are likely not supposed to mux that character). When * extradata is copied, it is also padded with AV_INPUT_BUFFER_PADDING_SIZE * zeros. */ avctx->extradata_size = buf->len; return 0; } const uint8_t *avpriv_find_start_code(const uint8_t *av_restrict p, const uint8_t *end, uint32_t *av_restrict state) { int i; av_assert0(p <= end); if (p >= end) return end; for (i = 0; i < 3; i++) { uint32_t tmp = *state << 8; *state = tmp + *(p++); if (tmp == 0x100 || p == end) return p; } while (p < end) { if (p[-1] > 1 ) p += 3; else if (p[-2] ) p += 2; else if (p[-3]|(p[-1]-1)) p++; else { p++; break; } } p = FFMIN(p, end) - 4; *state = AV_RB32(p); return p + 4; } AVCPBProperties *av_cpb_properties_alloc(size_t *size) { AVCPBProperties *props = av_mallocz(sizeof(AVCPBProperties)); if (!props) return NULL; if (size) *size = sizeof(*props); props->vbv_delay = UINT64_MAX; return props; } AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx) { AVPacketSideData *tmp; AVCPBProperties *props; size_t size; props = av_cpb_properties_alloc(&size); if (!props) return NULL; tmp = av_realloc_array(avctx->coded_side_data, avctx->nb_coded_side_data + 1, sizeof(*tmp)); if (!tmp) { av_freep(&props); return NULL; } avctx->coded_side_data = tmp; avctx->nb_coded_side_data++; avctx->coded_side_data[avctx->nb_coded_side_data - 1].type = AV_PKT_DATA_CPB_PROPERTIES; avctx->coded_side_data[avctx->nb_coded_side_data - 1].data = (uint8_t*)props; avctx->coded_side_data[avctx->nb_coded_side_data - 1].size = size; return props; } static void codec_parameters_reset(AVCodecParameters *par) { av_freep(&par->extradata); memset(par, 0, sizeof(*par)); par->codec_type = AVMEDIA_TYPE_UNKNOWN; par->codec_id = AV_CODEC_ID_NONE; par->format = -1; par->field_order = AV_FIELD_UNKNOWN; par->color_range = AVCOL_RANGE_UNSPECIFIED; par->color_primaries = AVCOL_PRI_UNSPECIFIED; par->color_trc = AVCOL_TRC_UNSPECIFIED; par->color_space = AVCOL_SPC_UNSPECIFIED; par->chroma_location = AVCHROMA_LOC_UNSPECIFIED; par->sample_aspect_ratio = (AVRational){ 0, 1 }; par->profile = FF_PROFILE_UNKNOWN; par->level = FF_LEVEL_UNKNOWN; } AVCodecParameters *avcodec_parameters_alloc(void) { AVCodecParameters *par = av_mallocz(sizeof(*par)); if (!par) return NULL; codec_parameters_reset(par); return par; } void avcodec_parameters_free(AVCodecParameters **ppar) { AVCodecParameters *par = *ppar; if (!par) return; codec_parameters_reset(par); av_freep(ppar); } int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src) { codec_parameters_reset(dst); memcpy(dst, src, sizeof(*dst)); dst->extradata = NULL; dst->extradata_size = 0; if (src->extradata) { dst->extradata = av_mallocz(src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!dst->extradata) return AVERROR(ENOMEM); memcpy(dst->extradata, src->extradata, src->extradata_size); dst->extradata_size = src->extradata_size; } return 0; } int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec) { codec_parameters_reset(par); par->codec_type = codec->codec_type; par->codec_id = codec->codec_id; par->codec_tag = codec->codec_tag; par->bit_rate = codec->bit_rate; par->bits_per_coded_sample = codec->bits_per_coded_sample; par->bits_per_raw_sample = codec->bits_per_raw_sample; par->profile = codec->profile; par->level = codec->level; switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: par->format = 
codec->pix_fmt; par->width = codec->width; par->height = codec->height; par->field_order = codec->field_order; par->color_range = codec->color_range; par->color_primaries = codec->color_primaries; par->color_trc = codec->color_trc; par->color_space = codec->colorspace; par->chroma_location = codec->chroma_sample_location; par->sample_aspect_ratio = codec->sample_aspect_ratio; par->video_delay = codec->has_b_frames; break; case AVMEDIA_TYPE_AUDIO: par->format = codec->sample_fmt; par->channel_layout = codec->channel_layout; par->channels = codec->channels; par->sample_rate = codec->sample_rate; par->block_align = codec->block_align; par->frame_size = codec->frame_size; par->initial_padding = codec->initial_padding; par->trailing_padding = codec->trailing_padding; par->seek_preroll = codec->seek_preroll; break; case AVMEDIA_TYPE_SUBTITLE: par->width = codec->width; par->height = codec->height; break; } if (codec->extradata) { par->extradata = av_mallocz(codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!par->extradata) return AVERROR(ENOMEM); memcpy(par->extradata, codec->extradata, codec->extradata_size); par->extradata_size = codec->extradata_size; } return 0; } int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par) { codec->codec_type = par->codec_type; codec->codec_id = par->codec_id; codec->codec_tag = par->codec_tag; codec->bit_rate = par->bit_rate; codec->bits_per_coded_sample = par->bits_per_coded_sample; codec->bits_per_raw_sample = par->bits_per_raw_sample; codec->profile = par->profile; codec->level = par->level; switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = par->format; codec->width = par->width; codec->height = par->height; codec->field_order = par->field_order; codec->color_range = par->color_range; codec->color_primaries = par->color_primaries; codec->color_trc = par->color_trc; codec->colorspace = par->color_space; codec->chroma_sample_location = par->chroma_location; codec->sample_aspect_ratio = par->sample_aspect_ratio; codec->has_b_frames = par->video_delay; break; case AVMEDIA_TYPE_AUDIO: codec->sample_fmt = par->format; codec->channel_layout = par->channel_layout; codec->channels = par->channels; codec->sample_rate = par->sample_rate; codec->block_align = par->block_align; codec->frame_size = par->frame_size; codec->delay = codec->initial_padding = par->initial_padding; codec->trailing_padding = par->trailing_padding; codec->seek_preroll = par->seek_preroll; break; case AVMEDIA_TYPE_SUBTITLE: codec->width = par->width; codec->height = par->height; break; } if (par->extradata) { av_freep(&codec->extradata); codec->extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!codec->extradata) return AVERROR(ENOMEM); memcpy(codec->extradata, par->extradata, par->extradata_size); codec->extradata_size = par->extradata_size; } return 0; } int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len, void **data, size_t *sei_size) { AVFrameSideData *side_data = NULL; uint8_t *sei_data; if (frame) side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC); if (!side_data) { *data = NULL; return 0; } *sei_size = side_data->size + 11; *data = av_mallocz(*sei_size + prefix_len); if (!*data) return AVERROR(ENOMEM); sei_data = (uint8_t*)*data + prefix_len; // country code sei_data[0] = 181; sei_data[1] = 0; sei_data[2] = 49; /** * 'GA94' is standard in North America for ATSC, but hard coding * this style may not be the right thing to do -- other formats * do exist. 
This information is not available in the side_data * so we are going with this right now. */ AV_WL32(sei_data + 3, MKTAG('G', 'A', '9', '4')); sei_data[7] = 3; sei_data[8] = ((side_data->size/3) & 0x1f) | 0x40; sei_data[9] = 0; memcpy(sei_data + 10, side_data->data, side_data->size); sei_data[side_data->size+10] = 255; return 0; } int64_t ff_guess_coded_bitrate(AVCodecContext *avctx) { AVRational framerate = avctx->framerate; int bits_per_coded_sample = avctx->bits_per_coded_sample; int64_t bitrate; if (!(framerate.num && framerate.den)) framerate = av_inv_q(avctx->time_base); if (!(framerate.num && framerate.den)) return 0; if (!bits_per_coded_sample) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); bits_per_coded_sample = av_get_bits_per_pixel(desc); } bitrate = (int64_t)bits_per_coded_sample * avctx->width * avctx->height * framerate.num / framerate.den; return bitrate; } int ff_int_from_list_or_default(void *ctx, const char * val_name, int val, const int * array_valid_values, int default_value) { int i = 0, ref_val; while (1) { ref_val = array_valid_values[i]; if (ref_val == INT_MAX) break; if (val == ref_val) return val; i++; } /* val is not a valid value */ av_log(ctx, AV_LOG_DEBUG, "%s %d are not supported. Set to default value : %d\n", val_name, val, default_value); return default_value; }
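Editor's note: the record above ends one copy of libavcodec/utils.c and the next line begins another copy of the same file. As a hedged illustration only (not part of the recorded sources), the short C sketch below shows how a caller typically drives the avcodec_parameters_to_context()/avcodec_open2() path whose validation logic appears in this file: find a decoder, copy demuxed stream parameters into a fresh context, open it, and free it on failure. The helper name open_decoder_from_par() is invented for the example, and error handling is deliberately minimal.

/* Illustrative sketch only; not taken from the recorded FFmpeg sources.
 * open_decoder_from_par() is a hypothetical helper name. */
#include <libavcodec/avcodec.h>

static AVCodecContext *open_decoder_from_par(const AVCodecParameters *par)
{
    const AVCodec *codec = avcodec_find_decoder(par->codec_id);
    AVCodecContext *ctx;

    if (!codec)
        return NULL;                      /* no decoder built in for this codec_id */

    ctx = avcodec_alloc_context3(codec);  /* allocate context with codec defaults */
    if (!ctx)
        return NULL;

    /* Copy width/height, sample format, extradata, etc. from the demuxer;
     * avcodec_open2() then runs the sanity checks seen in this file. */
    if (avcodec_parameters_to_context(ctx, par) < 0 ||
        avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);       /* also releases priv_data and extradata */
        return NULL;
    }
    return ctx;                           /* caller frees with avcodec_free_context() */
}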
/*
 * utils for libavcodec
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * utils.
 */

#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/crc.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem_internal.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/dict.h"
#include "libavutil/thread.h"
#include "avcodec.h"
#include "decode.h"
#include "hwaccel.h"
#include "libavutil/opt.h"
#include "mpegvideo.h"
#include "thread.h"
#include "frame_thread_encoder.h"
#include "internal.h"
#include "raw.h"
#include "bytestream.h"
#include "version.h"
#include <stdlib.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <limits.h>
#include <float.h>
#if CONFIG_ICONV
# include <iconv.h>
#endif

#include "libavutil/ffversion.h"
const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;

static AVMutex codec_mutex = AV_MUTEX_INITIALIZER;

void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
{
    uint8_t **p = ptr;
    if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
        av_freep(p);
        *size = 0;
        return;
    }
    if (!ff_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE, 1))
        memset(*p + min_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
}

void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
{
    uint8_t **p = ptr;
    if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
        av_freep(p);
        *size = 0;
        return;
    }
    if (!ff_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE, 1))
        memset(*p, 0, min_size + AV_INPUT_BUFFER_PADDING_SIZE);
}

int av_codec_is_encoder(const AVCodec *codec)
{
    return codec && (codec->encode_sub || codec->encode2 ||codec->send_frame);
}

int av_codec_is_decoder(const AVCodec *codec)
{
    return codec && (codec->decode || codec->receive_frame);
}

int ff_set_dimensions(AVCodecContext *s, int width, int height)
{
    int ret = av_image_check_size2(width, height, s->max_pixels, AV_PIX_FMT_NONE, 0, s);

    if (ret < 0)
        width = height = 0;

    s->coded_width  = width;
    s->coded_height = height;
    s->width        = AV_CEIL_RSHIFT(width,  s->lowres);
    s->height       = AV_CEIL_RSHIFT(height, s->lowres);

    return ret;
}

int ff_set_sar(AVCodecContext *avctx, AVRational sar)
{
    int ret = av_image_check_sar(avctx->width, avctx->height, sar);

    if (ret < 0) {
        av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %d/%d\n",
               sar.num, sar.den);
        avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
        return ret;
    } else {
        avctx->sample_aspect_ratio = sar;
    }
    return 0;
}

int
ff_side_data_update_matrix_encoding(AVFrame *frame, enum AVMatrixEncoding matrix_encoding) { AVFrameSideData *side_data; enum AVMatrixEncoding *data; side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_MATRIXENCODING); if (!side_data) side_data = av_frame_new_side_data(frame, AV_FRAME_DATA_MATRIXENCODING, sizeof(enum AVMatrixEncoding)); if (!side_data) return AVERROR(ENOMEM); data = (enum AVMatrixEncoding*)side_data->data; *data = matrix_encoding; return 0; } void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS]) { int i; int w_align = 1; int h_align = 1; AVPixFmtDescriptor const *desc = av_pix_fmt_desc_get(s->pix_fmt); if (desc) { w_align = 1 << desc->log2_chroma_w; h_align = 1 << desc->log2_chroma_h; } switch (s->pix_fmt) { case AV_PIX_FMT_YUV420P: case AV_PIX_FMT_YUYV422: case AV_PIX_FMT_YVYU422: case AV_PIX_FMT_UYVY422: case AV_PIX_FMT_YUV422P: case AV_PIX_FMT_YUV440P: case AV_PIX_FMT_YUV444P: case AV_PIX_FMT_GBRP: case AV_PIX_FMT_GBRAP: case AV_PIX_FMT_GRAY8: case AV_PIX_FMT_GRAY16BE: case AV_PIX_FMT_GRAY16LE: case AV_PIX_FMT_YUVJ420P: case AV_PIX_FMT_YUVJ422P: case AV_PIX_FMT_YUVJ440P: case AV_PIX_FMT_YUVJ444P: case AV_PIX_FMT_YUVA420P: case AV_PIX_FMT_YUVA422P: case AV_PIX_FMT_YUVA444P: case AV_PIX_FMT_YUV420P9LE: case AV_PIX_FMT_YUV420P9BE: case AV_PIX_FMT_YUV420P10LE: case AV_PIX_FMT_YUV420P10BE: case AV_PIX_FMT_YUV420P12LE: case AV_PIX_FMT_YUV420P12BE: case AV_PIX_FMT_YUV420P14LE: case AV_PIX_FMT_YUV420P14BE: case AV_PIX_FMT_YUV420P16LE: case AV_PIX_FMT_YUV420P16BE: case AV_PIX_FMT_YUVA420P9LE: case AV_PIX_FMT_YUVA420P9BE: case AV_PIX_FMT_YUVA420P10LE: case AV_PIX_FMT_YUVA420P10BE: case AV_PIX_FMT_YUVA420P16LE: case AV_PIX_FMT_YUVA420P16BE: case AV_PIX_FMT_YUV422P9LE: case AV_PIX_FMT_YUV422P9BE: case AV_PIX_FMT_YUV422P10LE: case AV_PIX_FMT_YUV422P10BE: case AV_PIX_FMT_YUV422P12LE: case AV_PIX_FMT_YUV422P12BE: case AV_PIX_FMT_YUV422P14LE: case AV_PIX_FMT_YUV422P14BE: case AV_PIX_FMT_YUV422P16LE: case AV_PIX_FMT_YUV422P16BE: case AV_PIX_FMT_YUVA422P9LE: case AV_PIX_FMT_YUVA422P9BE: case AV_PIX_FMT_YUVA422P10LE: case AV_PIX_FMT_YUVA422P10BE: case AV_PIX_FMT_YUVA422P12LE: case AV_PIX_FMT_YUVA422P12BE: case AV_PIX_FMT_YUVA422P16LE: case AV_PIX_FMT_YUVA422P16BE: case AV_PIX_FMT_YUV440P10LE: case AV_PIX_FMT_YUV440P10BE: case AV_PIX_FMT_YUV440P12LE: case AV_PIX_FMT_YUV440P12BE: case AV_PIX_FMT_YUV444P9LE: case AV_PIX_FMT_YUV444P9BE: case AV_PIX_FMT_YUV444P10LE: case AV_PIX_FMT_YUV444P10BE: case AV_PIX_FMT_YUV444P12LE: case AV_PIX_FMT_YUV444P12BE: case AV_PIX_FMT_YUV444P14LE: case AV_PIX_FMT_YUV444P14BE: case AV_PIX_FMT_YUV444P16LE: case AV_PIX_FMT_YUV444P16BE: case AV_PIX_FMT_YUVA444P9LE: case AV_PIX_FMT_YUVA444P9BE: case AV_PIX_FMT_YUVA444P10LE: case AV_PIX_FMT_YUVA444P10BE: case AV_PIX_FMT_YUVA444P12LE: case AV_PIX_FMT_YUVA444P12BE: case AV_PIX_FMT_YUVA444P16LE: case AV_PIX_FMT_YUVA444P16BE: case AV_PIX_FMT_GBRP9LE: case AV_PIX_FMT_GBRP9BE: case AV_PIX_FMT_GBRP10LE: case AV_PIX_FMT_GBRP10BE: case AV_PIX_FMT_GBRP12LE: case AV_PIX_FMT_GBRP12BE: case AV_PIX_FMT_GBRP14LE: case AV_PIX_FMT_GBRP14BE: case AV_PIX_FMT_GBRP16LE: case AV_PIX_FMT_GBRP16BE: case AV_PIX_FMT_GBRAP12LE: case AV_PIX_FMT_GBRAP12BE: case AV_PIX_FMT_GBRAP16LE: case AV_PIX_FMT_GBRAP16BE: w_align = 16; //FIXME assume 16 pixel per macroblock h_align = 16 * 2; // interlaced needs 2 macroblocks height break; case AV_PIX_FMT_YUV411P: case AV_PIX_FMT_YUVJ411P: case AV_PIX_FMT_UYYVYY411: w_align = 32; h_align = 16 * 2; break; case AV_PIX_FMT_YUV410P: if 
(s->codec_id == AV_CODEC_ID_SVQ1) { w_align = 64; h_align = 64; } break; case AV_PIX_FMT_RGB555: if (s->codec_id == AV_CODEC_ID_RPZA) { w_align = 4; h_align = 4; } if (s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) { w_align = 8; h_align = 8; } break; case AV_PIX_FMT_PAL8: case AV_PIX_FMT_BGR8: case AV_PIX_FMT_RGB8: if (s->codec_id == AV_CODEC_ID_SMC || s->codec_id == AV_CODEC_ID_CINEPAK) { w_align = 4; h_align = 4; } if (s->codec_id == AV_CODEC_ID_JV || s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) { w_align = 8; h_align = 8; } break; case AV_PIX_FMT_BGR24: if ((s->codec_id == AV_CODEC_ID_MSZH) || (s->codec_id == AV_CODEC_ID_ZLIB)) { w_align = 4; h_align = 4; } break; case AV_PIX_FMT_RGB24: if (s->codec_id == AV_CODEC_ID_CINEPAK) { w_align = 4; h_align = 4; } break; default: break; } if (s->codec_id == AV_CODEC_ID_IFF_ILBM) { w_align = FFMAX(w_align, 8); } *width = FFALIGN(*width, w_align); *height = FFALIGN(*height, h_align); if (s->codec_id == AV_CODEC_ID_H264 || s->lowres || s->codec_id == AV_CODEC_ID_VP5 || s->codec_id == AV_CODEC_ID_VP6 || s->codec_id == AV_CODEC_ID_VP6F || s->codec_id == AV_CODEC_ID_VP6A ) { // some of the optimized chroma MC reads one line too much // which is also done in mpeg decoders with lowres > 0 *height += 2; // H.264 uses edge emulation for out of frame motion vectors, for this // it requires a temporary area large enough to hold a 21x21 block, // increasing witdth ensure that the temporary area is large enough, // the next rounded up width is 32 *width = FFMAX(*width, 32); } for (i = 0; i < 4; i++) linesize_align[i] = STRIDE_ALIGN; } void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt); int chroma_shift = desc->log2_chroma_w; int linesize_align[AV_NUM_DATA_POINTERS]; int align; avcodec_align_dimensions2(s, width, height, linesize_align); align = FFMAX(linesize_align[0], linesize_align[3]); linesize_align[1] <<= chroma_shift; linesize_align[2] <<= chroma_shift; align = FFMAX3(align, linesize_align[1], linesize_align[2]); *width = FFALIGN(*width, align); } int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos) { if (pos <= AVCHROMA_LOC_UNSPECIFIED || pos >= AVCHROMA_LOC_NB) return AVERROR(EINVAL); pos--; *xpos = (pos&1) * 128; *ypos = ((pos>>1)^(pos<4)) * 128; return 0; } enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos) { int pos, xout, yout; for (pos = AVCHROMA_LOC_UNSPECIFIED + 1; pos < AVCHROMA_LOC_NB; pos++) { if (avcodec_enum_to_chroma_pos(&xout, &yout, pos) == 0 && xout == xpos && yout == ypos) return pos; } return AVCHROMA_LOC_UNSPECIFIED; } int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, enum AVSampleFormat sample_fmt, const uint8_t *buf, int buf_size, int align) { int ch, planar, needed_size, ret = 0; needed_size = av_samples_get_buffer_size(NULL, nb_channels, frame->nb_samples, sample_fmt, align); if (buf_size < needed_size) return AVERROR(EINVAL); planar = av_sample_fmt_is_planar(sample_fmt); if (planar && nb_channels > AV_NUM_DATA_POINTERS) { if (!(frame->extended_data = av_mallocz_array(nb_channels, sizeof(*frame->extended_data)))) return AVERROR(ENOMEM); } else { frame->extended_data = frame->data; } if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0], (uint8_t *)(intptr_t)buf, nb_channels, frame->nb_samples, sample_fmt, align)) < 0) { if (frame->extended_data != frame->data) av_freep(&frame->extended_data); return ret; } if (frame->extended_data != frame->data) { for (ch = 0; ch < 
AV_NUM_DATA_POINTERS; ch++) frame->data[ch] = frame->extended_data[ch]; } return ret; } void ff_color_frame(AVFrame *frame, const int c[4]) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); int p, y, x; av_assert0(desc->flags & AV_PIX_FMT_FLAG_PLANAR); for (p = 0; p<desc->nb_components; p++) { uint8_t *dst = frame->data[p]; int is_chroma = p == 1 || p == 2; int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width; int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height; for (y = 0; y < height; y++) { if (desc->comp[0].depth >= 9) { for (x = 0; x<bytes; x++) ((uint16_t*)dst)[x] = c[p]; }else memset(dst, c[p], bytes); dst += frame->linesize[p]; } } } int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size) { int i; for (i = 0; i < count; i++) { int r = func(c, (char *)arg + i * size); if (ret) ret[i] = r; } emms_c(); return 0; } int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count) { int i; for (i = 0; i < count; i++) { int r = func(c, arg, i, 0); if (ret) ret[i] = r; } emms_c(); return 0; } enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags, unsigned int fourcc) { while (tags->pix_fmt >= 0) { if (tags->fourcc == fourcc) return tags->pix_fmt; tags++; } return AV_PIX_FMT_NONE; } #if FF_API_CODEC_GET_SET MAKE_ACCESSORS(AVCodecContext, codec, AVRational, pkt_timebase) MAKE_ACCESSORS(AVCodecContext, codec, const AVCodecDescriptor *, codec_descriptor) MAKE_ACCESSORS(AVCodecContext, codec, int, lowres) MAKE_ACCESSORS(AVCodecContext, codec, int, seek_preroll) MAKE_ACCESSORS(AVCodecContext, codec, uint16_t*, chroma_intra_matrix) unsigned av_codec_get_codec_properties(const AVCodecContext *codec) { return codec->properties; } int av_codec_get_max_lowres(const AVCodec *codec) { return codec->max_lowres; } #endif int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec){ return !!(codec->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM); } static int64_t get_bit_rate(AVCodecContext *ctx) { int64_t bit_rate; int bits_per_sample; switch (ctx->codec_type) { case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_SUBTITLE: case AVMEDIA_TYPE_ATTACHMENT: bit_rate = ctx->bit_rate; break; case AVMEDIA_TYPE_AUDIO: bits_per_sample = av_get_bits_per_sample(ctx->codec_id); bit_rate = bits_per_sample ? 
ctx->sample_rate * (int64_t)ctx->channels * bits_per_sample : ctx->bit_rate; break; default: bit_rate = 0; break; } return bit_rate; } static void ff_lock_avcodec(AVCodecContext *log_ctx, const AVCodec *codec) { if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init) ff_mutex_lock(&codec_mutex); } static void ff_unlock_avcodec(const AVCodec *codec) { if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init) ff_mutex_unlock(&codec_mutex); } int attribute_align_arg ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) { int ret = 0; ff_unlock_avcodec(codec); ret = avcodec_open2(avctx, codec, options); ff_lock_avcodec(avctx, codec); return ret; } int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) { int ret = 0; int codec_init_ok = 0; AVDictionary *tmp = NULL; const AVPixFmtDescriptor *pixdesc; if (avcodec_is_open(avctx)) return 0; if ((!codec && !avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n"); return AVERROR(EINVAL); } if ((codec && avctx->codec && codec != avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, " "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name); return AVERROR(EINVAL); } if (!codec) codec = avctx->codec; if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE) return AVERROR(EINVAL); if (options) av_dict_copy(&tmp, *options, 0); ff_lock_avcodec(avctx, codec); avctx->internal = av_mallocz(sizeof(*avctx->internal)); if (!avctx->internal) { ret = AVERROR(ENOMEM); goto end; } avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool)); if (!avctx->internal->pool) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->to_free = av_frame_alloc(); if (!avctx->internal->to_free) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->compat_decode_frame = av_frame_alloc(); if (!avctx->internal->compat_decode_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->buffer_frame = av_frame_alloc(); if (!avctx->internal->buffer_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->buffer_pkt = av_packet_alloc(); if (!avctx->internal->buffer_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->ds.in_pkt = av_packet_alloc(); if (!avctx->internal->ds.in_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->last_pkt_props = av_packet_alloc(); if (!avctx->internal->last_pkt_props) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->skip_samples_multiplier = 1; if (codec->priv_data_size > 0) { if (!avctx->priv_data) { avctx->priv_data = av_mallocz(codec->priv_data_size); if (!avctx->priv_data) { ret = AVERROR(ENOMEM); goto end; } if (codec->priv_class) { *(const AVClass **)avctx->priv_data = codec->priv_class; av_opt_set_defaults(avctx->priv_data); } } if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0) goto free_and_end; } else { avctx->priv_data = NULL; } if ((ret = av_opt_set_dict(avctx, &tmp)) < 0) goto free_and_end; if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) { av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist); ret = AVERROR(EINVAL); goto free_and_end; } // only call ff_set_dimensions() for non H.264/VP6F/DXV codecs so as not to overwrite previously setup dimensions if (!(avctx->coded_width && avctx->coded_height && 
avctx->width && avctx->height && (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) { if (avctx->coded_width && avctx->coded_height) ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); else if (avctx->width && avctx->height) ret = ff_set_dimensions(avctx, avctx->width, avctx->height); if (ret < 0) goto free_and_end; } if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height) && ( av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0 || av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) { av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n"); ff_set_dimensions(avctx, 0, 0); } if (avctx->width > 0 && avctx->height > 0) { if (av_image_check_sar(avctx->width, avctx->height, avctx->sample_aspect_ratio) < 0) { av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den); avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; } } /* if the decoder init function was already called previously, * free the already allocated subtitle_header before overwriting it */ if (av_codec_is_decoder(codec)) av_freep(&avctx->subtitle_header); if (avctx->channels > FF_SANE_NB_CHANNELS) { av_log(avctx, AV_LOG_ERROR, "Too many channels: %d\n", avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } avctx->codec = codec; if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) && avctx->codec_id == AV_CODEC_ID_NONE) { avctx->codec_type = codec->type; avctx->codec_id = codec->id; } if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) { av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n"); ret = AVERROR(EINVAL); goto free_and_end; } avctx->frame_number = 0; avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id); if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder"; AVCodec *codec2; av_log(avctx, AV_LOG_ERROR, "The %s '%s' is experimental but experimental codecs are not enabled, " "add '-strict %d' if you want to use it.\n", codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL); codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id); if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL)) av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n", codec_string, codec2->name); ret = AVERROR_EXPERIMENTAL; goto free_and_end; } if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && (!avctx->time_base.num || !avctx->time_base.den)) { avctx->time_base.num = 1; avctx->time_base.den = avctx->sample_rate; } if (!HAVE_THREADS) av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n"); if (CONFIG_FRAME_THREAD_ENCODER && av_codec_is_encoder(avctx->codec)) { ff_unlock_avcodec(codec); //we will instantiate a few encoders thus kick the counter to prevent false detection of a problem ret = ff_frame_thread_encoder_init(avctx, options ? 
*options : NULL); ff_lock_avcodec(avctx, codec); if (ret < 0) goto free_and_end; } if (av_codec_is_decoder(avctx->codec)) { ret = ff_decode_bsfs_init(avctx); if (ret < 0) goto free_and_end; } if (HAVE_THREADS && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) { ret = ff_thread_init(avctx); if (ret < 0) { goto free_and_end; } } if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS)) avctx->thread_count = 1; if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) { av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n", avctx->codec->max_lowres); avctx->lowres = avctx->codec->max_lowres; } if (av_codec_is_encoder(avctx->codec)) { int i; #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } FF_ENABLE_DEPRECATION_WARNINGS #endif if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) { av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n"); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->sample_fmts) { for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) { if (avctx->sample_fmt == avctx->codec->sample_fmts[i]) break; if (avctx->channels == 1 && av_get_planar_sample_fmt(avctx->sample_fmt) == av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) { avctx->sample_fmt = avctx->codec->sample_fmts[i]; break; } } if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) { char buf[128]; snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt); av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n", (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf)); ret = AVERROR(EINVAL); goto free_and_end; } } if (avctx->codec->pix_fmts) { for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++) if (avctx->pix_fmt == avctx->codec->pix_fmts[i]) break; if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG) && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) { char buf[128]; snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt); av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n", (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf)); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P) avctx->color_range = AVCOL_RANGE_JPEG; } if (avctx->codec->supported_samplerates) { for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++) if (avctx->sample_rate == avctx->codec->supported_samplerates[i]) break; if (avctx->codec->supported_samplerates[i] == 0) { av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", avctx->sample_rate); ret = AVERROR(EINVAL); goto free_and_end; } } if (avctx->sample_rate < 0) { av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", avctx->sample_rate); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->channel_layouts) { if (!avctx->channel_layout) { av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n"); } else { for (i = 0; avctx->codec->channel_layouts[i] != 0; i++) if (avctx->channel_layout == avctx->codec->channel_layouts[i]) break; if 
(avctx->codec->channel_layouts[i] == 0) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf); ret = AVERROR(EINVAL); goto free_and_end; } } } if (avctx->channel_layout && avctx->channels) { int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); if (channels != avctx->channels) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_ERROR, "Channel layout '%s' with %d channels does not match number of specified channels %d\n", buf, channels, avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } } else if (avctx->channel_layout) { avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout); } if (avctx->channels < 0) { av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n", avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) { pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt); if ( avctx->bits_per_raw_sample < 0 || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) { av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n", avctx->bits_per_raw_sample, pixdesc->comp[0].depth); avctx->bits_per_raw_sample = pixdesc->comp[0].depth; } if (avctx->width <= 0 || avctx->height <= 0) { av_log(avctx, AV_LOG_ERROR, "dimensions not set\n"); ret = AVERROR(EINVAL); goto free_and_end; } } if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) && avctx->bit_rate>0 && avctx->bit_rate<1000) { av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate); } if (!avctx->rc_initial_buffer_occupancy) avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4; if (avctx->ticks_per_frame && avctx->time_base.num && avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) { av_log(avctx, AV_LOG_ERROR, "ticks_per_frame %d too large for the timebase %d/%d.", avctx->ticks_per_frame, avctx->time_base.num, avctx->time_base.den); goto free_and_end; } if (avctx->hw_frames_ctx) { AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; if (frames_ctx->format != avctx->pix_fmt) { av_log(avctx, AV_LOG_ERROR, "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n"); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE && avctx->sw_pix_fmt != frames_ctx->sw_format) { av_log(avctx, AV_LOG_ERROR, "Mismatching AVCodecContext.sw_pix_fmt (%s) " "and AVHWFramesContext.sw_format (%s)\n", av_get_pix_fmt_name(avctx->sw_pix_fmt), av_get_pix_fmt_name(frames_ctx->sw_format)); ret = AVERROR(EINVAL); goto free_and_end; } avctx->sw_pix_fmt = frames_ctx->sw_format; } } avctx->pts_correction_num_faulty_pts = avctx->pts_correction_num_faulty_dts = 0; avctx->pts_correction_last_pts = avctx->pts_correction_last_dts = INT64_MIN; if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO) av_log(avctx, AV_LOG_WARNING, "gray decoding requested but not enabled at configuration time\n"); if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME) || avctx->internal->frame_thread_encoder)) { ret = avctx->codec->init(avctx); if (ret < 0) { goto free_and_end; } codec_init_ok = 1; } ret=0; if (av_codec_is_decoder(avctx->codec)) { if (!avctx->bit_rate) 
avctx->bit_rate = get_bit_rate(avctx); /* validate channel layout from the decoder */ if (avctx->channel_layout) { int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); if (!avctx->channels) avctx->channels = channels; else if (channels != avctx->channels) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_WARNING, "Channel layout '%s' with %d channels does not match specified number of channels %d: " "ignoring specified channel layout\n", buf, channels, avctx->channels); avctx->channel_layout = 0; } } if (avctx->channels && avctx->channels < 0 || avctx->channels > FF_SANE_NB_CHANNELS) { ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->bits_per_coded_sample < 0) { ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->sub_charenc) { if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) { av_log(avctx, AV_LOG_ERROR, "Character encoding is only " "supported with subtitles codecs\n"); ret = AVERROR(EINVAL); goto free_and_end; } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) { av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, " "subtitles character encoding will be ignored\n", avctx->codec_descriptor->name); avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING; } else { /* input character encoding is set for a text based subtitle * codec at this point */ if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC) avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER; if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) { #if CONFIG_ICONV iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc); if (cd == (iconv_t)-1) { ret = AVERROR(errno); av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context " "with input character encoding \"%s\"\n", avctx->sub_charenc); goto free_and_end; } iconv_close(cd); #else av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles " "conversion needs a libavcodec built with iconv support " "for this codec\n"); ret = AVERROR(ENOSYS); goto free_and_end; #endif } } } #if FF_API_AVCTX_TIMEBASE if (avctx->framerate.num > 0 && avctx->framerate.den > 0) avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); #endif } if (codec->priv_data_size > 0 && avctx->priv_data && codec->priv_class) { av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class); } end: ff_unlock_avcodec(codec); if (options) { av_dict_free(options); *options = tmp; } return ret; free_and_end: if (avctx->codec && avctx->codec->close && (codec_init_ok || (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP))) avctx->codec->close(avctx); if (codec->priv_class && codec->priv_data_size) av_opt_free(avctx->priv_data); av_opt_free(avctx); #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS av_frame_free(&avctx->coded_frame); FF_ENABLE_DEPRECATION_WARNINGS #endif av_dict_free(&tmp); av_freep(&avctx->priv_data); if (avctx->internal) { av_frame_free(&avctx->internal->to_free); av_frame_free(&avctx->internal->compat_decode_frame); av_frame_free(&avctx->internal->buffer_frame); av_packet_free(&avctx->internal->buffer_pkt); av_packet_free(&avctx->internal->last_pkt_props); av_packet_free(&avctx->internal->ds.in_pkt); ff_decode_bsfs_uninit(avctx); av_freep(&avctx->internal->pool); } av_freep(&avctx->internal); avctx->codec = NULL; goto end; } void avsubtitle_free(AVSubtitle *sub) { int i; for (i = 0; i < sub->num_rects; i++) { av_freep(&sub->rects[i]->data[0]); av_freep(&sub->rects[i]->data[1]); av_freep(&sub->rects[i]->data[2]); 
av_freep(&sub->rects[i]->data[3]); av_freep(&sub->rects[i]->text); av_freep(&sub->rects[i]->ass); av_freep(&sub->rects[i]); } av_freep(&sub->rects); memset(sub, 0, sizeof(*sub)); } av_cold int avcodec_close(AVCodecContext *avctx) { int i; if (!avctx) return 0; if (avcodec_is_open(avctx)) { FramePool *pool = avctx->internal->pool; if (CONFIG_FRAME_THREAD_ENCODER && avctx->internal->frame_thread_encoder && avctx->thread_count > 1) { ff_frame_thread_encoder_free(avctx); } if (HAVE_THREADS && avctx->internal->thread_ctx) ff_thread_free(avctx); if (avctx->codec && avctx->codec->close) avctx->codec->close(avctx); avctx->internal->byte_buffer_size = 0; av_freep(&avctx->internal->byte_buffer); av_frame_free(&avctx->internal->to_free); av_frame_free(&avctx->internal->compat_decode_frame); av_frame_free(&avctx->internal->buffer_frame); av_packet_free(&avctx->internal->buffer_pkt); av_packet_free(&avctx->internal->last_pkt_props); av_packet_free(&avctx->internal->ds.in_pkt); for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) av_buffer_pool_uninit(&pool->pools[i]); av_freep(&avctx->internal->pool); if (avctx->hwaccel && avctx->hwaccel->uninit) avctx->hwaccel->uninit(avctx); av_freep(&avctx->internal->hwaccel_priv_data); ff_decode_bsfs_uninit(avctx); av_freep(&avctx->internal); } for (i = 0; i < avctx->nb_coded_side_data; i++) av_freep(&avctx->coded_side_data[i].data); av_freep(&avctx->coded_side_data); avctx->nb_coded_side_data = 0; av_buffer_unref(&avctx->hw_frames_ctx); av_buffer_unref(&avctx->hw_device_ctx); if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) av_opt_free(avctx->priv_data); av_opt_free(avctx); av_freep(&avctx->priv_data); if (av_codec_is_encoder(avctx->codec)) { av_freep(&avctx->extradata); #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS av_frame_free(&avctx->coded_frame); FF_ENABLE_DEPRECATION_WARNINGS #endif } avctx->codec = NULL; avctx->active_thread_type = 0; return 0; } const char *avcodec_get_name(enum AVCodecID id) { const AVCodecDescriptor *cd; AVCodec *codec; if (id == AV_CODEC_ID_NONE) return "none"; cd = avcodec_descriptor_get(id); if (cd) return cd->name; av_log(NULL, AV_LOG_WARNING, "Codec 0x%x is not in the full list.\n", id); codec = avcodec_find_decoder(id); if (codec) return codec->name; codec = avcodec_find_encoder(id); if (codec) return codec->name; return "unknown_codec"; } size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag) { int i, len, ret = 0; #define TAG_PRINT(x) \ (((x) >= '0' && (x) <= '9') || \ ((x) >= 'a' && (x) <= 'z') || ((x) >= 'A' && (x) <= 'Z') || \ ((x) == '.' || (x) == ' ' || (x) == '-' || (x) == '_')) for (i = 0; i < 4; i++) { len = snprintf(buf, buf_size, TAG_PRINT(codec_tag & 0xFF) ? "%c" : "[%d]", codec_tag & 0xFF); buf += len; buf_size = buf_size > len ? buf_size - len : 0; ret += len; codec_tag >>= 8; } return ret; } void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) { const char *codec_type; const char *codec_name; const char *profile = NULL; int64_t bitrate; int new_line = 0; AVRational display_aspect_ratio; const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : ", "; if (!buf || buf_size <= 0) return; codec_type = av_get_media_type_string(enc->codec_type); codec_name = avcodec_get_name(enc->codec_id); profile = avcodec_profile_name(enc->codec_id, enc->profile); snprintf(buf, buf_size, "%s: %s", codec_type ? 
codec_type : "unknown", codec_name); buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */ if (enc->codec && strcmp(enc->codec->name, codec_name)) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", enc->codec->name); if (profile) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", profile); if ( enc->codec_type == AVMEDIA_TYPE_VIDEO && av_log_get_level() >= AV_LOG_VERBOSE && enc->refs) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d reference frame%s", enc->refs, enc->refs > 1 ? "s" : ""); if (enc->codec_tag) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s / 0x%04X)", av_fourcc2str(enc->codec_tag), enc->codec_tag); switch (enc->codec_type) { case AVMEDIA_TYPE_VIDEO: { char detail[256] = "("; av_strlcat(buf, separator, buf_size); snprintf(buf + strlen(buf), buf_size - strlen(buf), "%s", enc->pix_fmt == AV_PIX_FMT_NONE ? "none" : av_get_pix_fmt_name(enc->pix_fmt)); if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE && enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth) av_strlcatf(detail, sizeof(detail), "%d bpc, ", enc->bits_per_raw_sample); if (enc->color_range != AVCOL_RANGE_UNSPECIFIED) av_strlcatf(detail, sizeof(detail), "%s, ", av_color_range_name(enc->color_range)); if (enc->colorspace != AVCOL_SPC_UNSPECIFIED || enc->color_primaries != AVCOL_PRI_UNSPECIFIED || enc->color_trc != AVCOL_TRC_UNSPECIFIED) { if (enc->colorspace != (int)enc->color_primaries || enc->colorspace != (int)enc->color_trc) { new_line = 1; av_strlcatf(detail, sizeof(detail), "%s/%s/%s, ", av_color_space_name(enc->colorspace), av_color_primaries_name(enc->color_primaries), av_color_transfer_name(enc->color_trc)); } else av_strlcatf(detail, sizeof(detail), "%s, ", av_get_colorspace_name(enc->colorspace)); } if (enc->field_order != AV_FIELD_UNKNOWN) { const char *field_order = "progressive"; if (enc->field_order == AV_FIELD_TT) field_order = "top first"; else if (enc->field_order == AV_FIELD_BB) field_order = "bottom first"; else if (enc->field_order == AV_FIELD_TB) field_order = "top coded first (swapped)"; else if (enc->field_order == AV_FIELD_BT) field_order = "bottom coded first (swapped)"; av_strlcatf(detail, sizeof(detail), "%s, ", field_order); } if (av_log_get_level() >= AV_LOG_VERBOSE && enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED) av_strlcatf(detail, sizeof(detail), "%s, ", av_chroma_location_name(enc->chroma_sample_location)); if (strlen(detail) > 1) { detail[strlen(detail) - 2] = 0; av_strlcatf(buf, buf_size, "%s)", detail); } } if (enc->width) { av_strlcat(buf, new_line ? 
separator : ", ", buf_size); snprintf(buf + strlen(buf), buf_size - strlen(buf), "%dx%d", enc->width, enc->height); if (av_log_get_level() >= AV_LOG_VERBOSE && (enc->width != enc->coded_width || enc->height != enc->coded_height)) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%dx%d)", enc->coded_width, enc->coded_height); if (enc->sample_aspect_ratio.num) { av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, enc->width * (int64_t)enc->sample_aspect_ratio.num, enc->height * (int64_t)enc->sample_aspect_ratio.den, 1024 * 1024); snprintf(buf + strlen(buf), buf_size - strlen(buf), " [SAR %d:%d DAR %d:%d]", enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den); } if (av_log_get_level() >= AV_LOG_DEBUG) { int g = av_gcd(enc->time_base.num, enc->time_base.den); snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d/%d", enc->time_base.num / g, enc->time_base.den / g); } } if (encode) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", q=%d-%d", enc->qmin, enc->qmax); } else { if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", Closed Captions"); if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", lossless"); } break; case AVMEDIA_TYPE_AUDIO: av_strlcat(buf, separator, buf_size); if (enc->sample_rate) { snprintf(buf + strlen(buf), buf_size - strlen(buf), "%d Hz, ", enc->sample_rate); } av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout); if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %s", av_get_sample_fmt_name(enc->sample_fmt)); } if ( enc->bits_per_raw_sample > 0 && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%d bit)", enc->bits_per_raw_sample); if (av_log_get_level() >= AV_LOG_VERBOSE) { if (enc->initial_padding) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", delay %d", enc->initial_padding); if (enc->trailing_padding) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", padding %d", enc->trailing_padding); } break; case AVMEDIA_TYPE_DATA: if (av_log_get_level() >= AV_LOG_DEBUG) { int g = av_gcd(enc->time_base.num, enc->time_base.den); if (g) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d/%d", enc->time_base.num / g, enc->time_base.den / g); } break; case AVMEDIA_TYPE_SUBTITLE: if (enc->width) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %dx%d", enc->width, enc->height); break; default: return; } if (encode) { if (enc->flags & AV_CODEC_FLAG_PASS1) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", pass 1"); if (enc->flags & AV_CODEC_FLAG_PASS2) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", pass 2"); } bitrate = get_bit_rate(enc); if (bitrate != 0) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %"PRId64" kb/s", bitrate / 1000); } else if (enc->rc_max_rate > 0) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", max. 
%"PRId64" kb/s", enc->rc_max_rate / 1000); } } const char *av_get_profile_name(const AVCodec *codec, int profile) { const AVProfile *p; if (profile == FF_PROFILE_UNKNOWN || !codec->profiles) return NULL; for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++) if (p->profile == profile) return p->name; return NULL; } const char *avcodec_profile_name(enum AVCodecID codec_id, int profile) { const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id); const AVProfile *p; if (profile == FF_PROFILE_UNKNOWN || !desc || !desc->profiles) return NULL; for (p = desc->profiles; p->profile != FF_PROFILE_UNKNOWN; p++) if (p->profile == profile) return p->name; return NULL; } unsigned avcodec_version(void) { av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563); av_assert0(AV_CODEC_ID_ADPCM_G722==69660); av_assert0(AV_CODEC_ID_SRT==94216); av_assert0(LIBAVCODEC_VERSION_MICRO >= 100); return LIBAVCODEC_VERSION_INT; } const char *avcodec_configuration(void) { return FFMPEG_CONFIGURATION; } const char *avcodec_license(void) { #define LICENSE_PREFIX "libavcodec license: " return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; } int av_get_exact_bits_per_sample(enum AVCodecID codec_id) { switch (codec_id) { case AV_CODEC_ID_8SVX_EXP: case AV_CODEC_ID_8SVX_FIB: case AV_CODEC_ID_ADPCM_CT: case AV_CODEC_ID_ADPCM_IMA_APC: case AV_CODEC_ID_ADPCM_IMA_EA_SEAD: case AV_CODEC_ID_ADPCM_IMA_OKI: case AV_CODEC_ID_ADPCM_IMA_WS: case AV_CODEC_ID_ADPCM_G722: case AV_CODEC_ID_ADPCM_YAMAHA: case AV_CODEC_ID_ADPCM_AICA: return 4; case AV_CODEC_ID_DSD_LSBF: case AV_CODEC_ID_DSD_MSBF: case AV_CODEC_ID_DSD_LSBF_PLANAR: case AV_CODEC_ID_DSD_MSBF_PLANAR: case AV_CODEC_ID_PCM_ALAW: case AV_CODEC_ID_PCM_MULAW: case AV_CODEC_ID_PCM_VIDC: case AV_CODEC_ID_PCM_S8: case AV_CODEC_ID_PCM_S8_PLANAR: case AV_CODEC_ID_PCM_U8: case AV_CODEC_ID_PCM_ZORK: case AV_CODEC_ID_SDX2_DPCM: return 8; case AV_CODEC_ID_PCM_S16BE: case AV_CODEC_ID_PCM_S16BE_PLANAR: case AV_CODEC_ID_PCM_S16LE: case AV_CODEC_ID_PCM_S16LE_PLANAR: case AV_CODEC_ID_PCM_U16BE: case AV_CODEC_ID_PCM_U16LE: return 16; case AV_CODEC_ID_PCM_S24DAUD: case AV_CODEC_ID_PCM_S24BE: case AV_CODEC_ID_PCM_S24LE: case AV_CODEC_ID_PCM_S24LE_PLANAR: case AV_CODEC_ID_PCM_U24BE: case AV_CODEC_ID_PCM_U24LE: return 24; case AV_CODEC_ID_PCM_S32BE: case AV_CODEC_ID_PCM_S32LE: case AV_CODEC_ID_PCM_S32LE_PLANAR: case AV_CODEC_ID_PCM_U32BE: case AV_CODEC_ID_PCM_U32LE: case AV_CODEC_ID_PCM_F32BE: case AV_CODEC_ID_PCM_F32LE: case AV_CODEC_ID_PCM_F24LE: case AV_CODEC_ID_PCM_F16LE: return 32; case AV_CODEC_ID_PCM_F64BE: case AV_CODEC_ID_PCM_F64LE: case AV_CODEC_ID_PCM_S64BE: case AV_CODEC_ID_PCM_S64LE: return 64; default: return 0; } } enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be) { static const enum AVCodecID map[AV_SAMPLE_FMT_NB][2] = { [AV_SAMPLE_FMT_U8 ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, [AV_SAMPLE_FMT_S16 ] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, [AV_SAMPLE_FMT_S32 ] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, [AV_SAMPLE_FMT_FLT ] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, [AV_SAMPLE_FMT_DBL ] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, [AV_SAMPLE_FMT_U8P ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, [AV_SAMPLE_FMT_S16P] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, [AV_SAMPLE_FMT_S32P] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, [AV_SAMPLE_FMT_S64P] = { AV_CODEC_ID_PCM_S64LE, AV_CODEC_ID_PCM_S64BE }, [AV_SAMPLE_FMT_FLTP] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, [AV_SAMPLE_FMT_DBLP] = { 
AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, }; if (fmt < 0 || fmt >= AV_SAMPLE_FMT_NB) return AV_CODEC_ID_NONE; if (be < 0 || be > 1) be = AV_NE(1, 0); return map[fmt][be]; } int av_get_bits_per_sample(enum AVCodecID codec_id) { switch (codec_id) { case AV_CODEC_ID_ADPCM_SBPRO_2: return 2; case AV_CODEC_ID_ADPCM_SBPRO_3: return 3; case AV_CODEC_ID_ADPCM_SBPRO_4: case AV_CODEC_ID_ADPCM_IMA_WAV: case AV_CODEC_ID_ADPCM_IMA_QT: case AV_CODEC_ID_ADPCM_SWF: case AV_CODEC_ID_ADPCM_MS: return 4; default: return av_get_exact_bits_per_sample(codec_id); } } static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba, uint32_t tag, int bits_per_coded_sample, int64_t bitrate, uint8_t * extradata, int frame_size, int frame_bytes) { int bps = av_get_exact_bits_per_sample(id); int framecount = (ba > 0 && frame_bytes / ba > 0) ? frame_bytes / ba : 1; /* codecs with an exact constant bits per sample */ if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768) return (frame_bytes * 8LL) / (bps * ch); bps = bits_per_coded_sample; /* codecs with a fixed packet duration */ switch (id) { case AV_CODEC_ID_ADPCM_ADX: return 32; case AV_CODEC_ID_ADPCM_IMA_QT: return 64; case AV_CODEC_ID_ADPCM_EA_XAS: return 128; case AV_CODEC_ID_AMR_NB: case AV_CODEC_ID_EVRC: case AV_CODEC_ID_GSM: case AV_CODEC_ID_QCELP: case AV_CODEC_ID_RA_288: return 160; case AV_CODEC_ID_AMR_WB: case AV_CODEC_ID_GSM_MS: return 320; case AV_CODEC_ID_MP1: return 384; case AV_CODEC_ID_ATRAC1: return 512; case AV_CODEC_ID_ATRAC9: case AV_CODEC_ID_ATRAC3: return 1024 * framecount; case AV_CODEC_ID_ATRAC3P: return 2048; case AV_CODEC_ID_MP2: case AV_CODEC_ID_MUSEPACK7: return 1152; case AV_CODEC_ID_AC3: return 1536; } if (sr > 0) { /* calc from sample rate */ if (id == AV_CODEC_ID_TTA) return 256 * sr / 245; else if (id == AV_CODEC_ID_DST) return 588 * sr / 44100; if (ch > 0) { /* calc from sample rate and channels */ if (id == AV_CODEC_ID_BINKAUDIO_DCT) return (480 << (sr / 22050)) / ch; } if (id == AV_CODEC_ID_MP3) return sr <= 24000 ? 
576 : 1152; } if (ba > 0) { /* calc from block_align */ if (id == AV_CODEC_ID_SIPR) { switch (ba) { case 20: return 160; case 19: return 144; case 29: return 288; case 37: return 480; } } else if (id == AV_CODEC_ID_ILBC) { switch (ba) { case 38: return 160; case 50: return 240; } } } if (frame_bytes > 0) { /* calc from frame_bytes only */ if (id == AV_CODEC_ID_TRUESPEECH) return 240 * (frame_bytes / 32); if (id == AV_CODEC_ID_NELLYMOSER) return 256 * (frame_bytes / 64); if (id == AV_CODEC_ID_RA_144) return 160 * (frame_bytes / 20); if (bps > 0) { /* calc from frame_bytes and bits_per_coded_sample */ if (id == AV_CODEC_ID_ADPCM_G726 || id == AV_CODEC_ID_ADPCM_G726LE) return frame_bytes * 8 / bps; } if (ch > 0 && ch < INT_MAX/16) { /* calc from frame_bytes and channels */ switch (id) { case AV_CODEC_ID_ADPCM_AFC: return frame_bytes / (9 * ch) * 16; case AV_CODEC_ID_ADPCM_PSX: case AV_CODEC_ID_ADPCM_DTK: return frame_bytes / (16 * ch) * 28; case AV_CODEC_ID_ADPCM_4XM: case AV_CODEC_ID_ADPCM_IMA_DAT4: case AV_CODEC_ID_ADPCM_IMA_ISS: return (frame_bytes - 4 * ch) * 2 / ch; case AV_CODEC_ID_ADPCM_IMA_SMJPEG: return (frame_bytes - 4) * 2 / ch; case AV_CODEC_ID_ADPCM_IMA_AMV: return (frame_bytes - 8) * 2 / ch; case AV_CODEC_ID_ADPCM_THP: case AV_CODEC_ID_ADPCM_THP_LE: if (extradata) return frame_bytes * 14 / (8 * ch); break; case AV_CODEC_ID_ADPCM_XA: return (frame_bytes / 128) * 224 / ch; case AV_CODEC_ID_INTERPLAY_DPCM: return (frame_bytes - 6 - ch) / ch; case AV_CODEC_ID_ROQ_DPCM: return (frame_bytes - 8) / ch; case AV_CODEC_ID_XAN_DPCM: return (frame_bytes - 2 * ch) / ch; case AV_CODEC_ID_MACE3: return 3 * frame_bytes / ch; case AV_CODEC_ID_MACE6: return 6 * frame_bytes / ch; case AV_CODEC_ID_PCM_LXF: return 2 * (frame_bytes / (5 * ch)); case AV_CODEC_ID_IAC: case AV_CODEC_ID_IMC: return 4 * frame_bytes / ch; } if (tag) { /* calc from frame_bytes, channels, and codec_tag */ if (id == AV_CODEC_ID_SOL_DPCM) { if (tag == 3) return frame_bytes / ch; else return frame_bytes * 2 / ch; } } if (ba > 0) { /* calc from frame_bytes, channels, and block_align */ int blocks = frame_bytes / ba; switch (id) { case AV_CODEC_ID_ADPCM_IMA_WAV: if (bps < 2 || bps > 5) return 0; return blocks * (1 + (ba - 4 * ch) / (bps * ch) * 8); case AV_CODEC_ID_ADPCM_IMA_DK3: return blocks * (((ba - 16) * 2 / 3 * 4) / ch); case AV_CODEC_ID_ADPCM_IMA_DK4: return blocks * (1 + (ba - 4 * ch) * 2 / ch); case AV_CODEC_ID_ADPCM_IMA_RAD: return blocks * ((ba - 4 * ch) * 2 / ch); case AV_CODEC_ID_ADPCM_MS: return blocks * (2 + (ba - 7 * ch) * 2 / ch); case AV_CODEC_ID_ADPCM_MTAF: return blocks * (ba - 16) * 2 / ch; } } if (bps > 0) { /* calc from frame_bytes, channels, and bits_per_coded_sample */ switch (id) { case AV_CODEC_ID_PCM_DVD: if(bps<4 || frame_bytes<3) return 0; return 2 * ((frame_bytes - 3) / ((bps * 2 / 8) * ch)); case AV_CODEC_ID_PCM_BLURAY: if(bps<4 || frame_bytes<4) return 0; return (frame_bytes - 4) / ((FFALIGN(ch, 2) * bps) / 8); case AV_CODEC_ID_S302M: return 2 * (frame_bytes / ((bps + 4) / 4)) / ch; } } } } /* Fall back on using frame_size */ if (frame_size > 1 && frame_bytes) return frame_size; //For WMA we currently have no other means to calculate duration thus we //do it here by assuming CBR, which is true for all known cases. 
if (bitrate > 0 && frame_bytes > 0 && sr > 0 && ba > 1) { if (id == AV_CODEC_ID_WMAV1 || id == AV_CODEC_ID_WMAV2) return (frame_bytes * 8LL * sr) / bitrate; } return 0; } int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) { return get_audio_frame_duration(avctx->codec_id, avctx->sample_rate, avctx->channels, avctx->block_align, avctx->codec_tag, avctx->bits_per_coded_sample, avctx->bit_rate, avctx->extradata, avctx->frame_size, frame_bytes); } int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes) { return get_audio_frame_duration(par->codec_id, par->sample_rate, par->channels, par->block_align, par->codec_tag, par->bits_per_coded_sample, par->bit_rate, par->extradata, par->frame_size, frame_bytes); } #if !HAVE_THREADS int ff_thread_init(AVCodecContext *s) { return -1; } #endif unsigned int av_xiphlacing(unsigned char *s, unsigned int v) { unsigned int n = 0; while (v >= 0xff) { *s++ = 0xff; v -= 0xff; n++; } *s = v; n++; return n; } int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b) { int i; for (i = 0; i < size && !(tab[i][0] == a && tab[i][1] == b); i++) ; return i; } const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index) { int i; if (!codec->hw_configs || index < 0) return NULL; for (i = 0; i <= index; i++) if (!codec->hw_configs[i]) return NULL; return &codec->hw_configs[index]->public; } #if FF_API_USER_VISIBLE_AVHWACCEL AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel) { return NULL; } void av_register_hwaccel(AVHWAccel *hwaccel) { } #endif #if FF_API_LOCKMGR int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)) { return 0; } #endif unsigned int avpriv_toupper4(unsigned int x) { return av_toupper(x & 0xFF) + (av_toupper((x >> 8) & 0xFF) << 8) + (av_toupper((x >> 16) & 0xFF) << 16) + ((unsigned)av_toupper((x >> 24) & 0xFF) << 24); } int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src) { int ret; dst->owner[0] = src->owner[0]; dst->owner[1] = src->owner[1]; ret = av_frame_ref(dst->f, src->f); if (ret < 0) return ret; av_assert0(!dst->progress); if (src->progress && !(dst->progress = av_buffer_ref(src->progress))) { ff_thread_release_buffer(dst->owner[0], dst); return AVERROR(ENOMEM); } return 0; } #if !HAVE_THREADS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) { return ff_get_format(avctx, fmt); } int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags) { f->owner[0] = f->owner[1] = avctx; return ff_get_buffer(avctx, f->f, flags); } void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f) { if (f->f) av_frame_unref(f->f); } void ff_thread_finish_setup(AVCodecContext *avctx) { } void ff_thread_report_progress(ThreadFrame *f, int progress, int field) { } void ff_thread_await_progress(ThreadFrame *f, int progress, int field) { } int ff_thread_can_start_frame(AVCodecContext *avctx) { return 1; } int ff_alloc_entries(AVCodecContext *avctx, int count) { return 0; } void ff_reset_entries(AVCodecContext *avctx) { } void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift) { } void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n) { } #endif int avcodec_is_open(AVCodecContext *s) { return !!s->internal; } int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf) { int ret; char *str; ret = av_bprint_finalize(buf, &str); if (ret < 0) return ret; if (!av_bprint_is_complete(buf)) { av_free(str); return AVERROR(ENOMEM); } avctx->extradata = 
str; /* Note: the string is NUL terminated (so extradata can be read as a * string), but the ending character is not accounted in the size (in * binary formats you are likely not supposed to mux that character). When * extradata is copied, it is also padded with AV_INPUT_BUFFER_PADDING_SIZE * zeros. */ avctx->extradata_size = buf->len; return 0; } const uint8_t *avpriv_find_start_code(const uint8_t *av_restrict p, const uint8_t *end, uint32_t *av_restrict state) { int i; av_assert0(p <= end); if (p >= end) return end; for (i = 0; i < 3; i++) { uint32_t tmp = *state << 8; *state = tmp + *(p++); if (tmp == 0x100 || p == end) return p; } while (p < end) { if (p[-1] > 1 ) p += 3; else if (p[-2] ) p += 2; else if (p[-3]|(p[-1]-1)) p++; else { p++; break; } } p = FFMIN(p, end) - 4; *state = AV_RB32(p); return p + 4; } AVCPBProperties *av_cpb_properties_alloc(size_t *size) { AVCPBProperties *props = av_mallocz(sizeof(AVCPBProperties)); if (!props) return NULL; if (size) *size = sizeof(*props); props->vbv_delay = UINT64_MAX; return props; } AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx) { AVPacketSideData *tmp; AVCPBProperties *props; size_t size; props = av_cpb_properties_alloc(&size); if (!props) return NULL; tmp = av_realloc_array(avctx->coded_side_data, avctx->nb_coded_side_data + 1, sizeof(*tmp)); if (!tmp) { av_freep(&props); return NULL; } avctx->coded_side_data = tmp; avctx->nb_coded_side_data++; avctx->coded_side_data[avctx->nb_coded_side_data - 1].type = AV_PKT_DATA_CPB_PROPERTIES; avctx->coded_side_data[avctx->nb_coded_side_data - 1].data = (uint8_t*)props; avctx->coded_side_data[avctx->nb_coded_side_data - 1].size = size; return props; } static void codec_parameters_reset(AVCodecParameters *par) { av_freep(&par->extradata); memset(par, 0, sizeof(*par)); par->codec_type = AVMEDIA_TYPE_UNKNOWN; par->codec_id = AV_CODEC_ID_NONE; par->format = -1; par->field_order = AV_FIELD_UNKNOWN; par->color_range = AVCOL_RANGE_UNSPECIFIED; par->color_primaries = AVCOL_PRI_UNSPECIFIED; par->color_trc = AVCOL_TRC_UNSPECIFIED; par->color_space = AVCOL_SPC_UNSPECIFIED; par->chroma_location = AVCHROMA_LOC_UNSPECIFIED; par->sample_aspect_ratio = (AVRational){ 0, 1 }; par->profile = FF_PROFILE_UNKNOWN; par->level = FF_LEVEL_UNKNOWN; } AVCodecParameters *avcodec_parameters_alloc(void) { AVCodecParameters *par = av_mallocz(sizeof(*par)); if (!par) return NULL; codec_parameters_reset(par); return par; } void avcodec_parameters_free(AVCodecParameters **ppar) { AVCodecParameters *par = *ppar; if (!par) return; codec_parameters_reset(par); av_freep(ppar); } int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src) { codec_parameters_reset(dst); memcpy(dst, src, sizeof(*dst)); dst->extradata = NULL; dst->extradata_size = 0; if (src->extradata) { dst->extradata = av_mallocz(src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!dst->extradata) return AVERROR(ENOMEM); memcpy(dst->extradata, src->extradata, src->extradata_size); dst->extradata_size = src->extradata_size; } return 0; } int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec) { codec_parameters_reset(par); par->codec_type = codec->codec_type; par->codec_id = codec->codec_id; par->codec_tag = codec->codec_tag; par->bit_rate = codec->bit_rate; par->bits_per_coded_sample = codec->bits_per_coded_sample; par->bits_per_raw_sample = codec->bits_per_raw_sample; par->profile = codec->profile; par->level = codec->level; switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: par->format = 
codec->pix_fmt; par->width = codec->width; par->height = codec->height; par->field_order = codec->field_order; par->color_range = codec->color_range; par->color_primaries = codec->color_primaries; par->color_trc = codec->color_trc; par->color_space = codec->colorspace; par->chroma_location = codec->chroma_sample_location; par->sample_aspect_ratio = codec->sample_aspect_ratio; par->video_delay = codec->has_b_frames; break; case AVMEDIA_TYPE_AUDIO: par->format = codec->sample_fmt; par->channel_layout = codec->channel_layout; par->channels = codec->channels; par->sample_rate = codec->sample_rate; par->block_align = codec->block_align; par->frame_size = codec->frame_size; par->initial_padding = codec->initial_padding; par->trailing_padding = codec->trailing_padding; par->seek_preroll = codec->seek_preroll; break; case AVMEDIA_TYPE_SUBTITLE: par->width = codec->width; par->height = codec->height; break; } if (codec->extradata) { par->extradata = av_mallocz(codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!par->extradata) return AVERROR(ENOMEM); memcpy(par->extradata, codec->extradata, codec->extradata_size); par->extradata_size = codec->extradata_size; } return 0; } int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par) { codec->codec_type = par->codec_type; codec->codec_id = par->codec_id; codec->codec_tag = par->codec_tag; codec->bit_rate = par->bit_rate; codec->bits_per_coded_sample = par->bits_per_coded_sample; codec->bits_per_raw_sample = par->bits_per_raw_sample; codec->profile = par->profile; codec->level = par->level; switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = par->format; codec->width = par->width; codec->height = par->height; codec->field_order = par->field_order; codec->color_range = par->color_range; codec->color_primaries = par->color_primaries; codec->color_trc = par->color_trc; codec->colorspace = par->color_space; codec->chroma_sample_location = par->chroma_location; codec->sample_aspect_ratio = par->sample_aspect_ratio; codec->has_b_frames = par->video_delay; break; case AVMEDIA_TYPE_AUDIO: codec->sample_fmt = par->format; codec->channel_layout = par->channel_layout; codec->channels = par->channels; codec->sample_rate = par->sample_rate; codec->block_align = par->block_align; codec->frame_size = par->frame_size; codec->delay = codec->initial_padding = par->initial_padding; codec->trailing_padding = par->trailing_padding; codec->seek_preroll = par->seek_preroll; break; case AVMEDIA_TYPE_SUBTITLE: codec->width = par->width; codec->height = par->height; break; } if (par->extradata) { av_freep(&codec->extradata); codec->extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!codec->extradata) return AVERROR(ENOMEM); memcpy(codec->extradata, par->extradata, par->extradata_size); codec->extradata_size = par->extradata_size; } return 0; } int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len, void **data, size_t *sei_size) { AVFrameSideData *side_data = NULL; uint8_t *sei_data; if (frame) side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC); if (!side_data) { *data = NULL; return 0; } *sei_size = side_data->size + 11; *data = av_mallocz(*sei_size + prefix_len); if (!*data) return AVERROR(ENOMEM); sei_data = (uint8_t*)*data + prefix_len; // country code sei_data[0] = 181; sei_data[1] = 0; sei_data[2] = 49; /** * 'GA94' is standard in North America for ATSC, but hard coding * this style may not be the right thing to do -- other formats * do exist. 
This information is not available in the side_data * so we are going with this right now. */ AV_WL32(sei_data + 3, MKTAG('G', 'A', '9', '4')); sei_data[7] = 3; sei_data[8] = ((side_data->size/3) & 0x1f) | 0x40; sei_data[9] = 0; memcpy(sei_data + 10, side_data->data, side_data->size); sei_data[side_data->size+10] = 255; return 0; } int64_t ff_guess_coded_bitrate(AVCodecContext *avctx) { AVRational framerate = avctx->framerate; int bits_per_coded_sample = avctx->bits_per_coded_sample; int64_t bitrate; if (!(framerate.num && framerate.den)) framerate = av_inv_q(avctx->time_base); if (!(framerate.num && framerate.den)) return 0; if (!bits_per_coded_sample) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); bits_per_coded_sample = av_get_bits_per_pixel(desc); } bitrate = (int64_t)bits_per_coded_sample * avctx->width * avctx->height * framerate.num / framerate.den; return bitrate; } int ff_int_from_list_or_default(void *ctx, const char * val_name, int val, const int * array_valid_values, int default_value) { int i = 0, ref_val; while (1) { ref_val = array_valid_values[i]; if (ref_val == INT_MAX) break; if (val == ref_val) return val; i++; } /* val is not a valid value */ av_log(ctx, AV_LOG_DEBUG, "%s %d are not supported. Set to default value : %d\n", val_name, val, default_value); return default_value; }
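The listing above closes with the small helpers (ff_guess_coded_bitrate(), ff_int_from_list_or_default()) that end libavcodec/utils.c. As a quick orientation, here is a minimal, hypothetical usage sketch of the avcodec_open2() entry point shown in that listing; it is not part of the original file, and the codec id and error handling are purely illustrative.

#include <libavcodec/avcodec.h>

int open_decoder_example(void)
{
    /* AV_CODEC_ID_H264 is just an example; any installed decoder works. */
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *avctx = NULL;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return AVERROR(ENOMEM);

    /* avcodec_open2() performs the validation shown in the listing above
     * (dimensions, sample rate, channel layout, ...) and then calls the
     * codec's init(); on failure it cleans up via its free_and_end path. */
    ret = avcodec_open2(avctx, codec, NULL);
    if (ret < 0) {
        avcodec_free_context(&avctx);
        return ret;
    }

    /* ... send packets / receive frames here ... */

    avcodec_free_context(&avctx);   /* also closes the open context */
    return 0;
}

The standalone copies of avcodec_open2() that follow appear to be this record's func_before/func_after fields.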
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) { int ret = 0; int codec_init_ok = 0; AVDictionary *tmp = NULL; const AVPixFmtDescriptor *pixdesc; if (avcodec_is_open(avctx)) return 0; if ((!codec && !avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n"); return AVERROR(EINVAL); } if ((codec && avctx->codec && codec != avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, " "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name); return AVERROR(EINVAL); } if (!codec) codec = avctx->codec; if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE) return AVERROR(EINVAL); if (options) av_dict_copy(&tmp, *options, 0); ff_lock_avcodec(avctx, codec); avctx->internal = av_mallocz(sizeof(*avctx->internal)); if (!avctx->internal) { ret = AVERROR(ENOMEM); goto end; } avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool)); if (!avctx->internal->pool) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->to_free = av_frame_alloc(); if (!avctx->internal->to_free) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->compat_decode_frame = av_frame_alloc(); if (!avctx->internal->compat_decode_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->buffer_frame = av_frame_alloc(); if (!avctx->internal->buffer_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->buffer_pkt = av_packet_alloc(); if (!avctx->internal->buffer_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->ds.in_pkt = av_packet_alloc(); if (!avctx->internal->ds.in_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->last_pkt_props = av_packet_alloc(); if (!avctx->internal->last_pkt_props) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->skip_samples_multiplier = 1; if (codec->priv_data_size > 0) { if (!avctx->priv_data) { avctx->priv_data = av_mallocz(codec->priv_data_size); if (!avctx->priv_data) { ret = AVERROR(ENOMEM); goto end; } if (codec->priv_class) { *(const AVClass **)avctx->priv_data = codec->priv_class; av_opt_set_defaults(avctx->priv_data); } } if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0) goto free_and_end; } else { avctx->priv_data = NULL; } if ((ret = av_opt_set_dict(avctx, &tmp)) < 0) goto free_and_end; if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) { av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist); ret = AVERROR(EINVAL); goto free_and_end; } // only call ff_set_dimensions() for non H.264/VP6F/DXV codecs so as not to overwrite previously setup dimensions if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height && (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) { if (avctx->coded_width && avctx->coded_height) ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); else if (avctx->width && avctx->height) ret = ff_set_dimensions(avctx, avctx->width, avctx->height); if (ret < 0) goto free_and_end; } if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height) && ( av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0 || av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) { av_log(avctx, 
AV_LOG_WARNING, "Ignoring invalid width/height values\n"); ff_set_dimensions(avctx, 0, 0); } if (avctx->width > 0 && avctx->height > 0) { if (av_image_check_sar(avctx->width, avctx->height, avctx->sample_aspect_ratio) < 0) { av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den); avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; } } /* if the decoder init function was already called previously, * free the already allocated subtitle_header before overwriting it */ if (av_codec_is_decoder(codec)) av_freep(&avctx->subtitle_header); if (avctx->channels > FF_SANE_NB_CHANNELS) { av_log(avctx, AV_LOG_ERROR, "Too many channels: %d\n", avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } avctx->codec = codec; if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) && avctx->codec_id == AV_CODEC_ID_NONE) { avctx->codec_type = codec->type; avctx->codec_id = codec->id; } if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) { av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n"); ret = AVERROR(EINVAL); goto free_and_end; } avctx->frame_number = 0; avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id); if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder"; AVCodec *codec2; av_log(avctx, AV_LOG_ERROR, "The %s '%s' is experimental but experimental codecs are not enabled, " "add '-strict %d' if you want to use it.\n", codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL); codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id); if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL)) av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n", codec_string, codec2->name); ret = AVERROR_EXPERIMENTAL; goto free_and_end; } if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && (!avctx->time_base.num || !avctx->time_base.den)) { avctx->time_base.num = 1; avctx->time_base.den = avctx->sample_rate; } if (!HAVE_THREADS) av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n"); if (CONFIG_FRAME_THREAD_ENCODER && av_codec_is_encoder(avctx->codec)) { ff_unlock_avcodec(codec); //we will instantiate a few encoders thus kick the counter to prevent false detection of a problem ret = ff_frame_thread_encoder_init(avctx, options ? 
*options : NULL); ff_lock_avcodec(avctx, codec); if (ret < 0) goto free_and_end; } if (av_codec_is_decoder(avctx->codec)) { ret = ff_decode_bsfs_init(avctx); if (ret < 0) goto free_and_end; } if (HAVE_THREADS && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) { ret = ff_thread_init(avctx); if (ret < 0) { goto free_and_end; } } if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS)) avctx->thread_count = 1; if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) { av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n", avctx->codec->max_lowres); avctx->lowres = avctx->codec->max_lowres; } if (av_codec_is_encoder(avctx->codec)) { int i; #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } FF_ENABLE_DEPRECATION_WARNINGS #endif if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) { av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n"); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->sample_fmts) { for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) { if (avctx->sample_fmt == avctx->codec->sample_fmts[i]) break; if (avctx->channels == 1 && av_get_planar_sample_fmt(avctx->sample_fmt) == av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) { avctx->sample_fmt = avctx->codec->sample_fmts[i]; break; } } if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) { char buf[128]; snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt); av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n", (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf)); ret = AVERROR(EINVAL); goto free_and_end; } } if (avctx->codec->pix_fmts) { for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++) if (avctx->pix_fmt == avctx->codec->pix_fmts[i]) break; if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG) && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) { char buf[128]; snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt); av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n", (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf)); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P) avctx->color_range = AVCOL_RANGE_JPEG; } if (avctx->codec->supported_samplerates) { for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++) if (avctx->sample_rate == avctx->codec->supported_samplerates[i]) break; if (avctx->codec->supported_samplerates[i] == 0) { av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", avctx->sample_rate); ret = AVERROR(EINVAL); goto free_and_end; } } if (avctx->sample_rate < 0) { av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", avctx->sample_rate); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->channel_layouts) { if (!avctx->channel_layout) { av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n"); } else { for (i = 0; avctx->codec->channel_layouts[i] != 0; i++) if (avctx->channel_layout == avctx->codec->channel_layouts[i]) break; if 
(avctx->codec->channel_layouts[i] == 0) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf); ret = AVERROR(EINVAL); goto free_and_end; } } } if (avctx->channel_layout && avctx->channels) { int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); if (channels != avctx->channels) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_ERROR, "Channel layout '%s' with %d channels does not match number of specified channels %d\n", buf, channels, avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } } else if (avctx->channel_layout) { avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout); } if (avctx->channels < 0) { av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n", avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) { pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt); if ( avctx->bits_per_raw_sample < 0 || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) { av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n", avctx->bits_per_raw_sample, pixdesc->comp[0].depth); avctx->bits_per_raw_sample = pixdesc->comp[0].depth; } if (avctx->width <= 0 || avctx->height <= 0) { av_log(avctx, AV_LOG_ERROR, "dimensions not set\n"); ret = AVERROR(EINVAL); goto free_and_end; } } if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) && avctx->bit_rate>0 && avctx->bit_rate<1000) { av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate); } if (!avctx->rc_initial_buffer_occupancy) avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4; if (avctx->ticks_per_frame && avctx->time_base.num && avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) { av_log(avctx, AV_LOG_ERROR, "ticks_per_frame %d too large for the timebase %d/%d.", avctx->ticks_per_frame, avctx->time_base.num, avctx->time_base.den); goto free_and_end; } if (avctx->hw_frames_ctx) { AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; if (frames_ctx->format != avctx->pix_fmt) { av_log(avctx, AV_LOG_ERROR, "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n"); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE && avctx->sw_pix_fmt != frames_ctx->sw_format) { av_log(avctx, AV_LOG_ERROR, "Mismatching AVCodecContext.sw_pix_fmt (%s) " "and AVHWFramesContext.sw_format (%s)\n", av_get_pix_fmt_name(avctx->sw_pix_fmt), av_get_pix_fmt_name(frames_ctx->sw_format)); ret = AVERROR(EINVAL); goto free_and_end; } avctx->sw_pix_fmt = frames_ctx->sw_format; } } avctx->pts_correction_num_faulty_pts = avctx->pts_correction_num_faulty_dts = 0; avctx->pts_correction_last_pts = avctx->pts_correction_last_dts = INT64_MIN; if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO) av_log(avctx, AV_LOG_WARNING, "gray decoding requested but not enabled at configuration time\n"); if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME) || avctx->internal->frame_thread_encoder)) { ret = avctx->codec->init(avctx); if (ret < 0) { goto free_and_end; } codec_init_ok = 1; } ret=0; if (av_codec_is_decoder(avctx->codec)) { if (!avctx->bit_rate) 
avctx->bit_rate = get_bit_rate(avctx); /* validate channel layout from the decoder */ if (avctx->channel_layout) { int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); if (!avctx->channels) avctx->channels = channels; else if (channels != avctx->channels) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_WARNING, "Channel layout '%s' with %d channels does not match specified number of channels %d: " "ignoring specified channel layout\n", buf, channels, avctx->channels); avctx->channel_layout = 0; } } if (avctx->channels && avctx->channels < 0 || avctx->channels > FF_SANE_NB_CHANNELS) { ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->bits_per_coded_sample < 0) { ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->sub_charenc) { if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) { av_log(avctx, AV_LOG_ERROR, "Character encoding is only " "supported with subtitles codecs\n"); ret = AVERROR(EINVAL); goto free_and_end; } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) { av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, " "subtitles character encoding will be ignored\n", avctx->codec_descriptor->name); avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING; } else { /* input character encoding is set for a text based subtitle * codec at this point */ if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC) avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER; if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) { #if CONFIG_ICONV iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc); if (cd == (iconv_t)-1) { ret = AVERROR(errno); av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context " "with input character encoding \"%s\"\n", avctx->sub_charenc); goto free_and_end; } iconv_close(cd); #else av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles " "conversion needs a libavcodec built with iconv support " "for this codec\n"); ret = AVERROR(ENOSYS); goto free_and_end; #endif } } } #if FF_API_AVCTX_TIMEBASE if (avctx->framerate.num > 0 && avctx->framerate.den > 0) avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); #endif } if (codec->priv_data_size > 0 && avctx->priv_data && codec->priv_class) { av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class); } end: ff_unlock_avcodec(codec); if (options) { av_dict_free(options); *options = tmp; } return ret; free_and_end: if (avctx->codec && (codec_init_ok || (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP))) avctx->codec->close(avctx); if (codec->priv_class && codec->priv_data_size) av_opt_free(avctx->priv_data); av_opt_free(avctx); #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS av_frame_free(&avctx->coded_frame); FF_ENABLE_DEPRECATION_WARNINGS #endif av_dict_free(&tmp); av_freep(&avctx->priv_data); if (avctx->internal) { av_frame_free(&avctx->internal->to_free); av_frame_free(&avctx->internal->compat_decode_frame); av_frame_free(&avctx->internal->buffer_frame); av_packet_free(&avctx->internal->buffer_pkt); av_packet_free(&avctx->internal->last_pkt_props); av_packet_free(&avctx->internal->ds.in_pkt); ff_decode_bsfs_uninit(avctx); av_freep(&avctx->internal->pool); } av_freep(&avctx->internal); avctx->codec = NULL; goto end; }
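Compared with the avcodec_open2() body embedded in the full listing earlier, the copy that ends here differs only in its free_and_end error path: it calls avctx->codec->close(avctx) without first verifying that the codec actually provides a close() callback. The fuller variant guards that call as sketched below (the condition is taken from the earlier listing; the comment is added here and is not in the source).

free_and_end:
    /* Invoke the codec's close() callback only if one exists and either
     * init() succeeded or the codec declares FF_CODEC_CAP_INIT_CLEANUP;
     * without the close check, a codec that has no close() callback would
     * be called through a NULL function pointer on this error path. */
    if (avctx->codec && avctx->codec->close &&
        (codec_init_ok ||
         (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP)))
        avctx->codec->close(avctx);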
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) { int ret = 0; int codec_init_ok = 0; AVDictionary *tmp = NULL; const AVPixFmtDescriptor *pixdesc; if (avcodec_is_open(avctx)) return 0; if ((!codec && !avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n"); return AVERROR(EINVAL); } if ((codec && avctx->codec && codec != avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, " "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name); return AVERROR(EINVAL); } if (!codec) codec = avctx->codec; if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE) return AVERROR(EINVAL); if (options) av_dict_copy(&tmp, *options, 0); ff_lock_avcodec(avctx, codec); avctx->internal = av_mallocz(sizeof(*avctx->internal)); if (!avctx->internal) { ret = AVERROR(ENOMEM); goto end; } avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool)); if (!avctx->internal->pool) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->to_free = av_frame_alloc(); if (!avctx->internal->to_free) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->compat_decode_frame = av_frame_alloc(); if (!avctx->internal->compat_decode_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->buffer_frame = av_frame_alloc(); if (!avctx->internal->buffer_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->buffer_pkt = av_packet_alloc(); if (!avctx->internal->buffer_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->ds.in_pkt = av_packet_alloc(); if (!avctx->internal->ds.in_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->last_pkt_props = av_packet_alloc(); if (!avctx->internal->last_pkt_props) { ret = AVERROR(ENOMEM); goto free_and_end; } avctx->internal->skip_samples_multiplier = 1; if (codec->priv_data_size > 0) { if (!avctx->priv_data) { avctx->priv_data = av_mallocz(codec->priv_data_size); if (!avctx->priv_data) { ret = AVERROR(ENOMEM); goto end; } if (codec->priv_class) { *(const AVClass **)avctx->priv_data = codec->priv_class; av_opt_set_defaults(avctx->priv_data); } } if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0) goto free_and_end; } else { avctx->priv_data = NULL; } if ((ret = av_opt_set_dict(avctx, &tmp)) < 0) goto free_and_end; if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) { av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist); ret = AVERROR(EINVAL); goto free_and_end; } // only call ff_set_dimensions() for non H.264/VP6F/DXV codecs so as not to overwrite previously setup dimensions if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height && (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) { if (avctx->coded_width && avctx->coded_height) ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); else if (avctx->width && avctx->height) ret = ff_set_dimensions(avctx, avctx->width, avctx->height); if (ret < 0) goto free_and_end; } if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height) && ( av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0 || av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) { av_log(avctx, 
AV_LOG_WARNING, "Ignoring invalid width/height values\n"); ff_set_dimensions(avctx, 0, 0); } if (avctx->width > 0 && avctx->height > 0) { if (av_image_check_sar(avctx->width, avctx->height, avctx->sample_aspect_ratio) < 0) { av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den); avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; } } /* if the decoder init function was already called previously, * free the already allocated subtitle_header before overwriting it */ if (av_codec_is_decoder(codec)) av_freep(&avctx->subtitle_header); if (avctx->channels > FF_SANE_NB_CHANNELS) { av_log(avctx, AV_LOG_ERROR, "Too many channels: %d\n", avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } avctx->codec = codec; if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) && avctx->codec_id == AV_CODEC_ID_NONE) { avctx->codec_type = codec->type; avctx->codec_id = codec->id; } if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) { av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n"); ret = AVERROR(EINVAL); goto free_and_end; } avctx->frame_number = 0; avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id); if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder"; AVCodec *codec2; av_log(avctx, AV_LOG_ERROR, "The %s '%s' is experimental but experimental codecs are not enabled, " "add '-strict %d' if you want to use it.\n", codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL); codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id); if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL)) av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n", codec_string, codec2->name); ret = AVERROR_EXPERIMENTAL; goto free_and_end; } if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && (!avctx->time_base.num || !avctx->time_base.den)) { avctx->time_base.num = 1; avctx->time_base.den = avctx->sample_rate; } if (!HAVE_THREADS) av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n"); if (CONFIG_FRAME_THREAD_ENCODER && av_codec_is_encoder(avctx->codec)) { ff_unlock_avcodec(codec); //we will instantiate a few encoders thus kick the counter to prevent false detection of a problem ret = ff_frame_thread_encoder_init(avctx, options ? 
*options : NULL); ff_lock_avcodec(avctx, codec); if (ret < 0) goto free_and_end; } if (av_codec_is_decoder(avctx->codec)) { ret = ff_decode_bsfs_init(avctx); if (ret < 0) goto free_and_end; } if (HAVE_THREADS && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) { ret = ff_thread_init(avctx); if (ret < 0) { goto free_and_end; } } if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS)) avctx->thread_count = 1; if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) { av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n", avctx->codec->max_lowres); avctx->lowres = avctx->codec->max_lowres; } if (av_codec_is_encoder(avctx->codec)) { int i; #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } FF_ENABLE_DEPRECATION_WARNINGS #endif if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) { av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n"); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->sample_fmts) { for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) { if (avctx->sample_fmt == avctx->codec->sample_fmts[i]) break; if (avctx->channels == 1 && av_get_planar_sample_fmt(avctx->sample_fmt) == av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) { avctx->sample_fmt = avctx->codec->sample_fmts[i]; break; } } if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) { char buf[128]; snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt); av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n", (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf)); ret = AVERROR(EINVAL); goto free_and_end; } } if (avctx->codec->pix_fmts) { for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++) if (avctx->pix_fmt == avctx->codec->pix_fmts[i]) break; if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG) && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) { char buf[128]; snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt); av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n", (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf)); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P || avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P) avctx->color_range = AVCOL_RANGE_JPEG; } if (avctx->codec->supported_samplerates) { for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++) if (avctx->sample_rate == avctx->codec->supported_samplerates[i]) break; if (avctx->codec->supported_samplerates[i] == 0) { av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", avctx->sample_rate); ret = AVERROR(EINVAL); goto free_and_end; } } if (avctx->sample_rate < 0) { av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n", avctx->sample_rate); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->codec->channel_layouts) { if (!avctx->channel_layout) { av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n"); } else { for (i = 0; avctx->codec->channel_layouts[i] != 0; i++) if (avctx->channel_layout == avctx->codec->channel_layouts[i]) break; if 
(avctx->codec->channel_layouts[i] == 0) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf); ret = AVERROR(EINVAL); goto free_and_end; } } } if (avctx->channel_layout && avctx->channels) { int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); if (channels != avctx->channels) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_ERROR, "Channel layout '%s' with %d channels does not match number of specified channels %d\n", buf, channels, avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } } else if (avctx->channel_layout) { avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout); } if (avctx->channels < 0) { av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n", avctx->channels); ret = AVERROR(EINVAL); goto free_and_end; } if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) { pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt); if ( avctx->bits_per_raw_sample < 0 || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) { av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n", avctx->bits_per_raw_sample, pixdesc->comp[0].depth); avctx->bits_per_raw_sample = pixdesc->comp[0].depth; } if (avctx->width <= 0 || avctx->height <= 0) { av_log(avctx, AV_LOG_ERROR, "dimensions not set\n"); ret = AVERROR(EINVAL); goto free_and_end; } } if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) && avctx->bit_rate>0 && avctx->bit_rate<1000) { av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate); } if (!avctx->rc_initial_buffer_occupancy) avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4; if (avctx->ticks_per_frame && avctx->time_base.num && avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) { av_log(avctx, AV_LOG_ERROR, "ticks_per_frame %d too large for the timebase %d/%d.", avctx->ticks_per_frame, avctx->time_base.num, avctx->time_base.den); goto free_and_end; } if (avctx->hw_frames_ctx) { AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; if (frames_ctx->format != avctx->pix_fmt) { av_log(avctx, AV_LOG_ERROR, "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n"); ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE && avctx->sw_pix_fmt != frames_ctx->sw_format) { av_log(avctx, AV_LOG_ERROR, "Mismatching AVCodecContext.sw_pix_fmt (%s) " "and AVHWFramesContext.sw_format (%s)\n", av_get_pix_fmt_name(avctx->sw_pix_fmt), av_get_pix_fmt_name(frames_ctx->sw_format)); ret = AVERROR(EINVAL); goto free_and_end; } avctx->sw_pix_fmt = frames_ctx->sw_format; } } avctx->pts_correction_num_faulty_pts = avctx->pts_correction_num_faulty_dts = 0; avctx->pts_correction_last_pts = avctx->pts_correction_last_dts = INT64_MIN; if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO) av_log(avctx, AV_LOG_WARNING, "gray decoding requested but not enabled at configuration time\n"); if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME) || avctx->internal->frame_thread_encoder)) { ret = avctx->codec->init(avctx); if (ret < 0) { goto free_and_end; } codec_init_ok = 1; } ret=0; if (av_codec_is_decoder(avctx->codec)) { if (!avctx->bit_rate) 
avctx->bit_rate = get_bit_rate(avctx); /* validate channel layout from the decoder */ if (avctx->channel_layout) { int channels = av_get_channel_layout_nb_channels(avctx->channel_layout); if (!avctx->channels) avctx->channels = channels; else if (channels != avctx->channels) { char buf[512]; av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout); av_log(avctx, AV_LOG_WARNING, "Channel layout '%s' with %d channels does not match specified number of channels %d: " "ignoring specified channel layout\n", buf, channels, avctx->channels); avctx->channel_layout = 0; } } if (avctx->channels && avctx->channels < 0 || avctx->channels > FF_SANE_NB_CHANNELS) { ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->bits_per_coded_sample < 0) { ret = AVERROR(EINVAL); goto free_and_end; } if (avctx->sub_charenc) { if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) { av_log(avctx, AV_LOG_ERROR, "Character encoding is only " "supported with subtitles codecs\n"); ret = AVERROR(EINVAL); goto free_and_end; } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) { av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, " "subtitles character encoding will be ignored\n", avctx->codec_descriptor->name); avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING; } else { /* input character encoding is set for a text based subtitle * codec at this point */ if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC) avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER; if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) { #if CONFIG_ICONV iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc); if (cd == (iconv_t)-1) { ret = AVERROR(errno); av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context " "with input character encoding \"%s\"\n", avctx->sub_charenc); goto free_and_end; } iconv_close(cd); #else av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles " "conversion needs a libavcodec built with iconv support " "for this codec\n"); ret = AVERROR(ENOSYS); goto free_and_end; #endif } } } #if FF_API_AVCTX_TIMEBASE if (avctx->framerate.num > 0 && avctx->framerate.den > 0) avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); #endif } if (codec->priv_data_size > 0 && avctx->priv_data && codec->priv_class) { av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class); } end: ff_unlock_avcodec(codec); if (options) { av_dict_free(options); *options = tmp; } return ret; free_and_end: if (avctx->codec && avctx->codec->close && (codec_init_ok || (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP))) avctx->codec->close(avctx); if (codec->priv_class && codec->priv_data_size) av_opt_free(avctx->priv_data); av_opt_free(avctx); #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS av_frame_free(&avctx->coded_frame); FF_ENABLE_DEPRECATION_WARNINGS #endif av_dict_free(&tmp); av_freep(&avctx->priv_data); if (avctx->internal) { av_frame_free(&avctx->internal->to_free); av_frame_free(&avctx->internal->compat_decode_frame); av_frame_free(&avctx->internal->buffer_frame); av_packet_free(&avctx->internal->buffer_pkt); av_packet_free(&avctx->internal->last_pkt_props); av_packet_free(&avctx->internal->ds.in_pkt); ff_decode_bsfs_uninit(avctx); av_freep(&avctx->internal->pool); } av_freep(&avctx->internal); avctx->codec = NULL; goto end; }
{'added': [(1027, ' if (avctx->codec && avctx->codec->close &&')], 'deleted': [(1027, ' if (avctx->codec &&')]}
1
1
1874
12221
https://github.com/FFmpeg/FFmpeg
CVE-2019-17539
['CWE-476']
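The one-line diff recorded above for CVE-2019-17539 (CWE-476, NULL pointer dereference) changes the error-cleanup path so that the codec's close() callback is only invoked when it is actually set. As a reading aid only, the following minimal, self-contained C sketch illustrates that guarded-callback pattern; the struct and function names are hypothetical and are not FFmpeg's API or part of this dataset record.

```c
/* Minimal standalone sketch of the guarded-callback cleanup pattern.
 * Hypothetical names; mirrors the shape of the CVE-2019-17539 fix, where
 * a codec's close() callback may legitimately be NULL and must therefore
 * be checked before it is called. */
#include <stdio.h>
#include <stdlib.h>

struct toy_codec {
    const char *name;
    int (*init)(void *ctx);
    int (*close)(void *ctx);   /* may legitimately be NULL */
};

struct toy_ctx {
    const struct toy_codec *codec;
    void *priv_data;
};

static void toy_cleanup(struct toy_ctx *ctx, int init_ok)
{
    /* Guard both the codec pointer and its close callback: calling a NULL
     * function pointer is undefined behaviour and typically crashes. */
    if (ctx->codec && ctx->codec->close && init_ok)
        ctx->codec->close(ctx);
    free(ctx->priv_data);
    ctx->priv_data = NULL;
    ctx->codec = NULL;
}

int main(void)
{
    /* A codec that provides no close() callback, like the case the fix covers. */
    static const struct toy_codec no_close = { "no-close-codec", NULL, NULL };
    struct toy_ctx ctx = { &no_close, malloc(16) };

    /* Without the ctx->codec->close check, this call would dereference NULL. */
    toy_cleanup(&ctx, 1);
    printf("cleanup finished without crashing\n");
    return 0;
}
```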
verifier.c
regsafe
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/filter.h> #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/stringify.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. * * The first pass is depth-first-search to check that the program is a DAG. * It rejects the following programs: * - larger than BPF_MAXINSNS insns * - if loop is present (detected via back-edge) * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all pathes through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less then 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k * * On entry to each instruction, each register has a type, and the instruction * changes the types of the registers depending on instruction semantics. * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is * copied to R1. * * All registers are 64-bit. * R0 - return register * R1-R5 argument passing registers * R6-R9 callee saved registers * R10 - frame pointer read-only * * At the start of BPF program the register R1 contains a pointer to bpf_context * and has type PTR_TO_CTX. * * Verifier tracks arithmetic operations on pointers in case: * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), * 1st insn copies R10 (which has FRAME_PTR) type into R1 * and 2nd arithmetic instruction is pattern matched to recognize * that it wants to construct a pointer to some element within stack. * So after 2nd insn, the register R1 has type PTR_TO_STACK * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. * * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer * types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible. 
* * registers used to pass values to function calls are checked against * function argument constraints. * * ARG_PTR_TO_MAP_KEY is one of such argument constraints. * It means that the register type passed to this function must be * PTR_TO_STACK and it will be used inside the function as * 'pointer to map element key' * * For example the argument constraints for bpf_map_lookup_elem(): * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, * .arg1_type = ARG_CONST_MAP_PTR, * .arg2_type = ARG_PTR_TO_MAP_KEY, * * ret_type says that this function returns 'pointer to map elem value or null' * function expects 1st argument to be a const pointer to 'struct bpf_map' and * 2nd argument should be a pointer to stack, which will be used inside * the helper function as a pointer to map element key. * * On the kernel side the helper function looks like: * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * { * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; * void *key = (void *) (unsigned long) r2; * void *value; * * here kernel can access 'key' and 'map' pointers safely, knowing that * [key, key + map->key_size) bytes are valid and were initialized on * the stack of eBPF program. * } * * Corresponding eBPF program may look like: * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), * here verifier looks at prototype of map_lookup_elem() and sees: * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, * Now verifier knows that this map has key of R1->map_ptr->key_size bytes * * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, * Now verifier checks that [R2, R2 + map's key_size) are within stack limits * and were initialized prior to this call. * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function * returns ether pointer to map value or NULL. * * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false * branch. See check_cond_jmp_op(). * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ struct bpf_verifier_stack_elem { /* verifer state is 'st' * before processing instruction 'insn_idx' * and after processing instruction 'prev_insn_idx' */ struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; }; #define BPF_COMPLEXITY_LIMIT_INSNS 131072 #define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; int regno; int access_size; }; static DEFINE_MUTEX(bpf_verifier_lock); /* log_level controls verbosity level of eBPF verifier. 
* verbose() is used to dump the verification trace to the log, so the user * can figure out what's wrong with the program */ static __printf(2, 3) void verbose(struct bpf_verifier_env *env, const char *fmt, ...) { struct bpf_verifer_log *log = &env->log; unsigned int n; va_list args; if (!log->level || !log->ubuf || bpf_verifier_log_full(log)) return; va_start(args, fmt); n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); va_end(args); WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else log->ubuf = NULL; } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", }; static void print_verifier_state(struct bpf_verifier_env *env, struct bpf_verifier_state *state) { struct bpf_reg_state *reg; enum bpf_reg_type t; int i; for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d=%s", i, reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, " fp%d=%s", -MAX_BPF_STACK + i * BPF_REG_SIZE, reg_type_str[state->stack[i].spilled_ptr.type]); } verbose(env, "\n"); } static int copy_stack_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { if (!src->stack) return 0; if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { /* internal bug, make state invalid to reject the program */ memset(dst, 0, sizeof(*dst)); return -EFAULT; } memcpy(dst->stack, src->stack, sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); return 0; } /* 
do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_verifier_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state * which this function copies over. It points to previous bpf_verifier_state * which is never reallocated */ static int realloc_verifier_state(struct bpf_verifier_state *state, int size, bool copy_old) { u32 old_size = state->allocated_stack; struct bpf_stack_state *new_stack; int slot = size / BPF_REG_SIZE; if (size <= old_size || !size) { if (copy_old) return 0; state->allocated_stack = slot * BPF_REG_SIZE; if (!size && old_size) { kfree(state->stack); state->stack = NULL; } return 0; } new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), GFP_KERNEL); if (!new_stack) return -ENOMEM; if (copy_old) { if (state->stack) memcpy(new_stack, state->stack, sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); memset(new_stack + old_size / BPF_REG_SIZE, 0, sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); } state->allocated_stack = slot * BPF_REG_SIZE; kfree(state->stack); state->stack = new_stack; return 0; } static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { kfree(state->stack); if (free_self) kfree(state); } /* copy verifier state from src to dst growing dst stack space * when necessary to accommodate larger src stack */ static int copy_verifier_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { int err; err = realloc_verifier_state(dst, src->allocated_stack, false); if (err) return err; memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack)); return copy_stack_state(dst, src); } static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. 
*/ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { reg->id = 0; reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. 
*/ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { reg->type = SCALAR_VALUE; reg->id = 0; reg->off = 0; reg->var_off = tnum_unknown; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_reg_state *regs) { int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) { struct bpf_verifier_state *parent = state->parent; if (regno == BPF_REG_FP) /* We don't need to worry about FP liveness because it's read-only */ return; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->regs[regno].live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->regs[regno].live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_reg_state *regs = env->cur_state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } mark_reg_read(env->cur_state, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case CONST_PTR_TO_MAP: return true; default: return false; } } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE), true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } if (value_regno >= 0 && is_spillable_regtype(state->regs[value_regno].type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } /* save register state */ state->stack[spi].spilled_ptr = state->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_SPILL; } else { /* regular write of data into stack */ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = STACK_MISC; } return 0; } static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot) { struct bpf_verifier_state *parent = state->parent; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_stack_read(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; u8 *stype; if (state->allocated_stack <= slot) { verbose(env, "invalid read from stack off %d+0 size %d\n", off, size); return -EACCES; } stype = state->stack[spi].slot_type; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } if (value_regno >= 0) { /* restore register state from stack */ state->regs[value_regno] = state->stack[spi].spilled_ptr; mark_stack_slot_read(state, spi); } return 0; } else { for (i = 0; i < size; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; } } if (value_regno >= 0) /* have read misc data from the stack */ mark_reg_unknown(env, state->regs, value_regno); return 0; } } /* check read/write into map element returned by bpf_map_lookup_elem() */ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_map *map = regs[regno].map_ptr; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || off + size > map->value_size) { verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } return 0; } /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *reg = &state->regs[regno]; int err; /* We may have adjusted the register to this map value, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ if (env->log.level) print_verifier_state(env, state); /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. If we are using signed variables for our * index'es we need to make sure that whatever we use * will have a set floor within our range. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. 
*/ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; /* fallthrough */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size) { bool strict = env->strict_alignment; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; /* The stack spill tracking logic in check_stack_write() * and check_stack_read() relies on stack accesses being * aligned. 
*/ strict = true; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE */ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) { u64 mask; /* clear high bits in bit representation */ reg->var_off = tnum_cast(reg->var_off, size); /* fix arithmetic bounds */ mask = ((u64)1 << (size * 8)) - 1; if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { reg->umin_value &= mask; reg->umax_value &= mask; } else { reg->umin_value = 0; reg->umax_value = mask; } reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } /* ctx accesses must be at a fixed offset, so that we can * determine what type of data were returned. */ if (reg->off) { verbose(env, "dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n", regno, reg->off, off - reg->off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable ctx access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) mark_reg_unknown(env, regs, value_regno); else mark_reg_known_zero(env, regs, value_regno); regs[value_regno].id = 0; regs[value_regno].off = 0; regs[value_regno].range = 0; regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { /* stack accesses must be at a fixed offset, so that we can * determine what type of data were returned. * See check_stack_read(). 
*/ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } off += reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); } /* Does this register contain a constant zero? */ static bool register_is_null(struct bpf_reg_state reg) { return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
*/ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs; int off, i, slot, spi; if (regs[regno].type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(regs[regno])) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[regs[regno].type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(regs[regno].var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); return -EACCES; } off = regs[regno].off + regs[regno].var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot || state->stack[spi].slot_type[slot % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; } } return 0; } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } } static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; enum bpf_reg_type expected_type, type = reg->type; int err = 0; if (arg_type == ARG_DONTCARE) return 0; err = check_reg_arg(env, regno, SRC_OP); if (err) return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { verbose(env, "R%d leaks addr into helper function\n", regno); return -EACCES; } return 0; } if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose(env, "helper access to the packet is not allowed\n"); return -EACCES; } if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_MEM || arg_type == ARG_PTR_TO_MEM_OR_NULL || arg_type == 
ARG_PTR_TO_UNINIT_MEM) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(*reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->key_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->value_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->value_size, false, NULL); } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* bpf_xxx(..., buf, len) call will access 'len' bytes * from stack pointer 'buf'. Check it * note: regno == len, regno - 1 == buf */ if (regno == 0) { /* kernel subsystem misconfigured verifier */ verbose(env, "ARG_CONST_SIZE cannot be first argument\n"); return -EACCES; } /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. */ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... 
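The size-argument handling above (ARG_CONST_SIZE / ARG_CONST_SIZE_OR_ZERO) boils down to: a variable length is usable only if it is provably non-negative, bounded above, and only zero when the helper tolerates a zero-sized access. Below is a compressed, purely illustrative restatement; the limit is an assumption standing in for BPF_MAX_VAR_SIZ and the helper name is made up.

#include <stdbool.h>
#include <stdint.h>

#define TOY_MAX_VAR_SIZ (1ULL << 29)  /* assumed stand-in for BPF_MAX_VAR_SIZ */

/* Simplified model of the size checks in check_func_arg(): smin/umin/umax
 * are the verifier's bounds on the register holding the length.
 */
static bool toy_size_arg_ok(int64_t smin, uint64_t umin, uint64_t umax,
                            bool zero_size_allowed)
{
    if (smin < 0)
        return false;               /* could be interpreted as negative */
    if (umin == 0 && !zero_size_allowed)
        return false;               /* zero-length access not tolerated */
    if (umax >= TOY_MAX_VAR_SIZ)
        return false;               /* effectively unbounded            */
    return true;
}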
*/ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap, open when use-cases appear */ case BPF_MAP_TYPE_CPUMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem) goto error; break; default: break; } /* ... and second from the function itself. */ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP) goto error; break; case BPF_FUNC_sk_redirect_map: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static int check_raw_mode(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; return count > 1 ? -EINVAL : 0; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. 
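From the program author's point of view, this scrubbing is why packet pointers must be re-derived and re-checked after any helper that can move packet data. A hypothetical XDP fragment illustrating the pattern (assumes libbpf's bpf_helpers.h for SEC() and the helper declaration):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int adjust_then_reload(struct xdp_md *ctx)
{
    void *data     = (void *)(long)ctx->data;
    void *data_end = (void *)(long)ctx->data_end;

    if (data + 14 > data_end)               /* bounds-check the old pointers */
        return XDP_DROP;

    if (bpf_xdp_adjust_head(ctx, 14))       /* may move packet data          */
        return XDP_DROP;

    data     = (void *)(long)ctx->data;     /* old pointers were scrubbed,   */
    data_end = (void *)(long)ctx->data_end; /* so reload ...                 */
    if (data + 4 > data_end)                /* ... and re-check              */
        return XDP_DROP;

    return XDP_PASS;
}

char _license[] SEC("license") = "GPL";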
*/ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (reg_is_pkt_pointer_any(&regs[i])) mark_reg_unknown(env, regs, i); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } } static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL only function from proprietary program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; /* We only support one arg being in raw mode at the moment, which * is sufficient for the helper functions we have right now. */ err = check_raw_mode(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. 
*/ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); if (err) return err; } regs = cur_regs(env); /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { struct bpf_insn_aux_data *insn_aux; regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; insn_aux = &env->insn_aux_data[insn_idx]; if (!insn_aux->map_ptr) insn_aux->map_ptr = meta.map_ptr; else if (insn_aux->map_ptr != meta.map_ptr) insn_aux->map_ptr = BPF_MAP_PTR_POISON; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + (u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
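The two overflow helpers defined just above can be exercised with a quick user-space harness. The trick is the same: do the arithmetic in u64, where wraparound is well defined, then compare the result against an operand.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same logic as signed_add_overflows() above, restated with stdint types. */
static bool s64_add_overflows(int64_t a, int64_t b)
{
    int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

    if (b < 0)
        return res > a;   /* adding a negative must not move us up       */
    return res < a;       /* adding a non-negative must not move us down */
}

int main(void)
{
    printf("%d\n", s64_add_overflows(INT64_MAX, 1));   /* 1: wraps around   */
    printf("%d\n", s64_add_overflows(INT64_MAX, -1));  /* 0: stays in range */
    printf("%d\n", s64_add_overflows(INT64_MIN, -1));  /* 1: wraps around   */
    printf("%d\n", s64_add_overflows(40, 2));          /* 0: stays in range */
    return 0;
}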
*/ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; dst_reg = &regs[dst]; if (WARN_ON_ONCE(known && (smin_val != smax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad sbounds\n"); return -EINVAL; } if (WARN_ON_ONCE(known && (umin_val != umax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad ubounds\n"); return -EINVAL; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ if (!env->allow_ptr_leaks) verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", dst); return -EACCES; } if (ptr_reg->type == CONST_PTR_TO_MAP) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_PACKET_END) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", dst); return -EACCES; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; switch (opcode) { case BPF_ADD: /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
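Throughout this code, var_off is a tnum ("tristate number"): a (value, mask) pair in which set mask bits are unknown and every other bit is known to equal the corresponding bit of value. Below is a small illustrative model of the representation only (the real helpers such as tnum_add() live in kernel/bpf/tnum.c; the toy_ names are made up).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy mirror of struct tnum: bit i is unknown iff mask bit i is set. */
struct toy_tnum {
    uint64_t value;
    uint64_t mask;
};

static struct toy_tnum toy_tnum_const(uint64_t v)
{
    return (struct toy_tnum){ .value = v, .mask = 0 };
}

static bool toy_tnum_is_const(struct toy_tnum t)
{
    return t.mask == 0;
}

int main(void)
{
    /* An offset known only to be a multiple of 8 and below 64: the low three
     * bits are known zero, bits 3-5 are unknown, all higher bits known zero.
     */
    struct toy_tnum aligned = { .value = 0, .mask = 0x38 };

    printf("%d\n", toy_tnum_is_const(aligned));            /* 0 */
    printf("%d\n", toy_tnum_is_const(toy_tnum_const(16))); /* 1 */
    return 0;
}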
*/ if (signed_add_overflows(smin_ptr, smin_val) || signed_add_overflows(smax_ptr, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr + smin_val; dst_reg->smax_value = smax_ptr + smax_val; } if (umin_ptr + umin_val < umin_ptr || umax_ptr + umax_val < umax_ptr) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value = umin_ptr + umin_val; dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->range = 0; } break; case BPF_SUB: if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ if (!env->allow_ptr_leaks) verbose(env, "R%d tried to subtract pointer from scalar\n", dst); return -EACCES; } /* We don't allow subtraction from FP, because (according to * test_verifier.c test "invalid fp arithmetic", JITs might not * be able to deal with it. */ if (ptr_reg->type == PTR_TO_STACK) { if (!env->allow_ptr_leaks) verbose(env, "R%d subtraction from stack pointer prohibited\n", dst); return -EACCES; } if (known && (ptr_reg->off - smin_val == (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. Subtract it from fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->range = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit for now. * (However, in principle we could allow some cases, e.g. * ptr &= ~3 which would reduce min_value by 3.) */ if (!env->allow_ptr_leaks) verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. MUL,LSH) produce non-pointer results */ if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* WARNING: This function does calculations on 64-bit values, but the actual * execution may occur on 32-bit values. Therefore, things like bitshifts * need extra checks in the 32-bit case. 
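A concrete instance of the 32-bit caveat stated above: a shift count of 40 is meaningful on a 64-bit operand but undefined on a 32-bit one, which is why the shift cases below compare the count against insn_bitness instead of a fixed 64 and give up (mark the result unknown) when it is too large. An illustrative user-space fragment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t x64 = 1;
    uint32_t x32 = 1;
    unsigned int shift = 40;

    printf("%llu\n", (unsigned long long)(x64 << shift)); /* well defined */

    /* x32 << 40 would be undefined behaviour in C, and the shift cases below
     * note the same for BPF: 32-bit shifts by >= 32 are undefined, so the
     * verifier refuses to compute bounds for them.
     */
    if (shift < 32)
        printf("%u\n", x32 << shift);
    else
        printf("shift count too large for a 32-bit operand\n");
    return 0;
}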
*/ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. 
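As a worked example of the BPF_AND bounds logic above: if the destination scalar lies in [0, 1000] and the source is the constant 0x0f, the new unsigned maximum is min(1000, 15) = 15, while the minimum falls back to the known-set bits of the result (here none, so 0). A small sketch of that rule, stated only for a constant mask and a non-negative destination with no individually known bits:

#include <stdint.h>
#include <stdio.h>

/* Bounds after "dst &= mask" with mask a known constant and dst known to be
 * a non-negative scalar in [0, umax].
 */
static void and_const_bounds(uint64_t umax, uint64_t mask,
                             uint64_t *new_umin, uint64_t *new_umax)
{
    *new_umin = 0;                            /* no result bits known set */
    *new_umax = umax < mask ? umax : mask;    /* min of the two maxima    */
}

int main(void)
{
    uint64_t lo, hi;

    and_const_bounds(1000, 0x0f, &lo, &hi);
    printf("[%llu, %llu]\n", (unsigned long long)lo,
           (unsigned long long)hi);           /* [0, 15] */
    return 0;
}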
*/ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } if (src_known) dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounts capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; if (src_known) dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max * and var_off. 
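To make the BPF_RSH bounds rule above concrete: the new unsigned minimum is obtained by shifting with the largest possible count and the new maximum with the smallest, since a bigger right shift can only make the value smaller. For example, a value in [100, 200] shifted right by a count in [1, 3] ends up in [100 >> 3, 200 >> 1] = [12, 100]. A tiny illustrative check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t umin = 100, umax = 200;        /* value range       */
    uint64_t shift_min = 1, shift_max = 3;  /* shift-count range */

    uint64_t new_umin = umin >> shift_max;  /* worst case: largest shift  */
    uint64_t new_umax = umax >> shift_min;  /* best case: smallest shift  */

    printf("[%llu, %llu]\n",
           (unsigned long long)new_umin,
           (unsigned long long)new_umax);   /* [12, 100] */
    return 0;
}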
*/ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); int rc; dst_reg = &regs[insn->dst_reg]; src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields * an arbitrary scalar. */ if (!env->allow_ptr_leaks) { verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg, bpf_alu_string[opcode >> 4]); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); return 0; } else { /* scalar += pointer * This is legal, but we have to reverse our * src/dest handling in computing the range */ rc = adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* scalar += unknown scalar */ __mark_reg_unknown(&off_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } else if (ptr_reg) { /* pointer += scalar */ rc = adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += scalar */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, *src_reg); } return rc; } } else { /* Pretend the src is a reg with a known value, since we only * need to be able to read from this state. */ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) { /* pointer += K */ rc = adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += K */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, 
insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *state, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. 
*/ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. * In JEQ/JNE cases we also adjust the var_off values. */ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that. * Since false_reg and true_reg have the same type by construction, we * only need to check one of them for pointerness. */ if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: false_reg->umax_value = min(false_reg->umax_value, val); true_reg->umin_value = max(true_reg->umin_value, val + 1); break; case BPF_JSGT: false_reg->smax_value = min_t(s64, false_reg->smax_value, val); true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; case BPF_JLT: false_reg->umin_value = max(false_reg->umin_value, val); true_reg->umax_value = min(true_reg->umax_value, val - 1); break; case BPF_JSLT: false_reg->smin_value = max_t(s64, false_reg->smin_value, val); true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); break; case BPF_JGE: false_reg->umax_value = min(false_reg->umax_value, val - 1); true_reg->umin_value = max(true_reg->umin_value, val); break; case BPF_JSGE: false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; case BPF_JLE: false_reg->umin_value = max(false_reg->umin_value, val + 1); true_reg->umax_value = min(true_reg->umax_value, val); break; case BPF_JSLE: false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); true_reg->smax_value = min_t(s64, true_reg->smax_value, val); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Same as above, but for the case that dst_reg holds a constant and src_reg is * the variable reg. */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
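A worked instance of the refinement performed by reg_set_min_max() above: for "if (r1 > 10)" with r1 a scalar, the taken branch may raise r1's unsigned minimum to 11 and the fall-through branch may lower its maximum to 10. An illustrative sketch over a bare [umin, umax] pair (names are made up):

#include <stdint.h>
#include <stdio.h>

struct toy_range { uint64_t umin, umax; };

/* Split a range according to "if (reg > val)" with BPF_JGT semantics:
 * one copy for the branch where the test is true, one for where it is false.
 */
static void jgt_refine(struct toy_range r, uint64_t val,
                       struct toy_range *if_true, struct toy_range *if_false)
{
    *if_true = r;
    *if_false = r;
    if_true->umin  = r.umin > val + 1 ? r.umin : val + 1; /* max(umin, val+1) */
    if_false->umax = r.umax < val ? r.umax : val;         /* min(umax, val)   */
}

int main(void)
{
    struct toy_range r = { 0, 100 }, t, f;

    jgt_refine(r, 10, &t, &f);
    printf("true:  [%llu, %llu]\n", (unsigned long long)t.umin,
           (unsigned long long)t.umax);  /* [11, 100] */
    printf("false: [%llu, %llu]\n", (unsigned long long)f.umin,
           (unsigned long long)f.umax);  /* [0, 10]   */
    return 0;
}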
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
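For the equality case handled by __reg_combine_min_max() above: once two scalars are known to be equal, each may adopt the tighter bound from either side, i.e. the intersection of the two ranges. For instance [0, 100] combined with [50, 200] leaves both registers in [50, 100]. A minimal sketch of the unsigned part:

#include <stdint.h>
#include <stdio.h>

struct toy_range { uint64_t umin, umax; };

/* Two registers proven equal can both take the intersection of their ranges. */
static struct toy_range range_intersect(struct toy_range a, struct toy_range b)
{
    struct toy_range r;

    r.umin = a.umin > b.umin ? a.umin : b.umin;  /* max of the minima */
    r.umax = a.umax < b.umax ? a.umax : b.umax;  /* min of the maxima */
    return r;
}

int main(void)
{
    struct toy_range a = { 0, 100 }, b = { 50, 200 };
    struct toy_range c = range_intersect(a, b);

    printf("[%llu, %llu]\n", (unsigned long long)c.umin,
           (unsigned long long)c.umax);          /* [50, 100] */
    return 0;
}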
*/ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, bool is_null) { struct bpf_reg_state *reg = &regs[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { reg->type = PTR_TO_MAP_VALUE; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances * to take effect. */ reg->id = 0; } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, bool is_null) { struct bpf_reg_state *regs = state->regs; u32 id = regs[regno].id; int i; for (i = 0; i < MAX_BPF_REG; i++) mark_map_reg(regs, i, id, is_null); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 
find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *other_branch, *this_branch = env->cur_state; struct bpf_reg_state *regs = this_branch->regs, *dst_reg; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == SCALAR_VALUE && tnum_equals_const(dst_reg->var_off, insn->imm)) { if (opcode == BPF_JEQ) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else { /* if (imm != imm) goto pc+off; * only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); if (!other_branch) return -EFAULT; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch->regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch->regs[insn->src_reg], &other_branch->regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { /* Mark all identical map registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for 
this program type\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. */ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if 
(insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return -ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. 
* Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { unsigned int i; for (i = 0; i < ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; idmap[i].cur = cur_id; return true; } if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } /* We ran out of idmap slots, which should be impossible */ WARN_ON_ONCE(1); return false; } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* if we knew anything about the old value, we're not * equal, because we can't know anything about the * scalar value of the pointer in the new value. */ return rold->umin_value == 0 && rold->umax_value == U64_MAX && rold->smin_value == S64_MIN && rold->smax_value == S64_MAX && tnum_is_unknown(rold->var_off); } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. 
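The scalar pruning rule above (range_within() combined with tnum_in()) amounts to: the state currently being explored may be pruned only if every value it might hold was already covered by the state that previously reached bpf_exit. Below is a small illustration of the range half only, with the same inclusive semantics as range_within() (the toy_ names are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_range { int64_t smin, smax; };

/* "old" is the already-verified state, "cur" the one we would like to prune.
 * Pruning is sound only if cur's possible values are a subset of old's.
 */
static bool toy_range_within(struct toy_range old, struct toy_range cur)
{
    return old.smin <= cur.smin && old.smax >= cur.smax;
}

int main(void)
{
    struct toy_range old      = { 0, 100 };
    struct toy_range narrower = { 10, 20 };
    struct toy_range wider    = { -5, 20 };

    printf("%d\n", toy_range_within(old, narrower)); /* 1: safe to prune  */
    printf("%d\n", toy_range_within(old, wider));    /* 0: keep exploring */
    return 0;
}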
*/ if (rold->off != rcur->off) return false; /* id relations must be preserved */ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) return false; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_STACK: case PTR_TO_PACKET_END: /* Only valid matches are exact, which memcmp() above * would have accepted */ default: /* Don't know what's going on, just say it's not safe */ return false; } /* Shouldn't get here; if we do, say it's not safe */ WARN_ON_ONCE(1); return false; } static bool stacksafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur, struct idpair *idmap) { int i, spi; /* if explored stack has more populated slots than current stack * such stacks are not equivalent */ if (old->allocated_stack > cur->allocated_stack) return false; /* walk slots of the explored stack and ignore any additional * slots in the current stack, since explored(safe) state * didn't use them */ for (i = 0; i < old->allocated_stack; i++) { spi = i / BPF_REG_SIZE; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) continue; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) /* Ex: old explored (safe) state has STACK_SPILL in * this stack slot, but current has STACK_MISC -> * these verifier states are not equivalent, * return false to continue verification of this path */ return false; if (i % BPF_REG_SIZE) continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; if (!regsafe(&old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointer types * are the same as well. * Ex: explored safe path could have stored * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} * but current path has stored: * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} * such verifier states are not equivalent. * return false to continue verification of this path */ return false; } return true; } /* compare two verifier states * * all states stored in state_list are known to be valid, since * verifier reached 'bpf_exit' instruction through them * * this function is called when the verifier explores different branches of * execution popped from the state stack. If it sees an old state that has * more strict register state and more strict stack state then this execution * branch doesn't need to be explored further, since verifier already * concluded that more strict state leads to valid finish. * * Therefore two states are equivalent if register state is more conservative * and explored stack state is more conservative than the current one. * Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers.
If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at a * jump target (in the first iteration of the propagate_liveness() loop), * we didn't arrive by the straight-line code, so read marks in state must * propagate to parent regardless of state's write marks. */ static bool do_propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ bool touched = false; /* any changes made? */ int i; if (!parent) return touched; /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (parent->regs[i].live & REG_LIVE_READ) continue; if (writes && (state->regs[i].live & REG_LIVE_WRITTEN)) continue; if (state->regs[i].live & REG_LIVE_READ) { parent->regs[i].live |= REG_LIVE_READ; touched = true; } } /* ... and stack slots */ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].slot_type[0] != STACK_SPILL) continue; if (state->stack[i].slot_type[0] != STACK_SPILL) continue; if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (writes && (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN)) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) { parent->stack[i].spilled_ptr.live |= REG_LIVE_READ; touched = true; } } return touched; } /* "parent" is "a state from which we reach the current state", but initially * it is not the state->parent (i.e. "the state whose straight-line code leads * to the current state"), instead it is the state that happened to arrive at * a (prunable) equivalent of the current state. See comment above * do_propagate_liveness() for consequences of this. * This function is just a more efficient way of calling mark_reg_read() or * mark_stack_slot_read() on each reg in "parent" that is read in "state", * though it requires that parent != state->parent in the call arguments. 
*/ static void propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { while (do_propagate_liveness(state, parent)) { /* Something changed, so we need to feed those changes onward */ state = parent; parent = state->parent; } } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state; int i, err; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ propagate_liveness(&sl->state, cur); return 1; } sl = sl->next; } /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach bpf_exit (which means it's safe) or * it will be rejected. Since there are no loops, we won't be * seeing this 'insn_idx' instruction again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ err = copy_verifier_state(&new_sl->state, cur); if (err) { free_verifier_state(&new_sl->state, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ cur->parent = &new_sl->state; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) */ for (i = 0; i < BPF_REG_FP; i++) cur->regs[i].live = REG_LIVE_NONE; for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++) if (cur->stack[i].slot_type[0] == STACK_SPILL) cur->stack[i].spilled_ptr.live = REG_LIVE_NONE; return 0; } static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { if (env->dev_ops && env->dev_ops->insn_hook) return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); return 0; } static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; env->cur_state = state; init_reg_state(env, state->regs); state->parent = NULL; insn_idx = 0; for (;;) { struct bpf_insn *insn; u8 class; int err; if (insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", insn_idx, insn_cnt); return -EFAULT; } insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. 
Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d: safe\n", prev_insn_idx, insn_idx); else verbose(env, "%d: safe\n", insn_idx); } goto process_bpf_exit; } if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", insn_idx); else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); print_verifier_state(env, state); do_print_state = false; } if (env->log.level) { verbose(env, "%d: ", insn_idx); print_bpf_insn(verbose, env, insn, env->allow_ptr_leaks); } err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); if (err) return err; regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg); if (err) return err; prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (src_reg_type != *prev_src_type && (src_reg_type == PTR_TO_CTX || *prev_src_type == PTR_TO_CTX)) { /* ABuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. 
*/ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn_idx, insn); if (err) return err; insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg); if (err) return err; prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (dst_reg_type != *prev_dst_type && (dst_reg_type == PTR_TO_CTX || *prev_dst_type == PTR_TO_CTX)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } err = check_call(env, insn->imm, insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. 
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns, stack depth %d\n", insn_processed, env->prog->aux->stack_depth); return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use * preallocated hash maps, since doing memory allocation * in overflow_handler can crash depending on where nmi got * triggered. */ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } return 0; } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) 
map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_bpf_prog_info() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; fdput(f); next_insn: insn++; i++; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. */ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { int i; for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insni[off] instruction was replaced with the range * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; int i; if (cnt == 1) return 0; new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (!new_prog) return NULL; if (adjust_insn_aux_data(env, new_prog->len, off, len)) return NULL; return new_prog; } /* The verifier does more data flow analysis than llvm and will not explore * branches that are dead at run time. Malicious programs can have dead code * too. Therefore replace all dead at-run-time code with nops. 
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &nop, sizeof(nop)); } } /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; u32 target_size; if (ops->gen_prologue) { cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (!ops->convert_ctx_access) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimum program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result. 
*/ is_narrower_load = size < ctx_field_size; if (is_narrower_load) { u32 off = insn->off; u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(ctx_field_size - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { if (ctx_field_size <= 4) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); else insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. */ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpeter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * handlers are currently limited to 64 bit only. */ if (ebpf_jit_enabled() && BITS_PER_LONG == 64 && insn->imm == BPF_FUNC_map_lookup_elem) { map_ptr = env->insn_aux_data[i + delta].map_ptr; if (map_ptr == BPF_MAP_PTR_POISON || !map_ptr->ops->map_gen_lookup) goto patch_call_imm; cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->imm == BPF_FUNC_redirect_map) { /* Note, we cannot use prog directly as imm as subsequent * rewrites would still change the prog pointer. The only * stable address we can use is aux, which also works with * prog clones during blinding. 
*/ u64 addr = (unsigned long)prog->aux; struct bpf_insn r4_ld[] = { BPF_LD_IMM64(BPF_REG_4, addr), *insn, }; cnt = ARRAY_SIZE(r4_ld); new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) { struct bpf_verifier_env *env; struct bpf_verifer_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * (*prog)->len); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; if (env->prog->aux->offload) { ret = bpf_prog_offload_verifier_prep(env); if (ret) goto err_unlock; } ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; ret = check_cfg(env); if (ret < 0) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) sanitize_dead_code(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if 
(!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_bpf_prog_info() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
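The state-pruning path exercised above (is_state_visited() -> states_equal() -> regsafe()) ultimately rests on an interval-containment test, range_within(): an already-verified cached state may prune the current branch only if its bounds enclose the bounds of the current state. The following is a minimal, self-contained sketch of that test; it is illustrative only, not part of verifier.c, and the toy_* names are hypothetical.
/* Standalone illustration (not kernel code) of the interval-containment
 * check that the verifier's pruning relies on. Assumes only libc.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_bounds {
	uint64_t umin, umax;	/* unsigned value range */
	int64_t smin, smax;	/* signed value range */
};

/* the cached (old) bounds must enclose the current bounds for pruning to be safe */
static bool toy_range_within(const struct toy_bounds *old,
			     const struct toy_bounds *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax &&
	       old->smin <= cur->smin && old->smax >= cur->smax;
}

int main(void)
{
	struct toy_bounds cached = { .umin = 0, .umax = 100, .smin = 0, .smax = 100 };
	struct toy_bounds cur    = { .umin = 4, .umax = 64,  .smin = 4, .smax = 64  };

	/* cached bounds are wider than current -> the cached verdict also covers cur */
	printf("prune ok: %d\n", toy_range_within(&cached, &cur));

	/* widen the current bounds beyond the cached ones -> must keep exploring */
	cur.umax = 200;
	cur.smax = 200;
	printf("prune ok: %d\n", toy_range_within(&cached, &cur));
	return 0;
}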
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/filter.h> #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/stringify.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. * * The first pass is depth-first-search to check that the program is a DAG. * It rejects the following programs: * - larger than BPF_MAXINSNS insns * - if loop is present (detected via back-edge) * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all paths through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less than 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k * * On entry to each instruction, each register has a type, and the instruction * changes the types of the registers depending on instruction semantics. * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is * copied to R1. * * All registers are 64-bit. * R0 - return register * R1-R5 argument passing registers * R6-R9 callee saved registers * R10 - frame pointer read-only * * At the start of BPF program the register R1 contains a pointer to bpf_context * and has type PTR_TO_CTX. * * Verifier tracks arithmetic operations on pointers in case: * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), * 1st insn copies R10 (which has FRAME_PTR) type into R1 * and 2nd arithmetic instruction is pattern matched to recognize * that it wants to construct a pointer to some element within stack. * So after 2nd insn, the register R1 has type PTR_TO_STACK * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. * * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer * types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible.
* * registers used to pass values to function calls are checked against * function argument constraints. * * ARG_PTR_TO_MAP_KEY is one of such argument constraints. * It means that the register type passed to this function must be * PTR_TO_STACK and it will be used inside the function as * 'pointer to map element key' * * For example the argument constraints for bpf_map_lookup_elem(): * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, * .arg1_type = ARG_CONST_MAP_PTR, * .arg2_type = ARG_PTR_TO_MAP_KEY, * * ret_type says that this function returns 'pointer to map elem value or null' * function expects 1st argument to be a const pointer to 'struct bpf_map' and * 2nd argument should be a pointer to stack, which will be used inside * the helper function as a pointer to map element key. * * On the kernel side the helper function looks like: * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * { * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; * void *key = (void *) (unsigned long) r2; * void *value; * * here kernel can access 'key' and 'map' pointers safely, knowing that * [key, key + map->key_size) bytes are valid and were initialized on * the stack of eBPF program. * } * * Corresponding eBPF program may look like: * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), * here verifier looks at prototype of map_lookup_elem() and sees: * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, * Now verifier knows that this map has key of R1->map_ptr->key_size bytes * * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, * Now verifier checks that [R2, R2 + map's key_size) are within stack limits * and were initialized prior to this call. * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function * returns either a pointer to map value or NULL. * * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false * branch. See check_cond_jmp_op(). * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ struct bpf_verifier_stack_elem { /* verifier state is 'st' * before processing instruction 'insn_idx' * and after processing instruction 'prev_insn_idx' */ struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; }; #define BPF_COMPLEXITY_LIMIT_INSNS 131072 #define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; int regno; int access_size; }; static DEFINE_MUTEX(bpf_verifier_lock); /* log_level controls verbosity level of eBPF verifier.
* verbose() is used to dump the verification trace to the log, so the user * can figure out what's wrong with the program */ static __printf(2, 3) void verbose(struct bpf_verifier_env *env, const char *fmt, ...) { struct bpf_verifer_log *log = &env->log; unsigned int n; va_list args; if (!log->level || !log->ubuf || bpf_verifier_log_full(log)) return; va_start(args, fmt); n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); va_end(args); WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else log->ubuf = NULL; } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", }; static void print_verifier_state(struct bpf_verifier_env *env, struct bpf_verifier_state *state) { struct bpf_reg_state *reg; enum bpf_reg_type t; int i; for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d=%s", i, reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, " fp%d=%s", -MAX_BPF_STACK + i * BPF_REG_SIZE, reg_type_str[state->stack[i].spilled_ptr.type]); } verbose(env, "\n"); } static int copy_stack_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { if (!src->stack) return 0; if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { /* internal bug, make state invalid to reject the program */ memset(dst, 0, sizeof(*dst)); return -EFAULT; } memcpy(dst->stack, src->stack, sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); return 0; } /* 
do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_verifier_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state * which this function copies over. It points to previous bpf_verifier_state * which is never reallocated */ static int realloc_verifier_state(struct bpf_verifier_state *state, int size, bool copy_old) { u32 old_size = state->allocated_stack; struct bpf_stack_state *new_stack; int slot = size / BPF_REG_SIZE; if (size <= old_size || !size) { if (copy_old) return 0; state->allocated_stack = slot * BPF_REG_SIZE; if (!size && old_size) { kfree(state->stack); state->stack = NULL; } return 0; } new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), GFP_KERNEL); if (!new_stack) return -ENOMEM; if (copy_old) { if (state->stack) memcpy(new_stack, state->stack, sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); memset(new_stack + old_size / BPF_REG_SIZE, 0, sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); } state->allocated_stack = slot * BPF_REG_SIZE; kfree(state->stack); state->stack = new_stack; return 0; } static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { kfree(state->stack); if (free_self) kfree(state); } /* copy verifier state from src to dst growing dst stack space * when necessary to accommodate larger src stack */ static int copy_verifier_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { int err; err = realloc_verifier_state(dst, src->allocated_stack, false); if (err) return err; memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack)); return copy_stack_state(dst, src); } static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. 
*/ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { reg->id = 0; reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. 
*/ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { reg->type = SCALAR_VALUE; reg->id = 0; reg->off = 0; reg->var_off = tnum_unknown; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_reg_state *regs) { int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) { struct bpf_verifier_state *parent = state->parent; if (regno == BPF_REG_FP) /* We don't need to worry about FP liveness because it's read-only */ return; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->regs[regno].live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->regs[regno].live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_reg_state *regs = env->cur_state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } mark_reg_read(env->cur_state, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case CONST_PTR_TO_MAP: return true; default: return false; } } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE), true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } if (value_regno >= 0 && is_spillable_regtype(state->regs[value_regno].type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } /* save register state */ state->stack[spi].spilled_ptr = state->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_SPILL; } else { /* regular write of data into stack */ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = STACK_MISC; } return 0; } static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot) { struct bpf_verifier_state *parent = state->parent; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_stack_read(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; u8 *stype; if (state->allocated_stack <= slot) { verbose(env, "invalid read from stack off %d+0 size %d\n", off, size); return -EACCES; } stype = state->stack[spi].slot_type; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } if (value_regno >= 0) { /* restore register state from stack */ state->regs[value_regno] = state->stack[spi].spilled_ptr; mark_stack_slot_read(state, spi); } return 0; } else { for (i = 0; i < size; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; } } if (value_regno >= 0) /* have read misc data from the stack */ mark_reg_unknown(env, state->regs, value_regno); return 0; } } /* check read/write into map element returned by bpf_map_lookup_elem() */ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_map *map = regs[regno].map_ptr; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || off + size > map->value_size) { verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } return 0; } /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *reg = &state->regs[regno]; int err; /* We may have adjusted the register to this map value, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ if (env->log.level) print_verifier_state(env, state); /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. If we are using signed variables for our * index'es we need to make sure that whatever we use * will have a set floor within our range. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. 
*/ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; /* fallthrough */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size) { bool strict = env->strict_alignment; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; /* The stack spill tracking logic in check_stack_write() * and check_stack_read() relies on stack accesses being * aligned. 
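		 * E.g. an 8-byte spill normally fills exactly one BPF_REG_SIZE
		 * slot; an unaligned spill would straddle two slots and confuse
		 * the slot_type bookkeeping, so force strict checking here.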
*/ strict = true; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE */ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) { u64 mask; /* clear high bits in bit representation */ reg->var_off = tnum_cast(reg->var_off, size); /* fix arithmetic bounds */ mask = ((u64)1 << (size * 8)) - 1; if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { reg->umin_value &= mask; reg->umax_value &= mask; } else { reg->umin_value = 0; reg->umax_value = mask; } reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } /* ctx accesses must be at a fixed offset, so that we can * determine what type of data were returned. */ if (reg->off) { verbose(env, "dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n", regno, reg->off, off - reg->off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable ctx access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) mark_reg_unknown(env, regs, value_regno); else mark_reg_known_zero(env, regs, value_regno); regs[value_regno].id = 0; regs[value_regno].off = 0; regs[value_regno].range = 0; regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { /* stack accesses must be at a fixed offset, so that we can * determine what type of data were returned. * See check_stack_read(). 
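		 * E.g. *(u64 *)(r10 - 8) is accepted, while an access through
		 * r10 plus a non-constant register fails the tnum_is_const()
		 * check below.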
*/ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } off += reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); } /* Does this register contain a constant zero? */ static bool register_is_null(struct bpf_reg_state reg) { return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
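 * E.g. for r2 = fp - 16 passed with access_size == 16, off becomes -16 and
 * all 16 bytes must already be STACK_MISC, unless the helper argument is in
 * raw (uninitialized) mode, in which case only the size and regno are recorded.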
*/ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs; int off, i, slot, spi; if (regs[regno].type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(regs[regno])) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[regs[regno].type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(regs[regno].var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); return -EACCES; } off = regs[regno].off + regs[regno].var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot || state->stack[spi].slot_type[slot % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; } } return 0; } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } } static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; enum bpf_reg_type expected_type, type = reg->type; int err = 0; if (arg_type == ARG_DONTCARE) return 0; err = check_reg_arg(env, regno, SRC_OP); if (err) return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { verbose(env, "R%d leaks addr into helper function\n", regno); return -EACCES; } return 0; } if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose(env, "helper access to the packet is not allowed\n"); return -EACCES; } if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_MEM || arg_type == ARG_PTR_TO_MEM_OR_NULL || arg_type == 
ARG_PTR_TO_UNINIT_MEM) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(*reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->key_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->value_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->value_size, false, NULL); } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* bpf_xxx(..., buf, len) call will access 'len' bytes * from stack pointer 'buf'. Check it * note: regno == len, regno - 1 == buf */ if (regno == 0) { /* kernel subsystem misconfigured verifier */ verbose(env, "ARG_CONST_SIZE cannot be first argument\n"); return -EACCES; } /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. */ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... 
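	 * E.g. a BPF_MAP_TYPE_PROG_ARRAY is only usable with bpf_tail_call(),
	 * and the second switch below enforces the reverse direction:
	 * tail_call only accepts a prog array.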
*/ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap, open when use-cases appear */ case BPF_MAP_TYPE_CPUMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem) goto error; break; default: break; } /* ... and second from the function itself. */ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP) goto error; break; case BPF_FUNC_sk_redirect_map: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static int check_raw_mode(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; return count > 1 ? -EINVAL : 0; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. 
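 * E.g. after a helper such as bpf_skb_pull_data() that may move packet data,
 * every packet pointer held in a register or spilled to the stack is scrubbed.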
*/ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (reg_is_pkt_pointer_any(&regs[i])) mark_reg_unknown(env, regs, i); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } } static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL only function from proprietary program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; /* We only support one arg being in raw mode at the moment, which * is sufficient for the helper functions we have right now. */ err = check_raw_mode(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. 
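	 * E.g. for a helper argument of type ARG_PTR_TO_UNINIT_MEM (such as
	 * bpf_probe_read()'s destination buffer), each byte is simulated
	 * below as a BPF_B write so later reads see initialized stack.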
*/ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); if (err) return err; } regs = cur_regs(env); /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { struct bpf_insn_aux_data *insn_aux; regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; insn_aux = &env->insn_aux_data[insn_idx]; if (!insn_aux->map_ptr) insn_aux->map_ptr = meta.map_ptr; else if (insn_aux->map_ptr != meta.map_ptr) insn_aux->map_ptr = BPF_MAP_PTR_POISON; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + (u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
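 * E.g. for 'ptr += scalar' ptr_reg is the dst register; for 'scalar += ptr'
 * the caller passes the operands swapped, and the result is still written to
 * insn->dst_reg.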
*/ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; dst_reg = &regs[dst]; if (WARN_ON_ONCE(known && (smin_val != smax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad sbounds\n"); return -EINVAL; } if (WARN_ON_ONCE(known && (umin_val != umax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad ubounds\n"); return -EINVAL; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ if (!env->allow_ptr_leaks) verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", dst); return -EACCES; } if (ptr_reg->type == CONST_PTR_TO_MAP) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_PACKET_END) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", dst); return -EACCES; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; switch (opcode) { case BPF_ADD: /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
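		 * E.g. pkt_ptr(off=14) += r3 with r3 in [0, 20]: the fixed off
		 * stays 14, var_off and the min/max bounds absorb [0, 20], a
		 * fresh id is taken and range is reset to 0 until the pointer
		 * is re-checked against pkt_end.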
*/ if (signed_add_overflows(smin_ptr, smin_val) || signed_add_overflows(smax_ptr, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr + smin_val; dst_reg->smax_value = smax_ptr + smax_val; } if (umin_ptr + umin_val < umin_ptr || umax_ptr + umax_val < umax_ptr) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value = umin_ptr + umin_val; dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->range = 0; } break; case BPF_SUB: if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ if (!env->allow_ptr_leaks) verbose(env, "R%d tried to subtract pointer from scalar\n", dst); return -EACCES; } /* We don't allow subtraction from FP, because (according to * test_verifier.c test "invalid fp arithmetic", JITs might not * be able to deal with it. */ if (ptr_reg->type == PTR_TO_STACK) { if (!env->allow_ptr_leaks) verbose(env, "R%d subtraction from stack pointer prohibited\n", dst); return -EACCES; } if (known && (ptr_reg->off - smin_val == (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. Subtract it from fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->range = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit for now. * (However, in principle we could allow some cases, e.g. * ptr &= ~3 which would reduce min_value by 3.) */ if (!env->allow_ptr_leaks) verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. MUL,LSH) produce non-pointer results */ if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* WARNING: This function does calculations on 64-bit values, but the actual * execution may occur on 32-bit values. Therefore, things like bitshifts * need extra checks in the 32-bit case. 
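 * E.g. a 32-bit BPF_LSH is evaluated on the 64-bit bounds here and only
 * truncated at the end via coerce_reg_to_size(dst_reg, 4); shift amounts are
 * therefore checked against insn_bitness rather than a fixed 64.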
*/ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. 
*/ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } if (src_known) dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounts capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; if (src_known) dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max * and var_off. 
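 * Dispatch: pointer <op> scalar goes to adjust_ptr_min_max_vals(),
 * scalar <op> scalar to adjust_scalar_min_max_vals(); pointer <op> pointer
 * is only tolerated (as an unknown scalar) when env->allow_ptr_leaks is set.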
*/ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); int rc; dst_reg = &regs[insn->dst_reg]; src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields * an arbitrary scalar. */ if (!env->allow_ptr_leaks) { verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg, bpf_alu_string[opcode >> 4]); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); return 0; } else { /* scalar += pointer * This is legal, but we have to reverse our * src/dest handling in computing the range */ rc = adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* scalar += unknown scalar */ __mark_reg_unknown(&off_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } else if (ptr_reg) { /* pointer += scalar */ rc = adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += scalar */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, *src_reg); } return rc; } } else { /* Pretend the src is a reg with a known value, since we only * need to be able to read from this state. */ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) { /* pointer += K */ rc = adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += K */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, 
insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *state, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. 
*/ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. * In JEQ/JNE cases we also adjust the var_off values. */ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that. * Since false_reg and true_reg have the same type by construction, we * only need to check one of them for pointerness. */ if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
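		 * E.g. after 'if (r1 == 42)' the true branch learns r1 == 42 via
		 * __mark_reg_known(); the false branch keeps its previous bounds.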
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: false_reg->umax_value = min(false_reg->umax_value, val); true_reg->umin_value = max(true_reg->umin_value, val + 1); break; case BPF_JSGT: false_reg->smax_value = min_t(s64, false_reg->smax_value, val); true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; case BPF_JLT: false_reg->umin_value = max(false_reg->umin_value, val); true_reg->umax_value = min(true_reg->umax_value, val - 1); break; case BPF_JSLT: false_reg->smin_value = max_t(s64, false_reg->smin_value, val); true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); break; case BPF_JGE: false_reg->umax_value = min(false_reg->umax_value, val - 1); true_reg->umin_value = max(true_reg->umin_value, val); break; case BPF_JSGE: false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; case BPF_JLE: false_reg->umin_value = max(false_reg->umin_value, val + 1); true_reg->umax_value = min(true_reg->umax_value, val); break; case BPF_JSLE: false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); true_reg->smax_value = min_t(s64, true_reg->smax_value, val); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Same as above, but for the case that dst_reg holds a constant and src_reg is * the variable reg. */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
*/ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, bool is_null) { struct bpf_reg_state *reg = &regs[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { reg->type = PTR_TO_MAP_VALUE; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances * to take effect. */ reg->id = 0; } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, bool is_null) { struct bpf_reg_state *regs = state->regs; u32 id = regs[regno].id; int i; for (i = 0; i < MAX_BPF_REG; i++) mark_map_reg(regs, i, id, is_null); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 
find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *other_branch, *this_branch = env->cur_state; struct bpf_reg_state *regs = this_branch->regs, *dst_reg; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == SCALAR_VALUE && tnum_equals_const(dst_reg->var_off, insn->imm)) { if (opcode == BPF_JEQ) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else { /* if (imm != imm) goto pc+off; * only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); if (!other_branch) return -EFAULT; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch->regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch->regs[insn->src_reg], &other_branch->regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { /* Mark all identical map registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for 
this program type\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. */ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if 
(insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return -ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. 
* Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { unsigned int i; for (i = 0; i < ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; idmap[i].cur = cur_id; return true; } if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } /* We ran out of idmap slots, which should be impossible */ WARN_ON_ONCE(1); return false; } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* We're trying to use a pointer in place of a scalar. * Even if the scalar was unbounded, this could lead to * pointer leaks because scalars are allowed to leak * while pointers are not. We could make this safe in * special cases if root is calling us, but it's * probably not worth the hassle. */ return false; } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. 
*/ if (rold->off != rcur->off) return false; /* id relations must be preserved */ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) return false; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_STACK: case PTR_TO_PACKET_END: /* Only valid matches are exact, which memcmp() above * would have accepted */ default: /* Don't know what's going on, just say it's not safe */ return false; } /* Shouldn't get here; if we do, say it's not safe */ WARN_ON_ONCE(1); return false; } static bool stacksafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur, struct idpair *idmap) { int i, spi; /* if explored stack has more populated slots than current stack * such stacks are not equivalent */ if (old->allocated_stack > cur->allocated_stack) return false; /* walk slots of the explored stack and ignore any additional * slots in the current stack, since explored(safe) state * didn't use them */ for (i = 0; i < old->allocated_stack; i++) { spi = i / BPF_REG_SIZE; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) continue; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) /* Ex: old explored (safe) state has STACK_SPILL in * this stack slot, but current has has STACK_MISC -> * this verifier states are not equivalent, * return false to continue verification of this path */ return false; if (i % BPF_REG_SIZE) continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; if (!regsafe(&old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointers types * are the same as well. * Ex: explored safe path could have stored * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} * but current path has stored: * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} * such verifier states are not equivalent. * return false to continue verification of this path */ return false; } return true; } /* compare two verifier states * * all states stored in state_list are known to be valid, since * verifier reached 'bpf_exit' instruction through them * * this function is called when verifier exploring different branches of * execution popped from the state stack. If it sees an old state that has * more strict register state and more strict stack state then this execution * branch doesn't need to be explored further, since verifier already * concluded that more strict state leads to valid finish. * * Therefore two states are equivalent if register state is more conservative * and explored stack state is more conservative than the current one. * Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers. 
If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at a * jump target (in the first iteration of the propagate_liveness() loop), * we didn't arrive by the straight-line code, so read marks in state must * propagate to parent regardless of state's write marks. */ static bool do_propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ bool touched = false; /* any changes made? */ int i; if (!parent) return touched; /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (parent->regs[i].live & REG_LIVE_READ) continue; if (writes && (state->regs[i].live & REG_LIVE_WRITTEN)) continue; if (state->regs[i].live & REG_LIVE_READ) { parent->regs[i].live |= REG_LIVE_READ; touched = true; } } /* ... and stack slots */ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].slot_type[0] != STACK_SPILL) continue; if (state->stack[i].slot_type[0] != STACK_SPILL) continue; if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (writes && (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN)) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) { parent->stack[i].spilled_ptr.live |= REG_LIVE_READ; touched = true; } } return touched; } /* "parent" is "a state from which we reach the current state", but initially * it is not the state->parent (i.e. "the state whose straight-line code leads * to the current state"), instead it is the state that happened to arrive at * a (prunable) equivalent of the current state. See comment above * do_propagate_liveness() for consequences of this. * This function is just a more efficient way of calling mark_reg_read() or * mark_stack_slot_read() on each reg in "parent" that is read in "state", * though it requires that parent != state->parent in the call arguments. 
*/ static void propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { while (do_propagate_liveness(state, parent)) { /* Something changed, so we need to feed those changes onward */ state = parent; parent = state->parent; } } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state; int i, err; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ propagate_liveness(&sl->state, cur); return 1; } sl = sl->next; } /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach bpf_exit (which means it's safe) or * it will be rejected. Since there are no loops, we won't be * seeing this 'insn_idx' instruction again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ err = copy_verifier_state(&new_sl->state, cur); if (err) { free_verifier_state(&new_sl->state, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ cur->parent = &new_sl->state; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) */ for (i = 0; i < BPF_REG_FP; i++) cur->regs[i].live = REG_LIVE_NONE; for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++) if (cur->stack[i].slot_type[0] == STACK_SPILL) cur->stack[i].spilled_ptr.live = REG_LIVE_NONE; return 0; } static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { if (env->dev_ops && env->dev_ops->insn_hook) return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); return 0; } static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; env->cur_state = state; init_reg_state(env, state->regs); state->parent = NULL; insn_idx = 0; for (;;) { struct bpf_insn *insn; u8 class; int err; if (insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", insn_idx, insn_cnt); return -EFAULT; } insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. 
Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d: safe\n", prev_insn_idx, insn_idx); else verbose(env, "%d: safe\n", insn_idx); } goto process_bpf_exit; } if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", insn_idx); else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); print_verifier_state(env, state); do_print_state = false; } if (env->log.level) { verbose(env, "%d: ", insn_idx); print_bpf_insn(verbose, env, insn, env->allow_ptr_leaks); } err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); if (err) return err; regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg); if (err) return err; prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (src_reg_type != *prev_src_type && (src_reg_type == PTR_TO_CTX || *prev_src_type == PTR_TO_CTX)) { /* ABuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. 
*/ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn_idx, insn); if (err) return err; insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg); if (err) return err; prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (dst_reg_type != *prev_dst_type && (dst_reg_type == PTR_TO_CTX || *prev_dst_type == PTR_TO_CTX)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } err = check_call(env, insn->imm, insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. 
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns, stack depth %d\n", insn_processed, env->prog->aux->stack_depth); return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use * preallocated hash maps, since doing memory allocation * in overflow_handler can crash depending on where nmi got * triggered. */ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } return 0; } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) 
map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_bpf_prog_info() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; fdput(f); next_insn: insn++; i++; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. */ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { int i; for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insni[off] instruction was replaced with the range * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; int i; if (cnt == 1) return 0; new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (!new_prog) return NULL; if (adjust_insn_aux_data(env, new_prog->len, off, len)) return NULL; return new_prog; } /* The verifier does more data flow analysis than llvm and will not explore * branches that are dead at run time. Malicious programs can have dead code * too. Therefore replace all dead at-run-time code with nops. 
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &nop, sizeof(nop)); } } /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; u32 target_size; if (ops->gen_prologue) { cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (!ops->convert_ctx_access) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimum program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result. 
*/ is_narrower_load = size < ctx_field_size; if (is_narrower_load) { u32 off = insn->off; u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(ctx_field_size - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { if (ctx_field_size <= 4) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); else insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. */ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpeter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * handlers are currently limited to 64 bit only. */ if (ebpf_jit_enabled() && BITS_PER_LONG == 64 && insn->imm == BPF_FUNC_map_lookup_elem) { map_ptr = env->insn_aux_data[i + delta].map_ptr; if (map_ptr == BPF_MAP_PTR_POISON || !map_ptr->ops->map_gen_lookup) goto patch_call_imm; cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->imm == BPF_FUNC_redirect_map) { /* Note, we cannot use prog directly as imm as subsequent * rewrites would still change the prog pointer. The only * stable address we can use is aux, which also works with * prog clones during blinding. 
*/ u64 addr = (unsigned long)prog->aux; struct bpf_insn r4_ld[] = { BPF_LD_IMM64(BPF_REG_4, addr), *insn, }; cnt = ARRAY_SIZE(r4_ld); new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) { struct bpf_verifier_env *env; struct bpf_verifer_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * (*prog)->len); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; if (env->prog->aux->offload) { ret = bpf_prog_offload_verifier_prep(env); if (ret) goto err_unlock; } ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; ret = check_cfg(env); if (ret < 0) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) sanitize_dead_code(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if 
(!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_bpf_prog_info() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
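For orientation before the func_before/func_after fields: in the verifier code above, states_equal() decides whether an already-explored state subsumes the current one by running regsafe() on every register, and for scalars that comes down to range_within() and tnum_in() containment. The stand-alone sketch below models only the range part with plain 64-bit bounds; the struct and helper names (bounds, bounds_within) are invented for illustration and are not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the bounds tracked in struct bpf_reg_state.
 * Only the four scalar bounds are modelled; tnum tracking is omitted. */
struct bounds {
	uint64_t umin, umax;
	int64_t smin, smax;
};

/* Mirrors the shape of range_within() above: the explored (old) bounds must
 * contain the current (new) bounds for pruning to be sound. */
static bool bounds_within(const struct bounds *old, const struct bounds *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax &&
	       old->smin <= cur->smin && old->smax >= cur->smax;
}

int main(void)
{
	struct bounds explored = { .umin = 0, .umax = 255, .smin = 0, .smax = 255 };
	struct bounds tighter  = { .umin = 16, .umax = 32, .smin = 16, .smax = 32 };
	struct bounds wider    = { .umin = 0, .umax = 4096, .smin = 0, .smax = 4096 };

	/* [16,32] fits inside [0,255]: everything proven for the explored
	 * state also holds here, so this path could be pruned. */
	printf("tighter within explored: %d\n", bounds_within(&explored, &tighter));
	/* [0,4096] does not fit inside [0,255]: pruning would skip checks
	 * that depend on the bounds, so it must not happen. */
	printf("wider within explored:   %d\n", bounds_within(&explored, &wider));
	return 0;
}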
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* if we knew anything about the old value, we're not * equal, because we can't know anything about the * scalar value of the pointer in the new value. */ return rold->umin_value == 0 && rold->umax_value == U64_MAX && rold->smin_value == S64_MIN && rold->smax_value == S64_MAX && tnum_is_unknown(rold->var_off); } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. */ if (rold->off != rcur->off) return false; /* id relations must be preserved */ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) return false; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_STACK: case PTR_TO_PACKET_END: /* Only valid matches are exact, which memcmp() above * would have accepted */ default: /* Don't know what's going on, just say it's not safe */ return false; } /* Shouldn't get here; if we do, say it's not safe */ WARN_ON_ONCE(1); return false; }
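The func_before listing above is the pre-patch regsafe(). The branch relevant to CVE-2017-17855 (recorded further down in this entry) is the SCALAR_VALUE case when the current register is no longer a scalar: the old code still declared the states equivalent whenever the explored scalar was completely unbounded. A minimal model of that acceptance test follows; the struct and function names are invented for the example and are not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bounds for an explored SCALAR_VALUE register; the field names
 * echo struct bpf_reg_state but this struct exists only for the sketch. */
struct scalar_bounds {
	uint64_t umin_value, umax_value;
	int64_t smin_value, smax_value;
	bool var_off_unknown;		/* stands in for tnum_is_unknown(var_off) */
};

/* Pre-fix rule from the listing above, for the case "explored register was a
 * scalar, current register is not" (i.e. it now holds a pointer): the states
 * were still treated as equivalent whenever the explored scalar was fully
 * unbounded -- a state a program can reach deliberately. */
static bool old_rule_accepts_pointer(const struct scalar_bounds *rold)
{
	return rold->umin_value == 0 && rold->umax_value == UINT64_MAX &&
	       rold->smin_value == INT64_MIN && rold->smax_value == INT64_MAX &&
	       rold->var_off_unknown;
}

int main(void)
{
	struct scalar_bounds unknown = {
		.umin_value = 0, .umax_value = UINT64_MAX,
		.smin_value = INT64_MIN, .smax_value = INT64_MAX,
		.var_off_unknown = true,
	};
	struct scalar_bounds bounded = {
		.umin_value = 0, .umax_value = 255,
		.smin_value = 0, .smax_value = 255,
		.var_off_unknown = false,
	};

	/* Prints 1: the branch that now carries a pointer is pruned instead of
	 * being verified, which is the hole the patch closes. */
	printf("unknown scalar accepts pointer: %d\n", old_rule_accepts_pointer(&unknown));
	/* Prints 0: with any knowledge about the scalar, pruning is refused. */
	printf("bounded scalar accepts pointer: %d\n", old_rule_accepts_pointer(&bounded));
	return 0;
}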
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* We're trying to use a pointer in place of a scalar. * Even if the scalar was unbounded, this could lead to * pointer leaks because scalars are allowed to leak * while pointers are not. We could make this safe in * special cases if root is calling us, but it's * probably not worth the hassle. */ return false; } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. */ if (rold->off != rcur->off) return false; /* id relations must be preserved */ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) return false; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_STACK: case PTR_TO_PACKET_END: /* Only valid matches are exact, which memcmp() above * would have accepted */ default: /* Don't know what's going on, just say it's not safe */ return false; } /* Shouldn't get here; if we do, say it's not safe */ WARN_ON_ONCE(1); return false; }
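The func_after listing above replaces that acceptance test with an unconditional return false, so a pointer is never accepted where a scalar was verified. The sketch below, again with invented names and a toy register model, shows how that decision feeds the states_equal()-style loop seen earlier: a single unsafe register is enough to reject pruning and force the pointer-carrying branch to be verified on its own.

#include <stdbool.h>
#include <stdio.h>

#define NREGS 4	/* small stand-in for MAX_BPF_REG */

enum reg_kind { KIND_SCALAR, KIND_POINTER };

struct reg_model {
	enum reg_kind kind;
	bool fully_unknown;	/* only meaningful for scalars */
};

/* Post-fix rule in miniature: a pointer is never accepted where the explored
 * state had a scalar, no matter how little was known about that scalar. */
static bool reg_model_safe(const struct reg_model *rold,
			   const struct reg_model *rcur)
{
	if (rold->kind == KIND_SCALAR && rcur->kind == KIND_POINTER)
		return false;
	/* Everything else is treated as safe in this toy model. */
	return true;
}

/* Shape of the states_equal() loop above: one unsafe register rejects
 * pruning, so the current branch is explored and checked in full. */
static bool states_equal_model(const struct reg_model *old,
			       const struct reg_model *cur)
{
	for (int i = 0; i < NREGS; i++)
		if (!reg_model_safe(&old[i], &cur[i]))
			return false;
	return true;
}

int main(void)
{
	struct reg_model explored[NREGS] = {
		[0] = { KIND_SCALAR, true },	/* unknown scalar in R0 */
	};
	struct reg_model current[NREGS] = {
		[0] = { KIND_POINTER, false },	/* now a pointer in R0 */
	};

	/* With the patched rule this prints 0: the state is NOT pruned, so
	 * the pointer-carrying path gets fully re-checked. */
	printf("prune? %d\n", states_equal_model(explored, current));
	return 0;
}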
{'added': [(3470, "\t\t\t/* We're trying to use a pointer in place of a scalar."), (3471, '\t\t\t * Even if the scalar was unbounded, this could lead to'), (3472, '\t\t\t * pointer leaks because scalars are allowed to leak'), (3473, '\t\t\t * while pointers are not. We could make this safe in'), (3474, "\t\t\t * special cases if root is calling us, but it's"), (3475, '\t\t\t * probably not worth the hassle.'), (3477, '\t\t\treturn false;')], 'deleted': [(3470, "\t\t\t/* if we knew anything about the old value, we're not"), (3471, "\t\t\t * equal, because we can't know anything about the"), (3472, '\t\t\t * scalar value of the pointer in the new value.'), (3474, '\t\t\treturn rold->umin_value == 0 &&'), (3475, '\t\t\t rold->umax_value == U64_MAX &&'), (3476, '\t\t\t rold->smin_value == S64_MIN &&'), (3477, '\t\t\t rold->smax_value == S64_MAX &&'), (3478, '\t\t\t tnum_is_unknown(rold->var_off);')]}
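The diff field above lists the patch as (line number, line text) tuples for the patched and original file respectively; counting the tuples should reproduce the num_lines_added and num_lines_deleted values that follow. A trivial consistency check, hard-coding only the line numbers from the dict above (the array names are ours, not part of the record):

#include <stdio.h>

int main(void)
{
	/* Patched-file line numbers taken from the 'added' list above. */
	int added[]   = { 3470, 3471, 3472, 3473, 3474, 3475, 3477 };
	/* Original-file line numbers taken from the 'deleted' list above. */
	int deleted[] = { 3470, 3471, 3472, 3474, 3475, 3476, 3477, 3478 };

	/* Prints "7 added, 8 deleted", matching the two count fields that
	 * follow in this record. */
	printf("%zu added, %zu deleted\n",
	       sizeof(added) / sizeof(added[0]),
	       sizeof(deleted) / sizeof(deleted[0]));
	return 0;
}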
7
8
3,247
20,660
https://github.com/torvalds/linux
CVE-2017-17855
['CWE-119']
print-nfs.c
nfs_printfh
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Network File System (NFS) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "nfs.h" #include "nfsfh.h" #include "ip.h" #include "ip6.h" #include "rpc_auth.h" #include "rpc_msg.h" static const char tstr[] = " [|nfs]"; static void nfs_printfh(netdissect_options *, const uint32_t *, const u_int); static int xid_map_enter(netdissect_options *, const struct sunrpc_msg *, const u_char *); static int xid_map_find(const struct sunrpc_msg *, const u_char *, uint32_t *, uint32_t *); static void interp_reply(netdissect_options *, const struct sunrpc_msg *, uint32_t, uint32_t, int); static const uint32_t *parse_post_op_attr(netdissect_options *, const uint32_t *, int); /* * Mapping of old NFS Version 2 RPC numbers to generic numbers. */ static uint32_t nfsv3_procid[NFS_NPROCS] = { NFSPROC_NULL, NFSPROC_GETATTR, NFSPROC_SETATTR, NFSPROC_NOOP, NFSPROC_LOOKUP, NFSPROC_READLINK, NFSPROC_READ, NFSPROC_NOOP, NFSPROC_WRITE, NFSPROC_CREATE, NFSPROC_REMOVE, NFSPROC_RENAME, NFSPROC_LINK, NFSPROC_SYMLINK, NFSPROC_MKDIR, NFSPROC_RMDIR, NFSPROC_READDIR, NFSPROC_FSSTAT, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP }; static const struct tok nfsproc_str[] = { { NFSPROC_NOOP, "nop" }, { NFSPROC_NULL, "null" }, { NFSPROC_GETATTR, "getattr" }, { NFSPROC_SETATTR, "setattr" }, { NFSPROC_LOOKUP, "lookup" }, { NFSPROC_ACCESS, "access" }, { NFSPROC_READLINK, "readlink" }, { NFSPROC_READ, "read" }, { NFSPROC_WRITE, "write" }, { NFSPROC_CREATE, "create" }, { NFSPROC_MKDIR, "mkdir" }, { NFSPROC_SYMLINK, "symlink" }, { NFSPROC_MKNOD, "mknod" }, { NFSPROC_REMOVE, "remove" }, { NFSPROC_RMDIR, "rmdir" }, { NFSPROC_RENAME, "rename" }, { NFSPROC_LINK, "link" }, { NFSPROC_READDIR, "readdir" }, { NFSPROC_READDIRPLUS, "readdirplus" }, { NFSPROC_FSSTAT, "fsstat" }, { NFSPROC_FSINFO, "fsinfo" }, { NFSPROC_PATHCONF, "pathconf" }, { NFSPROC_COMMIT, "commit" }, { 0, NULL } }; /* * NFS V2 and V3 status values. * * Some of these come from the RFCs for NFS V2 and V3, with the message * strings taken from the FreeBSD C library "errlst.c". 
* * Others are errors that are not in the RFC but that I suspect some * NFS servers could return; the values are FreeBSD errno values, as * the first NFS server was the SunOS 2.0 one, and until 5.0 SunOS * was primarily BSD-derived. */ static const struct tok status2str[] = { { 1, "Operation not permitted" }, /* EPERM */ { 2, "No such file or directory" }, /* ENOENT */ { 5, "Input/output error" }, /* EIO */ { 6, "Device not configured" }, /* ENXIO */ { 11, "Resource deadlock avoided" }, /* EDEADLK */ { 12, "Cannot allocate memory" }, /* ENOMEM */ { 13, "Permission denied" }, /* EACCES */ { 17, "File exists" }, /* EEXIST */ { 18, "Cross-device link" }, /* EXDEV */ { 19, "Operation not supported by device" }, /* ENODEV */ { 20, "Not a directory" }, /* ENOTDIR */ { 21, "Is a directory" }, /* EISDIR */ { 22, "Invalid argument" }, /* EINVAL */ { 26, "Text file busy" }, /* ETXTBSY */ { 27, "File too large" }, /* EFBIG */ { 28, "No space left on device" }, /* ENOSPC */ { 30, "Read-only file system" }, /* EROFS */ { 31, "Too many links" }, /* EMLINK */ { 45, "Operation not supported" }, /* EOPNOTSUPP */ { 62, "Too many levels of symbolic links" }, /* ELOOP */ { 63, "File name too long" }, /* ENAMETOOLONG */ { 66, "Directory not empty" }, /* ENOTEMPTY */ { 69, "Disc quota exceeded" }, /* EDQUOT */ { 70, "Stale NFS file handle" }, /* ESTALE */ { 71, "Too many levels of remote in path" }, /* EREMOTE */ { 99, "Write cache flushed to disk" }, /* NFSERR_WFLUSH (not used) */ { 10001, "Illegal NFS file handle" }, /* NFS3ERR_BADHANDLE */ { 10002, "Update synchronization mismatch" }, /* NFS3ERR_NOT_SYNC */ { 10003, "READDIR/READDIRPLUS cookie is stale" }, /* NFS3ERR_BAD_COOKIE */ { 10004, "Operation not supported" }, /* NFS3ERR_NOTSUPP */ { 10005, "Buffer or request is too small" }, /* NFS3ERR_TOOSMALL */ { 10006, "Unspecified error on server" }, /* NFS3ERR_SERVERFAULT */ { 10007, "Object of that type not supported" }, /* NFS3ERR_BADTYPE */ { 10008, "Request couldn't be completed in time" }, /* NFS3ERR_JUKEBOX */ { 0, NULL } }; static const struct tok nfsv3_writemodes[] = { { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" }, { 0, NULL } }; static const struct tok type2str[] = { { NFNON, "NON" }, { NFREG, "REG" }, { NFDIR, "DIR" }, { NFBLK, "BLK" }, { NFCHR, "CHR" }, { NFLNK, "LNK" }, { NFFIFO, "FIFO" }, { 0, NULL } }; static const struct tok sunrpc_auth_str[] = { { SUNRPC_AUTH_OK, "OK" }, { SUNRPC_AUTH_BADCRED, "Bogus Credentials (seal broken)" }, { SUNRPC_AUTH_REJECTEDCRED, "Rejected Credentials (client should begin new session)" }, { SUNRPC_AUTH_BADVERF, "Bogus Verifier (seal broken)" }, { SUNRPC_AUTH_REJECTEDVERF, "Verifier expired or was replayed" }, { SUNRPC_AUTH_TOOWEAK, "Credentials are too weak" }, { SUNRPC_AUTH_INVALIDRESP, "Bogus response verifier" }, { SUNRPC_AUTH_FAILED, "Unknown failure" }, { 0, NULL } }; static const struct tok sunrpc_str[] = { { SUNRPC_PROG_UNAVAIL, "PROG_UNAVAIL" }, { SUNRPC_PROG_MISMATCH, "PROG_MISMATCH" }, { SUNRPC_PROC_UNAVAIL, "PROC_UNAVAIL" }, { SUNRPC_GARBAGE_ARGS, "GARBAGE_ARGS" }, { SUNRPC_SYSTEM_ERR, "SYSTEM_ERR" }, { 0, NULL } }; static void print_nfsaddr(netdissect_options *ndo, const u_char *bp, const char *s, const char *d) { const struct ip *ip; const struct ip6_hdr *ip6; char srcaddr[INET6_ADDRSTRLEN], dstaddr[INET6_ADDRSTRLEN]; srcaddr[0] = dstaddr[0] = '\0'; switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; strlcpy(srcaddr, ipaddr_string(ndo, &ip->ip_src), sizeof(srcaddr)); strlcpy(dstaddr, ipaddr_string(ndo, &ip->ip_dst), 
sizeof(dstaddr)); break; case 6: ip6 = (const struct ip6_hdr *)bp; strlcpy(srcaddr, ip6addr_string(ndo, &ip6->ip6_src), sizeof(srcaddr)); strlcpy(dstaddr, ip6addr_string(ndo, &ip6->ip6_dst), sizeof(dstaddr)); break; default: strlcpy(srcaddr, "?", sizeof(srcaddr)); strlcpy(dstaddr, "?", sizeof(dstaddr)); break; } ND_PRINT((ndo, "%s.%s > %s.%s: ", srcaddr, s, dstaddr, d)); } static const uint32_t * parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; } static int nfserr; /* true if we error rather than trunc */ static void print_sattr3(netdissect_options *ndo, const struct nfsv3_sattr *sa3, int verbose) { if (sa3->sa_modeset) ND_PRINT((ndo, " mode %o", sa3->sa_mode)); if (sa3->sa_uidset) ND_PRINT((ndo, " uid %u", sa3->sa_uid)); if (sa3->sa_gidset) ND_PRINT((ndo, " gid %u", sa3->sa_gid)); if (verbose > 1) { if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " atime %u.%06u", sa3->sa_atime.nfsv3_sec, sa3->sa_atime.nfsv3_nsec)); if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " mtime %u.%06u", sa3->sa_mtime.nfsv3_sec, sa3->sa_mtime.nfsv3_nsec)); } } void nfsreply_print(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; char srcid[20], dstid[20]; /*fits 32bit*/ nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_xid); if (!ndo->ndo_nflag) { strlcpy(srcid, "nfs", sizeof(srcid)); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } else { snprintf(srcid, sizeof(srcid), "%u", NFS_PORT); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } print_nfsaddr(ndo, bp2, srcid, dstid); nfsreply_print_noaddr(ndo, bp, length, bp2); return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } void nfsreply_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; uint32_t proc, vers, reply_stat; enum sunrpc_reject_stat rstat; uint32_t rlow; uint32_t rhigh; enum sunrpc_auth_stat rwhy; nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_reply.rp_stat); reply_stat = EXTRACT_32BITS(&rp->rm_reply.rp_stat); switch (reply_stat) { case SUNRPC_MSG_ACCEPTED: ND_PRINT((ndo, "reply ok %u", length)); if (xid_map_find(rp, bp2, &proc, &vers) >= 0) interp_reply(ndo, rp, proc, vers, length); break; case SUNRPC_MSG_DENIED: 
ND_PRINT((ndo, "reply ERR %u: ", length)); ND_TCHECK(rp->rm_reply.rp_reject.rj_stat); rstat = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_stat); switch (rstat) { case SUNRPC_RPC_MISMATCH: ND_TCHECK(rp->rm_reply.rp_reject.rj_vers.high); rlow = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.low); rhigh = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.high); ND_PRINT((ndo, "RPC Version mismatch (%u-%u)", rlow, rhigh)); break; case SUNRPC_AUTH_ERROR: ND_TCHECK(rp->rm_reply.rp_reject.rj_why); rwhy = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_why); ND_PRINT((ndo, "Auth %s", tok2str(sunrpc_auth_str, "Invalid failure code %u", rwhy))); break; default: ND_PRINT((ndo, "Unknown reason for rejecting rpc message %u", (unsigned int)rstat)); break; } break; default: ND_PRINT((ndo, "reply Unknown rpc response code=%u %u", reply_stat, length)); break; } return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Return a pointer to the first file handle in the packet. * If the packet was truncated, return 0. */ static const uint32_t * parsereq(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; register u_int len; /* * find the start of the req data (if we captured it) */ dp = (const uint32_t *)&rp->rm_call.cb_cred; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK2(dp[0], 0); return (dp); } } trunc: return (NULL); } /* * Print out an NFS file handle and return a pointer to following word. * If packet was truncated, return 0. */ static const uint32_t * parsefh(netdissect_options *ndo, register const uint32_t *dp, int v3) { u_int len; if (v3) { ND_TCHECK(dp[0]); len = EXTRACT_32BITS(dp) / 4; dp++; } else len = NFSX_V2FH / 4; if (ND_TTEST2(*dp, len * sizeof(*dp))) { nfs_printfh(ndo, dp, len); return (dp + len); } trunc: return (NULL); } /* * Print out a file name and return pointer to 32-bit word past it. * If packet was truncated, return 0. */ static const uint32_t * parsefn(netdissect_options *ndo, register const uint32_t *dp) { register uint32_t len; register const u_char *cp; /* Bail if we don't have the string length */ ND_TCHECK(*dp); /* Fetch string length; convert to host order */ len = *dp++; NTOHL(len); ND_TCHECK2(*dp, ((len + 3) & ~3)); cp = (const u_char *)dp; /* Update 32-bit pointer (NFS filenames padded to 32-bit boundaries) */ dp += ((len + 3) & ~3) / sizeof(*dp); ND_PRINT((ndo, "\"")); if (fn_printn(ndo, cp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); return (dp); trunc: return NULL; } /* * Print out file handle and file name. * Return pointer to 32-bit word past file name. * If packet was truncated (or there was some other error), return 0. 
*/ static const uint32_t * parsefhn(netdissect_options *ndo, register const uint32_t *dp, int v3) { dp = parsefh(ndo, dp, v3); if (dp == NULL) return (NULL); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp)); } void nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[3])))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) break; if (v3 && 
ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Print out an NFS file handle. * We assume packet was not truncated before the end of the * file handle pointed to by dp. * * Note: new version (using portable file-handle parser) doesn't produce * generation number. It probably could be made to do that, with some * additional hacking on the parser code. 
*/ static void nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ static char temp[NFSX_V3FHMAX+1]; /* Make sure string is null-terminated */ strncpy(temp, sfsname, NFSX_V3FHMAX); temp[sizeof(temp) - 1] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); } /* * Maintain a small cache of recent client.XID.server/proc pairs, to allow * us to match up replies with requests and thus to know how to parse * the reply. */ struct xid_map_entry { uint32_t xid; /* transaction ID (net order) */ int ipver; /* IP version (4 or 6) */ struct in6_addr client; /* client IP address (net order) */ struct in6_addr server; /* server IP address (net order) */ uint32_t proc; /* call proc number (host order) */ uint32_t vers; /* program version (host order) */ }; /* * Map entries are kept in an array that we manage as a ring; * new entries are always added at the tail of the ring. Initially, * all the entries are zero and hence don't match anything. */ #define XIDMAPSIZE 64 static struct xid_map_entry xid_map[XIDMAPSIZE]; static int xid_map_next = 0; static int xid_map_hint = 0; static int xid_map_enter(netdissect_options *ndo, const struct sunrpc_msg *rp, const u_char *bp) { const struct ip *ip = NULL; const struct ip6_hdr *ip6 = NULL; struct xid_map_entry *xmep; if (!ND_TTEST(rp->rm_call.cb_vers)) return (0); switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; break; case 6: ip6 = (const struct ip6_hdr *)bp; break; default: return (1); } xmep = &xid_map[xid_map_next]; if (++xid_map_next >= XIDMAPSIZE) xid_map_next = 0; UNALIGNED_MEMCPY(&xmep->xid, &rp->rm_xid, sizeof(xmep->xid)); if (ip) { xmep->ipver = 4; UNALIGNED_MEMCPY(&xmep->client, &ip->ip_src, sizeof(ip->ip_src)); UNALIGNED_MEMCPY(&xmep->server, &ip->ip_dst, sizeof(ip->ip_dst)); } else if (ip6) { xmep->ipver = 6; UNALIGNED_MEMCPY(&xmep->client, &ip6->ip6_src, sizeof(ip6->ip6_src)); UNALIGNED_MEMCPY(&xmep->server, &ip6->ip6_dst, sizeof(ip6->ip6_dst)); } xmep->proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); xmep->vers = EXTRACT_32BITS(&rp->rm_call.cb_vers); return (1); } /* * Returns 0 and puts NFSPROC_xxx in proc return and * version in vers return, or returns -1 on failure */ static int xid_map_find(const struct sunrpc_msg *rp, const u_char *bp, uint32_t *proc, uint32_t *vers) { int i; struct xid_map_entry *xmep; uint32_t xid; const struct ip *ip = (const struct ip *)bp; const struct ip6_hdr *ip6 = (const struct ip6_hdr *)bp; int cmp; UNALIGNED_MEMCPY(&xid, &rp->rm_xid, sizeof(xmep->xid)); /* Start searching from where we last left off */ i = xid_map_hint; do { xmep = &xid_map[i]; cmp = 1; if (xmep->ipver != IP_V(ip) || xmep->xid != xid) goto nextitem; switch (xmep->ipver) { case 4: if (UNALIGNED_MEMCMP(&ip->ip_src, &xmep->server, sizeof(ip->ip_src)) != 0 || UNALIGNED_MEMCMP(&ip->ip_dst, &xmep->client, 
sizeof(ip->ip_dst)) != 0) { cmp = 0; } break; case 6: if (UNALIGNED_MEMCMP(&ip6->ip6_src, &xmep->server, sizeof(ip6->ip6_src)) != 0 || UNALIGNED_MEMCMP(&ip6->ip6_dst, &xmep->client, sizeof(ip6->ip6_dst)) != 0) { cmp = 0; } break; default: cmp = 0; break; } if (cmp) { /* match */ xid_map_hint = i; *proc = xmep->proc; *vers = xmep->vers; return 0; } nextitem: if (++i >= XIDMAPSIZE) i = 0; } while (i != xid_map_hint); /* search failed */ return (-1); } /* * Routines for parsing reply packets */ /* * Return a pointer to the beginning of the actual results. * If the packet was truncated, return 0. */ static const uint32_t * parserep(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; u_int len; enum sunrpc_accept_stat astat; /* * Portability note: * Here we find the address of the ar_verf credentials. * Originally, this calculation was * dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf * On the wire, the rp_acpt field starts immediately after * the (32 bit) rp_stat field. However, rp_acpt (which is a * "struct accepted_reply") contains a "struct opaque_auth", * whose internal representation contains a pointer, so on a * 64-bit machine the compiler inserts 32 bits of padding * before rp->rm_reply.rp_acpt.ar_verf. So, we cannot use * the internal representation to parse the on-the-wire * representation. Instead, we skip past the rp_stat field, * which is an "enum" and so occupies one 32-bit word. */ dp = ((const uint32_t *)&rp->rm_reply) + 1; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len >= length) return (NULL); /* * skip past the ar_verf credentials. */ dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t); /* * now we can check the ar_stat field */ ND_TCHECK(dp[0]); astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp); if (astat != SUNRPC_SUCCESS) { ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat))); nfserr = 1; /* suppress trunc string */ return (NULL); } /* successful return */ ND_TCHECK2(*dp, sizeof(astat)); return ((const uint32_t *) (sizeof(astat) + ((const char *)dp))); trunc: return (0); } static const uint32_t * parsestatus(netdissect_options *ndo, const uint32_t *dp, int *er) { int errnum; ND_TCHECK(dp[0]); errnum = EXTRACT_32BITS(&dp[0]); if (er) *er = errnum; if (errnum != 0) { if (!ndo->ndo_qflag) ND_PRINT((ndo, " ERROR: %s", tok2str(status2str, "unk %d", errnum))); nfserr = 1; } return (dp + 1); trunc: return NULL; } static const uint32_t * parsefattr(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { const struct nfs_fattr *fap; fap = (const struct nfs_fattr *)dp; ND_TCHECK(fap->fa_gid); if (verbose) { ND_PRINT((ndo, " %s %o ids %d/%d", tok2str(type2str, "unk-ft %d ", EXTRACT_32BITS(&fap->fa_type)), EXTRACT_32BITS(&fap->fa_mode), EXTRACT_32BITS(&fap->fa_uid), EXTRACT_32BITS(&fap->fa_gid))); if (v3) { ND_TCHECK(fap->fa3_size); ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_size))); } else { ND_TCHECK(fap->fa2_size); ND_PRINT((ndo, " sz %d", EXTRACT_32BITS(&fap->fa2_size))); } } /* print lots more stuff */ if (verbose > 1) { if (v3) { ND_TCHECK(fap->fa3_ctime); ND_PRINT((ndo, " nlink %d rdev %d/%d", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa3_rdev.specdata1), EXTRACT_32BITS(&fap->fa3_rdev.specdata2))); ND_PRINT((ndo, " fsid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fsid))); ND_PRINT((ndo, " fileid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fileid))); ND_PRINT((ndo, " a/m/ctime %u.%06u", 
EXTRACT_32BITS(&fap->fa3_atime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_atime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_nsec))); } else { ND_TCHECK(fap->fa2_ctime); ND_PRINT((ndo, " nlink %d rdev 0x%x fsid 0x%x nodeid 0x%x a/m/ctime", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa2_rdev), EXTRACT_32BITS(&fap->fa2_fsid), EXTRACT_32BITS(&fap->fa2_fileid))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_atime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_atime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_usec))); } } return ((const uint32_t *)((const unsigned char *)dp + (v3 ? NFSX_V3FATTR : NFSX_V2FATTR))); trunc: return (NULL); } static int parseattrstat(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); return (parsefattr(ndo, dp, verbose, v3) != NULL); } static int parsediropres(netdissect_options *ndo, const uint32_t *dp) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) return (1); dp = parsefh(ndo, dp, 0); if (dp == NULL) return (0); return (parsefattr(ndo, dp, ndo->ndo_vflag, 0) != NULL); } static int parselinkres(netdissect_options *ndo, const uint32_t *dp, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return(0); if (er) return(1); if (v3 && !(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp) != NULL); } static int parsestatfs(netdissect_options *ndo, const uint32_t *dp, int v3) { const struct nfs_statfs *sfsp; int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (!v3 && er) return (1); if (ndo->ndo_qflag) return(1); if (v3) { if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); } ND_TCHECK2(*dp, (v3 ? 
NFSX_V3STATFS : NFSX_V2STATFS)); sfsp = (const struct nfs_statfs *)dp; if (v3) { ND_PRINT((ndo, " tbytes %" PRIu64 " fbytes %" PRIu64 " abytes %" PRIu64, EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_fbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_abytes))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " tfiles %" PRIu64 " ffiles %" PRIu64 " afiles %" PRIu64 " invar %u", EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tfiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_ffiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_afiles), EXTRACT_32BITS(&sfsp->sf_invarsec))); } } else { ND_PRINT((ndo, " tsize %d bsize %d blocks %d bfree %d bavail %d", EXTRACT_32BITS(&sfsp->sf_tsize), EXTRACT_32BITS(&sfsp->sf_bsize), EXTRACT_32BITS(&sfsp->sf_blocks), EXTRACT_32BITS(&sfsp->sf_bfree), EXTRACT_32BITS(&sfsp->sf_bavail))); } return (1); trunc: return (0); } static int parserddires(netdissect_options *ndo, const uint32_t *dp) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); if (ndo->ndo_qflag) return (1); ND_TCHECK(dp[2]); ND_PRINT((ndo, " offset 0x%x size %d ", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); if (dp[2] != 0) ND_PRINT((ndo, " eof")); return (1); trunc: return (0); } static const uint32_t * parse_wcc_attr(netdissect_options *ndo, const uint32_t *dp) { /* Our caller has already checked this */ ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS(&dp[0]))); ND_PRINT((ndo, " mtime %u.%06u ctime %u.%06u", EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[5]))); return (dp + 6); } /* * Pre operation attributes. Print only if vflag > 1. */ static const uint32_t * parse_pre_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; ND_TCHECK2(*dp, 24); if (verbose > 1) { return parse_wcc_attr(ndo, dp); } else { /* If not verbose enough, just skip over wcc_attr */ return (dp + 6); } trunc: return (NULL); } /* * Post operation attributes are printed if vflag >= 1 */ static const uint32_t * parse_post_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (verbose) { return parsefattr(ndo, dp, verbose, 1); } else return (dp + (NFSX_V3FATTR / sizeof (uint32_t))); trunc: return (NULL); } static const uint32_t * parse_wcc_data(netdissect_options *ndo, const uint32_t *dp, int verbose) { if (verbose > 1) ND_PRINT((ndo, " PRE:")); if (!(dp = parse_pre_op_attr(ndo, dp, verbose))) return (0); if (verbose) ND_PRINT((ndo, " POST:")); return parse_post_op_attr(ndo, dp, verbose); } static const uint32_t * parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); } static int parsewccres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); return parse_wcc_data(ndo, dp, verbose) != NULL; } static const uint32_t * parsev3rddirres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = 
parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (er) return dp; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " verf %08x%08x", dp[0], dp[1])); dp += 2; } return dp; trunc: return (NULL); } static int parsefsinfo(netdissect_options *ndo, const uint32_t *dp) { const struct nfsv3_fsinfo *sfp; int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); sfp = (const struct nfsv3_fsinfo *)dp; ND_TCHECK(*sfp); ND_PRINT((ndo, " rtmax %u rtpref %u wtmax %u wtpref %u dtpref %u", EXTRACT_32BITS(&sfp->fs_rtmax), EXTRACT_32BITS(&sfp->fs_rtpref), EXTRACT_32BITS(&sfp->fs_wtmax), EXTRACT_32BITS(&sfp->fs_wtpref), EXTRACT_32BITS(&sfp->fs_dtpref))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " rtmult %u wtmult %u maxfsz %" PRIu64, EXTRACT_32BITS(&sfp->fs_rtmult), EXTRACT_32BITS(&sfp->fs_wtmult), EXTRACT_64BITS((const uint32_t *)&sfp->fs_maxfilesize))); ND_PRINT((ndo, " delta %u.%06u ", EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_sec), EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_nsec))); } return (1); trunc: return (0); } static int parsepathconf(netdissect_options *ndo, const uint32_t *dp) { int er; const struct nfsv3_pathconf *spp; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); spp = (const struct nfsv3_pathconf *)dp; ND_TCHECK(*spp); ND_PRINT((ndo, " linkmax %u namemax %u %s %s %s %s", EXTRACT_32BITS(&spp->pc_linkmax), EXTRACT_32BITS(&spp->pc_namemax), EXTRACT_32BITS(&spp->pc_notrunc) ? "notrunc" : "", EXTRACT_32BITS(&spp->pc_chownrestricted) ? "chownres" : "", EXTRACT_32BITS(&spp->pc_caseinsensitive) ? "igncase" : "", EXTRACT_32BITS(&spp->pc_casepreserving) ? 
"keepcase" : "")); return (1); trunc: return (0); } static void interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); } return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if 
(ndo->ndo_vflag) { ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
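/*
 * Illustrative sketch (not part of the original print-nfs.c): nfs_printfh()
 * above copies a file-system name returned by Parse_fh() into a local buffer
 * before printing it, and that name is not guaranteed to be NUL-terminated or
 * to fill NFSX_V3FHMAX bytes.  A bounded copy therefore has to honour both the
 * handle length supplied by the caller and the size of the destination buffer.
 * The helper below is a hypothetical, self-contained example of that pattern;
 * "copy_fsname" and "fh_len" are names invented for this sketch only and do
 * not appear in the file itself.  memcpy() is used rather than strncpy() so
 * the copy length never depends on finding a terminator in the source.
 */
#include <string.h>

static void
copy_fsname(char *out, size_t outsz, const char *sfsname, size_t fh_len)
{
	size_t n;

	if (outsz == 0)
		return;			/* no room to write anything safely */
	n = fh_len;
	if (n > outsz - 1)
		n = outsz - 1;		/* clamp to the destination buffer */
	memcpy(out, sfsname, n);	/* copy only bytes known to exist */
	out[n] = '\0';			/* terminate explicitly before printing */
}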
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Network File System (NFS) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "nfs.h" #include "nfsfh.h" #include "ip.h" #include "ip6.h" #include "rpc_auth.h" #include "rpc_msg.h" static const char tstr[] = " [|nfs]"; static void nfs_printfh(netdissect_options *, const uint32_t *, const u_int); static int xid_map_enter(netdissect_options *, const struct sunrpc_msg *, const u_char *); static int xid_map_find(const struct sunrpc_msg *, const u_char *, uint32_t *, uint32_t *); static void interp_reply(netdissect_options *, const struct sunrpc_msg *, uint32_t, uint32_t, int); static const uint32_t *parse_post_op_attr(netdissect_options *, const uint32_t *, int); /* * Mapping of old NFS Version 2 RPC numbers to generic numbers. */ static uint32_t nfsv3_procid[NFS_NPROCS] = { NFSPROC_NULL, NFSPROC_GETATTR, NFSPROC_SETATTR, NFSPROC_NOOP, NFSPROC_LOOKUP, NFSPROC_READLINK, NFSPROC_READ, NFSPROC_NOOP, NFSPROC_WRITE, NFSPROC_CREATE, NFSPROC_REMOVE, NFSPROC_RENAME, NFSPROC_LINK, NFSPROC_SYMLINK, NFSPROC_MKDIR, NFSPROC_RMDIR, NFSPROC_READDIR, NFSPROC_FSSTAT, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP, NFSPROC_NOOP }; static const struct tok nfsproc_str[] = { { NFSPROC_NOOP, "nop" }, { NFSPROC_NULL, "null" }, { NFSPROC_GETATTR, "getattr" }, { NFSPROC_SETATTR, "setattr" }, { NFSPROC_LOOKUP, "lookup" }, { NFSPROC_ACCESS, "access" }, { NFSPROC_READLINK, "readlink" }, { NFSPROC_READ, "read" }, { NFSPROC_WRITE, "write" }, { NFSPROC_CREATE, "create" }, { NFSPROC_MKDIR, "mkdir" }, { NFSPROC_SYMLINK, "symlink" }, { NFSPROC_MKNOD, "mknod" }, { NFSPROC_REMOVE, "remove" }, { NFSPROC_RMDIR, "rmdir" }, { NFSPROC_RENAME, "rename" }, { NFSPROC_LINK, "link" }, { NFSPROC_READDIR, "readdir" }, { NFSPROC_READDIRPLUS, "readdirplus" }, { NFSPROC_FSSTAT, "fsstat" }, { NFSPROC_FSINFO, "fsinfo" }, { NFSPROC_PATHCONF, "pathconf" }, { NFSPROC_COMMIT, "commit" }, { 0, NULL } }; /* * NFS V2 and V3 status values. * * Some of these come from the RFCs for NFS V2 and V3, with the message * strings taken from the FreeBSD C library "errlst.c". 
* * Others are errors that are not in the RFC but that I suspect some * NFS servers could return; the values are FreeBSD errno values, as * the first NFS server was the SunOS 2.0 one, and until 5.0 SunOS * was primarily BSD-derived. */ static const struct tok status2str[] = { { 1, "Operation not permitted" }, /* EPERM */ { 2, "No such file or directory" }, /* ENOENT */ { 5, "Input/output error" }, /* EIO */ { 6, "Device not configured" }, /* ENXIO */ { 11, "Resource deadlock avoided" }, /* EDEADLK */ { 12, "Cannot allocate memory" }, /* ENOMEM */ { 13, "Permission denied" }, /* EACCES */ { 17, "File exists" }, /* EEXIST */ { 18, "Cross-device link" }, /* EXDEV */ { 19, "Operation not supported by device" }, /* ENODEV */ { 20, "Not a directory" }, /* ENOTDIR */ { 21, "Is a directory" }, /* EISDIR */ { 22, "Invalid argument" }, /* EINVAL */ { 26, "Text file busy" }, /* ETXTBSY */ { 27, "File too large" }, /* EFBIG */ { 28, "No space left on device" }, /* ENOSPC */ { 30, "Read-only file system" }, /* EROFS */ { 31, "Too many links" }, /* EMLINK */ { 45, "Operation not supported" }, /* EOPNOTSUPP */ { 62, "Too many levels of symbolic links" }, /* ELOOP */ { 63, "File name too long" }, /* ENAMETOOLONG */ { 66, "Directory not empty" }, /* ENOTEMPTY */ { 69, "Disc quota exceeded" }, /* EDQUOT */ { 70, "Stale NFS file handle" }, /* ESTALE */ { 71, "Too many levels of remote in path" }, /* EREMOTE */ { 99, "Write cache flushed to disk" }, /* NFSERR_WFLUSH (not used) */ { 10001, "Illegal NFS file handle" }, /* NFS3ERR_BADHANDLE */ { 10002, "Update synchronization mismatch" }, /* NFS3ERR_NOT_SYNC */ { 10003, "READDIR/READDIRPLUS cookie is stale" }, /* NFS3ERR_BAD_COOKIE */ { 10004, "Operation not supported" }, /* NFS3ERR_NOTSUPP */ { 10005, "Buffer or request is too small" }, /* NFS3ERR_TOOSMALL */ { 10006, "Unspecified error on server" }, /* NFS3ERR_SERVERFAULT */ { 10007, "Object of that type not supported" }, /* NFS3ERR_BADTYPE */ { 10008, "Request couldn't be completed in time" }, /* NFS3ERR_JUKEBOX */ { 0, NULL } }; static const struct tok nfsv3_writemodes[] = { { 0, "unstable" }, { 1, "datasync" }, { 2, "filesync" }, { 0, NULL } }; static const struct tok type2str[] = { { NFNON, "NON" }, { NFREG, "REG" }, { NFDIR, "DIR" }, { NFBLK, "BLK" }, { NFCHR, "CHR" }, { NFLNK, "LNK" }, { NFFIFO, "FIFO" }, { 0, NULL } }; static const struct tok sunrpc_auth_str[] = { { SUNRPC_AUTH_OK, "OK" }, { SUNRPC_AUTH_BADCRED, "Bogus Credentials (seal broken)" }, { SUNRPC_AUTH_REJECTEDCRED, "Rejected Credentials (client should begin new session)" }, { SUNRPC_AUTH_BADVERF, "Bogus Verifier (seal broken)" }, { SUNRPC_AUTH_REJECTEDVERF, "Verifier expired or was replayed" }, { SUNRPC_AUTH_TOOWEAK, "Credentials are too weak" }, { SUNRPC_AUTH_INVALIDRESP, "Bogus response verifier" }, { SUNRPC_AUTH_FAILED, "Unknown failure" }, { 0, NULL } }; static const struct tok sunrpc_str[] = { { SUNRPC_PROG_UNAVAIL, "PROG_UNAVAIL" }, { SUNRPC_PROG_MISMATCH, "PROG_MISMATCH" }, { SUNRPC_PROC_UNAVAIL, "PROC_UNAVAIL" }, { SUNRPC_GARBAGE_ARGS, "GARBAGE_ARGS" }, { SUNRPC_SYSTEM_ERR, "SYSTEM_ERR" }, { 0, NULL } }; static void print_nfsaddr(netdissect_options *ndo, const u_char *bp, const char *s, const char *d) { const struct ip *ip; const struct ip6_hdr *ip6; char srcaddr[INET6_ADDRSTRLEN], dstaddr[INET6_ADDRSTRLEN]; srcaddr[0] = dstaddr[0] = '\0'; switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; strlcpy(srcaddr, ipaddr_string(ndo, &ip->ip_src), sizeof(srcaddr)); strlcpy(dstaddr, ipaddr_string(ndo, &ip->ip_dst), 
sizeof(dstaddr)); break; case 6: ip6 = (const struct ip6_hdr *)bp; strlcpy(srcaddr, ip6addr_string(ndo, &ip6->ip6_src), sizeof(srcaddr)); strlcpy(dstaddr, ip6addr_string(ndo, &ip6->ip6_dst), sizeof(dstaddr)); break; default: strlcpy(srcaddr, "?", sizeof(srcaddr)); strlcpy(dstaddr, "?", sizeof(dstaddr)); break; } ND_PRINT((ndo, "%s.%s > %s.%s: ", srcaddr, s, dstaddr, d)); } static const uint32_t * parse_sattr3(netdissect_options *ndo, const uint32_t *dp, struct nfsv3_sattr *sa3) { ND_TCHECK(dp[0]); sa3->sa_modeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_modeset) { ND_TCHECK(dp[0]); sa3->sa_mode = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_uidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_uidset) { ND_TCHECK(dp[0]); sa3->sa_uid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_gidset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_gidset) { ND_TCHECK(dp[0]); sa3->sa_gid = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_sizeset = EXTRACT_32BITS(dp); dp++; if (sa3->sa_sizeset) { ND_TCHECK(dp[0]); sa3->sa_size = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_atimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_atime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_atime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } ND_TCHECK(dp[0]); sa3->sa_mtimetype = EXTRACT_32BITS(dp); dp++; if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) { ND_TCHECK(dp[1]); sa3->sa_mtime.nfsv3_sec = EXTRACT_32BITS(dp); dp++; sa3->sa_mtime.nfsv3_nsec = EXTRACT_32BITS(dp); dp++; } return dp; trunc: return NULL; } static int nfserr; /* true if we error rather than trunc */ static void print_sattr3(netdissect_options *ndo, const struct nfsv3_sattr *sa3, int verbose) { if (sa3->sa_modeset) ND_PRINT((ndo, " mode %o", sa3->sa_mode)); if (sa3->sa_uidset) ND_PRINT((ndo, " uid %u", sa3->sa_uid)); if (sa3->sa_gidset) ND_PRINT((ndo, " gid %u", sa3->sa_gid)); if (verbose > 1) { if (sa3->sa_atimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " atime %u.%06u", sa3->sa_atime.nfsv3_sec, sa3->sa_atime.nfsv3_nsec)); if (sa3->sa_mtimetype == NFSV3SATTRTIME_TOCLIENT) ND_PRINT((ndo, " mtime %u.%06u", sa3->sa_mtime.nfsv3_sec, sa3->sa_mtime.nfsv3_nsec)); } } void nfsreply_print(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; char srcid[20], dstid[20]; /*fits 32bit*/ nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_xid); if (!ndo->ndo_nflag) { strlcpy(srcid, "nfs", sizeof(srcid)); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } else { snprintf(srcid, sizeof(srcid), "%u", NFS_PORT); snprintf(dstid, sizeof(dstid), "%u", EXTRACT_32BITS(&rp->rm_xid)); } print_nfsaddr(ndo, bp2, srcid, dstid); nfsreply_print_noaddr(ndo, bp, length, bp2); return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } void nfsreply_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; uint32_t proc, vers, reply_stat; enum sunrpc_reject_stat rstat; uint32_t rlow; uint32_t rhigh; enum sunrpc_auth_stat rwhy; nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; ND_TCHECK(rp->rm_reply.rp_stat); reply_stat = EXTRACT_32BITS(&rp->rm_reply.rp_stat); switch (reply_stat) { case SUNRPC_MSG_ACCEPTED: ND_PRINT((ndo, "reply ok %u", length)); if (xid_map_find(rp, bp2, &proc, &vers) >= 0) interp_reply(ndo, rp, proc, vers, length); break; case SUNRPC_MSG_DENIED: 
ND_PRINT((ndo, "reply ERR %u: ", length)); ND_TCHECK(rp->rm_reply.rp_reject.rj_stat); rstat = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_stat); switch (rstat) { case SUNRPC_RPC_MISMATCH: ND_TCHECK(rp->rm_reply.rp_reject.rj_vers.high); rlow = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.low); rhigh = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_vers.high); ND_PRINT((ndo, "RPC Version mismatch (%u-%u)", rlow, rhigh)); break; case SUNRPC_AUTH_ERROR: ND_TCHECK(rp->rm_reply.rp_reject.rj_why); rwhy = EXTRACT_32BITS(&rp->rm_reply.rp_reject.rj_why); ND_PRINT((ndo, "Auth %s", tok2str(sunrpc_auth_str, "Invalid failure code %u", rwhy))); break; default: ND_PRINT((ndo, "Unknown reason for rejecting rpc message %u", (unsigned int)rstat)); break; } break; default: ND_PRINT((ndo, "reply Unknown rpc response code=%u %u", reply_stat, length)); break; } return; trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Return a pointer to the first file handle in the packet. * If the packet was truncated, return 0. */ static const uint32_t * parsereq(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; register u_int len; /* * find the start of the req data (if we captured it) */ dp = (const uint32_t *)&rp->rm_call.cb_cred; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len < length) { dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp); ND_TCHECK2(dp[0], 0); return (dp); } } trunc: return (NULL); } /* * Print out an NFS file handle and return a pointer to following word. * If packet was truncated, return 0. */ static const uint32_t * parsefh(netdissect_options *ndo, register const uint32_t *dp, int v3) { u_int len; if (v3) { ND_TCHECK(dp[0]); len = EXTRACT_32BITS(dp) / 4; dp++; } else len = NFSX_V2FH / 4; if (ND_TTEST2(*dp, len * sizeof(*dp))) { nfs_printfh(ndo, dp, len); return (dp + len); } trunc: return (NULL); } /* * Print out a file name and return pointer to 32-bit word past it. * If packet was truncated, return 0. */ static const uint32_t * parsefn(netdissect_options *ndo, register const uint32_t *dp) { register uint32_t len; register const u_char *cp; /* Bail if we don't have the string length */ ND_TCHECK(*dp); /* Fetch string length; convert to host order */ len = *dp++; NTOHL(len); ND_TCHECK2(*dp, ((len + 3) & ~3)); cp = (const u_char *)dp; /* Update 32-bit pointer (NFS filenames padded to 32-bit boundaries) */ dp += ((len + 3) & ~3) / sizeof(*dp); ND_PRINT((ndo, "\"")); if (fn_printn(ndo, cp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); return (dp); trunc: return NULL; } /* * Print out file handle and file name. * Return pointer to 32-bit word past file name. * If packet was truncated (or there was some other error), return 0. 
*/ static const uint32_t * parsefhn(netdissect_options *ndo, register const uint32_t *dp, int v3) { dp = parsefh(ndo, dp, v3); if (dp == NULL) return (NULL); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp)); } void nfsreq_print_noaddr(netdissect_options *ndo, register const u_char *bp, u_int length, register const u_char *bp2) { register const struct sunrpc_msg *rp; register const uint32_t *dp; nfs_type type; int v3; uint32_t proc; uint32_t access_flags; struct nfsv3_sattr sa3; ND_PRINT((ndo, "%d", length)); nfserr = 0; /* assume no error */ rp = (const struct sunrpc_msg *)bp; if (!xid_map_enter(ndo, rp, bp2)) /* record proc number for later on */ goto trunc; v3 = (EXTRACT_32BITS(&rp->rm_call.cb_vers) == NFS_VER3); proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: case NFSPROC_SETATTR: case NFSPROC_READLINK: case NFSPROC_FSSTAT: case NFSPROC_FSINFO: case NFSPROC_PATHCONF: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefh(ndo, dp, v3) != NULL) return; break; case NFSPROC_LOOKUP: case NFSPROC_CREATE: case NFSPROC_MKDIR: case NFSPROC_REMOVE: case NFSPROC_RMDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && parsefhn(ndo, dp, v3) != NULL) return; break; case NFSPROC_ACCESS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[0]); access_flags = EXTRACT_32BITS(&dp[0]); if (access_flags & ~NFSV3ACCESS_FULL) { /* NFSV3ACCESS definitions aren't up to date */ ND_PRINT((ndo, " %04x", access_flags)); } else if ((access_flags & NFSV3ACCESS_FULL) == NFSV3ACCESS_FULL) { ND_PRINT((ndo, " NFS_ACCESS_FULL")); } else { char separator = ' '; if (access_flags & NFSV3ACCESS_READ) { ND_PRINT((ndo, " NFS_ACCESS_READ")); separator = '|'; } if (access_flags & NFSV3ACCESS_LOOKUP) { ND_PRINT((ndo, "%cNFS_ACCESS_LOOKUP", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_MODIFY) { ND_PRINT((ndo, "%cNFS_ACCESS_MODIFY", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXTEND) { ND_PRINT((ndo, "%cNFS_ACCESS_EXTEND", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_DELETE) { ND_PRINT((ndo, "%cNFS_ACCESS_DELETE", separator)); separator = '|'; } if (access_flags & NFSV3ACCESS_EXECUTE) ND_PRINT((ndo, "%cNFS_ACCESS_EXECUTE", separator)); } return; } break; case NFSPROC_READ: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); } else { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes @ %u", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_WRITE: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); ND_PRINT((ndo, " %u (%u) bytes @ %" PRIu64, EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[3])))); } } else { ND_TCHECK(dp[3]); ND_PRINT((ndo, " %u (%u) bytes @ %u (%u)", EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_SYMLINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (v3 && (dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; if (parsefn(ndo, dp) == NULL) break; if (v3 && 
ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_MKNOD: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_TCHECK(*dp); type = (nfs_type)EXTRACT_32BITS(dp); dp++; if ((dp = parse_sattr3(ndo, dp, &sa3)) == NULL) break; ND_PRINT((ndo, " %s", tok2str(type2str, "unk-ft %d", type))); if (ndo->ndo_vflag && (type == NFCHR || type == NFBLK)) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u/%u", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); dp += 2; } if (ndo->ndo_vflag) print_sattr3(ndo, &sa3, ndo->ndo_vflag); return; } break; case NFSPROC_RENAME: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefhn(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_LINK: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_PRINT((ndo, " ->")); if (parsefhn(ndo, dp, v3) != NULL) return; } break; case NFSPROC_READDIR: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { if (v3) { ND_TCHECK(dp[4]); /* * We shouldn't really try to interpret the * offset cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) ND_PRINT((ndo, " verf %08x%08x", dp[2], dp[3])); } else { ND_TCHECK(dp[1]); /* * Print the offset as signed, since -1 is * common, but offsets > 2^31 aren't. */ ND_PRINT((ndo, " %u bytes @ %d", EXTRACT_32BITS(&dp[1]), EXTRACT_32BITS(&dp[0]))); } return; } break; case NFSPROC_READDIRPLUS: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[4]); /* * We don't try to interpret the offset * cookie here. */ ND_PRINT((ndo, " %u bytes @ %" PRId64, EXTRACT_32BITS(&dp[4]), EXTRACT_64BITS(&dp[0]))); if (ndo->ndo_vflag) { ND_TCHECK(dp[5]); ND_PRINT((ndo, " max %u verf %08x%08x", EXTRACT_32BITS(&dp[5]), dp[2], dp[3])); } return; } break; case NFSPROC_COMMIT: if ((dp = parsereq(ndo, rp, length)) != NULL && (dp = parsefh(ndo, dp, v3)) != NULL) { ND_TCHECK(dp[2]); ND_PRINT((ndo, " %u bytes @ %" PRIu64, EXTRACT_32BITS(&dp[2]), EXTRACT_64BITS(&dp[0]))); return; } break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); } /* * Print out an NFS file handle. * We assume packet was not truncated before the end of the * file handle pointed to by dp. * * Note: new version (using portable file-handle parser) doesn't produce * generation number. It probably could be made to do that, with some * additional hacking on the parser code. 
*/ static void nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ char temp[NFSX_V3FHMAX+1]; u_int stringlen; /* Make sure string is null-terminated */ stringlen = len; if (stringlen > NFSX_V3FHMAX) stringlen = NFSX_V3FHMAX; strncpy(temp, sfsname, stringlen); temp[stringlen] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); } /* * Maintain a small cache of recent client.XID.server/proc pairs, to allow * us to match up replies with requests and thus to know how to parse * the reply. */ struct xid_map_entry { uint32_t xid; /* transaction ID (net order) */ int ipver; /* IP version (4 or 6) */ struct in6_addr client; /* client IP address (net order) */ struct in6_addr server; /* server IP address (net order) */ uint32_t proc; /* call proc number (host order) */ uint32_t vers; /* program version (host order) */ }; /* * Map entries are kept in an array that we manage as a ring; * new entries are always added at the tail of the ring. Initially, * all the entries are zero and hence don't match anything. */ #define XIDMAPSIZE 64 static struct xid_map_entry xid_map[XIDMAPSIZE]; static int xid_map_next = 0; static int xid_map_hint = 0; static int xid_map_enter(netdissect_options *ndo, const struct sunrpc_msg *rp, const u_char *bp) { const struct ip *ip = NULL; const struct ip6_hdr *ip6 = NULL; struct xid_map_entry *xmep; if (!ND_TTEST(rp->rm_call.cb_vers)) return (0); switch (IP_V((const struct ip *)bp)) { case 4: ip = (const struct ip *)bp; break; case 6: ip6 = (const struct ip6_hdr *)bp; break; default: return (1); } xmep = &xid_map[xid_map_next]; if (++xid_map_next >= XIDMAPSIZE) xid_map_next = 0; UNALIGNED_MEMCPY(&xmep->xid, &rp->rm_xid, sizeof(xmep->xid)); if (ip) { xmep->ipver = 4; UNALIGNED_MEMCPY(&xmep->client, &ip->ip_src, sizeof(ip->ip_src)); UNALIGNED_MEMCPY(&xmep->server, &ip->ip_dst, sizeof(ip->ip_dst)); } else if (ip6) { xmep->ipver = 6; UNALIGNED_MEMCPY(&xmep->client, &ip6->ip6_src, sizeof(ip6->ip6_src)); UNALIGNED_MEMCPY(&xmep->server, &ip6->ip6_dst, sizeof(ip6->ip6_dst)); } xmep->proc = EXTRACT_32BITS(&rp->rm_call.cb_proc); xmep->vers = EXTRACT_32BITS(&rp->rm_call.cb_vers); return (1); } /* * Returns 0 and puts NFSPROC_xxx in proc return and * version in vers return, or returns -1 on failure */ static int xid_map_find(const struct sunrpc_msg *rp, const u_char *bp, uint32_t *proc, uint32_t *vers) { int i; struct xid_map_entry *xmep; uint32_t xid; const struct ip *ip = (const struct ip *)bp; const struct ip6_hdr *ip6 = (const struct ip6_hdr *)bp; int cmp; UNALIGNED_MEMCPY(&xid, &rp->rm_xid, sizeof(xmep->xid)); /* Start searching from where we last left off */ i = xid_map_hint; do { xmep = &xid_map[i]; cmp = 1; if (xmep->ipver != IP_V(ip) || xmep->xid != xid) goto nextitem; switch (xmep->ipver) { case 4: if (UNALIGNED_MEMCMP(&ip->ip_src, &xmep->server, 
sizeof(ip->ip_src)) != 0 || UNALIGNED_MEMCMP(&ip->ip_dst, &xmep->client, sizeof(ip->ip_dst)) != 0) { cmp = 0; } break; case 6: if (UNALIGNED_MEMCMP(&ip6->ip6_src, &xmep->server, sizeof(ip6->ip6_src)) != 0 || UNALIGNED_MEMCMP(&ip6->ip6_dst, &xmep->client, sizeof(ip6->ip6_dst)) != 0) { cmp = 0; } break; default: cmp = 0; break; } if (cmp) { /* match */ xid_map_hint = i; *proc = xmep->proc; *vers = xmep->vers; return 0; } nextitem: if (++i >= XIDMAPSIZE) i = 0; } while (i != xid_map_hint); /* search failed */ return (-1); } /* * Routines for parsing reply packets */ /* * Return a pointer to the beginning of the actual results. * If the packet was truncated, return 0. */ static const uint32_t * parserep(netdissect_options *ndo, register const struct sunrpc_msg *rp, register u_int length) { register const uint32_t *dp; u_int len; enum sunrpc_accept_stat astat; /* * Portability note: * Here we find the address of the ar_verf credentials. * Originally, this calculation was * dp = (uint32_t *)&rp->rm_reply.rp_acpt.ar_verf * On the wire, the rp_acpt field starts immediately after * the (32 bit) rp_stat field. However, rp_acpt (which is a * "struct accepted_reply") contains a "struct opaque_auth", * whose internal representation contains a pointer, so on a * 64-bit machine the compiler inserts 32 bits of padding * before rp->rm_reply.rp_acpt.ar_verf. So, we cannot use * the internal representation to parse the on-the-wire * representation. Instead, we skip past the rp_stat field, * which is an "enum" and so occupies one 32-bit word. */ dp = ((const uint32_t *)&rp->rm_reply) + 1; ND_TCHECK(dp[1]); len = EXTRACT_32BITS(&dp[1]); if (len >= length) return (NULL); /* * skip past the ar_verf credentials. */ dp += (len + (2*sizeof(uint32_t) + 3)) / sizeof(uint32_t); /* * now we can check the ar_stat field */ ND_TCHECK(dp[0]); astat = (enum sunrpc_accept_stat) EXTRACT_32BITS(dp); if (astat != SUNRPC_SUCCESS) { ND_PRINT((ndo, " %s", tok2str(sunrpc_str, "ar_stat %d", astat))); nfserr = 1; /* suppress trunc string */ return (NULL); } /* successful return */ ND_TCHECK2(*dp, sizeof(astat)); return ((const uint32_t *) (sizeof(astat) + ((const char *)dp))); trunc: return (0); } static const uint32_t * parsestatus(netdissect_options *ndo, const uint32_t *dp, int *er) { int errnum; ND_TCHECK(dp[0]); errnum = EXTRACT_32BITS(&dp[0]); if (er) *er = errnum; if (errnum != 0) { if (!ndo->ndo_qflag) ND_PRINT((ndo, " ERROR: %s", tok2str(status2str, "unk %d", errnum))); nfserr = 1; } return (dp + 1); trunc: return NULL; } static const uint32_t * parsefattr(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { const struct nfs_fattr *fap; fap = (const struct nfs_fattr *)dp; ND_TCHECK(fap->fa_gid); if (verbose) { ND_PRINT((ndo, " %s %o ids %d/%d", tok2str(type2str, "unk-ft %d ", EXTRACT_32BITS(&fap->fa_type)), EXTRACT_32BITS(&fap->fa_mode), EXTRACT_32BITS(&fap->fa_uid), EXTRACT_32BITS(&fap->fa_gid))); if (v3) { ND_TCHECK(fap->fa3_size); ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_size))); } else { ND_TCHECK(fap->fa2_size); ND_PRINT((ndo, " sz %d", EXTRACT_32BITS(&fap->fa2_size))); } } /* print lots more stuff */ if (verbose > 1) { if (v3) { ND_TCHECK(fap->fa3_ctime); ND_PRINT((ndo, " nlink %d rdev %d/%d", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa3_rdev.specdata1), EXTRACT_32BITS(&fap->fa3_rdev.specdata2))); ND_PRINT((ndo, " fsid %" PRIx64, EXTRACT_64BITS((const uint32_t *)&fap->fa3_fsid))); ND_PRINT((ndo, " fileid %" PRIx64, EXTRACT_64BITS((const uint32_t 
*)&fap->fa3_fileid))); ND_PRINT((ndo, " a/m/ctime %u.%06u", EXTRACT_32BITS(&fap->fa3_atime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_atime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_mtime.nfsv3_nsec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_sec), EXTRACT_32BITS(&fap->fa3_ctime.nfsv3_nsec))); } else { ND_TCHECK(fap->fa2_ctime); ND_PRINT((ndo, " nlink %d rdev 0x%x fsid 0x%x nodeid 0x%x a/m/ctime", EXTRACT_32BITS(&fap->fa_nlink), EXTRACT_32BITS(&fap->fa2_rdev), EXTRACT_32BITS(&fap->fa2_fsid), EXTRACT_32BITS(&fap->fa2_fileid))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_atime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_atime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_mtime.nfsv2_usec))); ND_PRINT((ndo, " %u.%06u", EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_sec), EXTRACT_32BITS(&fap->fa2_ctime.nfsv2_usec))); } } return ((const uint32_t *)((const unsigned char *)dp + (v3 ? NFSX_V3FATTR : NFSX_V2FATTR))); trunc: return (NULL); } static int parseattrstat(netdissect_options *ndo, const uint32_t *dp, int verbose, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); return (parsefattr(ndo, dp, verbose, v3) != NULL); } static int parsediropres(netdissect_options *ndo, const uint32_t *dp) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) return (1); dp = parsefh(ndo, dp, 0); if (dp == NULL) return (0); return (parsefattr(ndo, dp, ndo->ndo_vflag, 0) != NULL); } static int parselinkres(netdissect_options *ndo, const uint32_t *dp, int v3) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return(0); if (er) return(1); if (v3 && !(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); ND_PRINT((ndo, " ")); return (parsefn(ndo, dp) != NULL); } static int parsestatfs(netdissect_options *ndo, const uint32_t *dp, int v3) { const struct nfs_statfs *sfsp; int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (!v3 && er) return (1); if (ndo->ndo_qflag) return(1); if (v3) { if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); } ND_TCHECK2(*dp, (v3 ? 
NFSX_V3STATFS : NFSX_V2STATFS)); sfsp = (const struct nfs_statfs *)dp; if (v3) { ND_PRINT((ndo, " tbytes %" PRIu64 " fbytes %" PRIu64 " abytes %" PRIu64, EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_fbytes), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_abytes))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " tfiles %" PRIu64 " ffiles %" PRIu64 " afiles %" PRIu64 " invar %u", EXTRACT_64BITS((const uint32_t *)&sfsp->sf_tfiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_ffiles), EXTRACT_64BITS((const uint32_t *)&sfsp->sf_afiles), EXTRACT_32BITS(&sfsp->sf_invarsec))); } } else { ND_PRINT((ndo, " tsize %d bsize %d blocks %d bfree %d bavail %d", EXTRACT_32BITS(&sfsp->sf_tsize), EXTRACT_32BITS(&sfsp->sf_bsize), EXTRACT_32BITS(&sfsp->sf_blocks), EXTRACT_32BITS(&sfsp->sf_bfree), EXTRACT_32BITS(&sfsp->sf_bavail))); } return (1); trunc: return (0); } static int parserddires(netdissect_options *ndo, const uint32_t *dp) { int er; dp = parsestatus(ndo, dp, &er); if (dp == NULL) return (0); if (er) return (1); if (ndo->ndo_qflag) return (1); ND_TCHECK(dp[2]); ND_PRINT((ndo, " offset 0x%x size %d ", EXTRACT_32BITS(&dp[0]), EXTRACT_32BITS(&dp[1]))); if (dp[2] != 0) ND_PRINT((ndo, " eof")); return (1); trunc: return (0); } static const uint32_t * parse_wcc_attr(netdissect_options *ndo, const uint32_t *dp) { /* Our caller has already checked this */ ND_PRINT((ndo, " sz %" PRIu64, EXTRACT_64BITS(&dp[0]))); ND_PRINT((ndo, " mtime %u.%06u ctime %u.%06u", EXTRACT_32BITS(&dp[2]), EXTRACT_32BITS(&dp[3]), EXTRACT_32BITS(&dp[4]), EXTRACT_32BITS(&dp[5]))); return (dp + 6); } /* * Pre operation attributes. Print only if vflag > 1. */ static const uint32_t * parse_pre_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; ND_TCHECK2(*dp, 24); if (verbose > 1) { return parse_wcc_attr(ndo, dp); } else { /* If not verbose enough, just skip over wcc_attr */ return (dp + 6); } trunc: return (NULL); } /* * Post operation attributes are printed if vflag >= 1 */ static const uint32_t * parse_post_op_attr(netdissect_options *ndo, const uint32_t *dp, int verbose) { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (verbose) { return parsefattr(ndo, dp, verbose, 1); } else return (dp + (NFSX_V3FATTR / sizeof (uint32_t))); trunc: return (NULL); } static const uint32_t * parse_wcc_data(netdissect_options *ndo, const uint32_t *dp, int verbose) { if (verbose > 1) ND_PRINT((ndo, " PRE:")); if (!(dp = parse_pre_op_attr(ndo, dp, verbose))) return (0); if (verbose) ND_PRINT((ndo, " POST:")); return parse_post_op_attr(ndo, dp, verbose); } static const uint32_t * parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); } static int parsewccres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); return parse_wcc_data(ndo, dp, verbose) != NULL; } static const uint32_t * parsev3rddirres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = 
parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (er) return dp; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " verf %08x%08x", dp[0], dp[1])); dp += 2; } return dp; trunc: return (NULL); } static int parsefsinfo(netdissect_options *ndo, const uint32_t *dp) { const struct nfsv3_fsinfo *sfp; int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); sfp = (const struct nfsv3_fsinfo *)dp; ND_TCHECK(*sfp); ND_PRINT((ndo, " rtmax %u rtpref %u wtmax %u wtpref %u dtpref %u", EXTRACT_32BITS(&sfp->fs_rtmax), EXTRACT_32BITS(&sfp->fs_rtpref), EXTRACT_32BITS(&sfp->fs_wtmax), EXTRACT_32BITS(&sfp->fs_wtpref), EXTRACT_32BITS(&sfp->fs_dtpref))); if (ndo->ndo_vflag) { ND_PRINT((ndo, " rtmult %u wtmult %u maxfsz %" PRIu64, EXTRACT_32BITS(&sfp->fs_rtmult), EXTRACT_32BITS(&sfp->fs_wtmult), EXTRACT_64BITS((const uint32_t *)&sfp->fs_maxfilesize))); ND_PRINT((ndo, " delta %u.%06u ", EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_sec), EXTRACT_32BITS(&sfp->fs_timedelta.nfsv3_nsec))); } return (1); trunc: return (0); } static int parsepathconf(netdissect_options *ndo, const uint32_t *dp) { int er; const struct nfsv3_pathconf *spp; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (ndo->ndo_vflag) ND_PRINT((ndo, " POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) return (0); if (er) return (1); spp = (const struct nfsv3_pathconf *)dp; ND_TCHECK(*spp); ND_PRINT((ndo, " linkmax %u namemax %u %s %s %s %s", EXTRACT_32BITS(&spp->pc_linkmax), EXTRACT_32BITS(&spp->pc_namemax), EXTRACT_32BITS(&spp->pc_notrunc) ? "notrunc" : "", EXTRACT_32BITS(&spp->pc_chownrestricted) ? "chownres" : "", EXTRACT_32BITS(&spp->pc_caseinsensitive) ? "igncase" : "", EXTRACT_32BITS(&spp->pc_casepreserving) ? 
"keepcase" : "")); return (1); trunc: return (0); } static void interp_reply(netdissect_options *ndo, const struct sunrpc_msg *rp, uint32_t proc, uint32_t vers, int length) { register const uint32_t *dp; register int v3; int er; v3 = (vers == NFS_VER3); if (!v3 && proc < NFS_NPROCS) proc = nfsv3_procid[proc]; ND_PRINT((ndo, " %s", tok2str(nfsproc_str, "proc-%u", proc))); switch (proc) { case NFSPROC_GETATTR: dp = parserep(ndo, rp, length); if (dp != NULL && parseattrstat(ndo, dp, !ndo->ndo_qflag, v3) != 0) return; break; case NFSPROC_SETATTR: if (!(dp = parserep(ndo, rp, length))) return; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parseattrstat(ndo, dp, !ndo->ndo_qflag, 0) != 0) return; } break; case NFSPROC_LOOKUP: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (er) { if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } else { if (!(dp = parsefh(ndo, dp, v3))) break; if ((dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag)) && ndo->ndo_vflag > 1) { ND_PRINT((ndo, " post dattr:")); dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag); } } if (dp) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_ACCESS: if (!(dp = parserep(ndo, rp, length))) break; if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) ND_PRINT((ndo, " attr:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (!er) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " c %04x", EXTRACT_32BITS(&dp[0]))); } return; case NFSPROC_READLINK: dp = parserep(ndo, rp, length); if (dp != NULL && parselinkres(ndo, dp, v3) != 0) return; break; case NFSPROC_READ: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (EXTRACT_32BITS(&dp[1])) ND_PRINT((ndo, " EOF")); } return; } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, 0) != 0) return; } break; case NFSPROC_WRITE: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; if (er) return; if (ndo->ndo_vflag) { ND_TCHECK(dp[0]); ND_PRINT((ndo, " %u bytes", EXTRACT_32BITS(&dp[0]))); if (ndo->ndo_vflag > 1) { ND_TCHECK(dp[1]); ND_PRINT((ndo, " <%s>", tok2str(nfsv3_writemodes, NULL, EXTRACT_32BITS(&dp[1])))); } return; } } else { if (parseattrstat(ndo, dp, ndo->ndo_vflag, v3) != 0) return; } break; case NFSPROC_CREATE: case NFSPROC_MKDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsediropres(ndo, dp) != 0) return; } break; case NFSPROC_SYMLINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_MKNOD: if (!(dp = parserep(ndo, rp, length))) break; if (parsecreateopres(ndo, dp, ndo->ndo_vflag) != NULL) return; break; case NFSPROC_REMOVE: case NFSPROC_RMDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsewccres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_RENAME: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if 
(ndo->ndo_vflag) { ND_PRINT((ndo, " from:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " to:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; } return; } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_LINK: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (!(dp = parsestatus(ndo, dp, &er))) break; if (ndo->ndo_vflag) { ND_PRINT((ndo, " file POST:")); if (!(dp = parse_post_op_attr(ndo, dp, ndo->ndo_vflag))) break; ND_PRINT((ndo, " dir:")); if (!(dp = parse_wcc_data(ndo, dp, ndo->ndo_vflag))) break; return; } } else { if (parsestatus(ndo, dp, &er) != NULL) return; } break; case NFSPROC_READDIR: if (!(dp = parserep(ndo, rp, length))) break; if (v3) { if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; } else { if (parserddires(ndo, dp) != 0) return; } break; case NFSPROC_READDIRPLUS: if (!(dp = parserep(ndo, rp, length))) break; if (parsev3rddirres(ndo, dp, ndo->ndo_vflag)) return; break; case NFSPROC_FSSTAT: dp = parserep(ndo, rp, length); if (dp != NULL && parsestatfs(ndo, dp, v3) != 0) return; break; case NFSPROC_FSINFO: dp = parserep(ndo, rp, length); if (dp != NULL && parsefsinfo(ndo, dp) != 0) return; break; case NFSPROC_PATHCONF: dp = parserep(ndo, rp, length); if (dp != NULL && parsepathconf(ndo, dp) != 0) return; break; case NFSPROC_COMMIT: dp = parserep(ndo, rp, length); if (dp != NULL && parsewccres(ndo, dp, ndo->ndo_vflag) != 0) return; break; default: return; } trunc: if (!nfserr) ND_PRINT((ndo, "%s", tstr)); }
nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ static char temp[NFSX_V3FHMAX+1]; /* Make sure string is null-terminated */ strncpy(temp, sfsname, NFSX_V3FHMAX); temp[sizeof(temp) - 1] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); }
nfs_printfh(netdissect_options *ndo, register const uint32_t *dp, const u_int len) { my_fsid fsid; uint32_t ino; const char *sfsname = NULL; char *spacep; if (ndo->ndo_uflag) { u_int i; char const *sep = ""; ND_PRINT((ndo, " fh[")); for (i=0; i<len; i++) { ND_PRINT((ndo, "%s%x", sep, dp[i])); sep = ":"; } ND_PRINT((ndo, "]")); return; } Parse_fh((const u_char *)dp, len, &fsid, &ino, NULL, &sfsname, 0); if (sfsname) { /* file system ID is ASCII, not numeric, for this server OS */ char temp[NFSX_V3FHMAX+1]; u_int stringlen; /* Make sure string is null-terminated */ stringlen = len; if (stringlen > NFSX_V3FHMAX) stringlen = NFSX_V3FHMAX; strncpy(temp, sfsname, stringlen); temp[stringlen] = '\0'; /* Remove trailing spaces */ spacep = strchr(temp, ' '); if (spacep) *spacep = '\0'; ND_PRINT((ndo, " fh %s/", temp)); } else { ND_PRINT((ndo, " fh %d,%d/", fsid.Fsid_dev.Major, fsid.Fsid_dev.Minor)); } if(fsid.Fsid_dev.Minor == 257) /* Print the undecoded handle */ ND_PRINT((ndo, "%s", fsid.Opaque_Handle)); else ND_PRINT((ndo, "%ld", (long) ino)); }
{'added': [(810, '\t\tchar temp[NFSX_V3FHMAX+1];'), (811, '\t\tu_int stringlen;'), (814, '\t\tstringlen = len;'), (815, '\t\tif (stringlen > NFSX_V3FHMAX)'), (816, '\t\t\tstringlen = NFSX_V3FHMAX;'), (817, '\t\tstrncpy(temp, sfsname, stringlen);'), (818, "\t\ttemp[stringlen] = '\\0';")], 'deleted': [(810, '\t\tstatic char temp[NFSX_V3FHMAX+1];'), (813, '\t\tstrncpy(temp, sfsname, NFSX_V3FHMAX);'), (814, "\t\ttemp[sizeof(temp) - 1] = '\\0';")]}
7
3
1404
9297
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13001
['CWE-125']
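Note on the CVE-2017-13001 / CWE-125 record above: in func_before, nfs_printfh() copies sfsname into a static buffer with strncpy(temp, sfsname, NFSX_V3FHMAX), so the read bound is the destination capacity rather than the length of the parsed file handle; when sfsname is not NUL-terminated within the handle data, strncpy can read past the end of the captured packet (an out-of-bounds read). In func_after the copy is bounded by the caller-supplied len, clamped to NFSX_V3FHMAX, and the string is terminated at that length. The following is a minimal, self-contained sketch of the same bounded-copy pattern, not the tcpdump source; FHMAX, print_fsname and the demo handle in main are placeholder names and values for this illustration only.

#include <stdio.h>
#include <string.h>

#define FHMAX 64                        /* stand-in for NFSX_V3FHMAX */

/* Copy at most len bytes of a possibly non-NUL-terminated name, clamped to
 * the destination capacity, so the copy can never read past the source. */
static void print_fsname(const char *sfsname, unsigned int len)
{
	char temp[FHMAX + 1];
	unsigned int n = len;
	char *spacep;

	if (n > FHMAX)
		n = FHMAX;              /* clamp to destination capacity */
	strncpy(temp, sfsname, n);      /* reads at most n source bytes */
	temp[n] = '\0';                 /* strncpy may not terminate */

	spacep = strchr(temp, ' ');     /* trim space-padded names */
	if (spacep)
		*spacep = '\0';

	printf(" fh %s/\n", temp);
}

int main(void)
{
	/* 8-byte handle region, space-padded, with no terminating NUL */
	char handle[8] = { 'e', 'x', 'p', 'o', 'r', 't', 's', ' ' };

	print_fsname(handle, (unsigned int)sizeof(handle));
	return 0;
}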
pack-bitmap.c
show_object
#include "cache.h" #include "commit.h" #include "tag.h" #include "diff.h" #include "revision.h" #include "progress.h" #include "list-objects.h" #include "pack.h" #include "pack-bitmap.h" #include "pack-revindex.h" #include "pack-objects.h" /* * An entry on the bitmap index, representing the bitmap for a given * commit. */ struct stored_bitmap { unsigned char sha1[20]; struct ewah_bitmap *root; struct stored_bitmap *xor; int flags; }; /* * The currently active bitmap index. By design, repositories only have * a single bitmap index available (the index for the biggest packfile in * the repository), since bitmap indexes need full closure. * * If there is more than one bitmap index available (e.g. because of alternates), * the active bitmap index is the largest one. */ static struct bitmap_index { /* Packfile to which this bitmap index belongs to */ struct packed_git *pack; /* * Mark the first `reuse_objects` in the packfile as reused: * they will be sent as-is without using them for repacking * calculations */ uint32_t reuse_objects; /* mmapped buffer of the whole bitmap index */ unsigned char *map; size_t map_size; /* size of the mmaped buffer */ size_t map_pos; /* current position when loading the index */ /* * Type indexes. * * Each bitmap marks which objects in the packfile are of the given * type. This provides type information when yielding the objects from * the packfile during a walk, which allows for better delta bases. */ struct ewah_bitmap *commits; struct ewah_bitmap *trees; struct ewah_bitmap *blobs; struct ewah_bitmap *tags; /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */ khash_sha1 *bitmaps; /* Number of bitmapped commits */ uint32_t entry_count; /* Name-hash cache (or NULL if not present). */ uint32_t *hashes; /* * Extended index. 
* * When trying to perform bitmap operations with objects that are not * packed in `pack`, these objects are added to this "fake index" and * are assumed to appear at the end of the packfile for all operations */ struct eindex { struct object **objects; uint32_t *hashes; uint32_t count, alloc; khash_sha1_pos *positions; } ext_index; /* Bitmap result of the last performed walk */ struct bitmap *result; /* Version of the bitmap index */ unsigned int version; unsigned loaded : 1; } bitmap_git; static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st) { struct ewah_bitmap *parent; struct ewah_bitmap *composed; if (st->xor == NULL) return st->root; composed = ewah_pool_new(); parent = lookup_stored_bitmap(st->xor); ewah_xor(st->root, parent, composed); ewah_pool_free(st->root); st->root = composed; st->xor = NULL; return composed; } /* * Read a bitmap from the current read position on the mmaped * index, and increase the read position accordingly */ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) { struct ewah_bitmap *b = ewah_pool_new(); int bitmap_size = ewah_read_mmap(b, index->map + index->map_pos, index->map_size - index->map_pos); if (bitmap_size < 0) { error("Failed to load bitmap index (corrupted?)"); ewah_pool_free(b); return NULL; } index->map_pos += bitmap_size; return b; } static int load_bitmap_header(struct bitmap_index *index) { struct bitmap_disk_header *header = (void *)index->map; if (index->map_size < sizeof(*header) + 20) return error("Corrupted bitmap index (missing header data)"); if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0) return error("Corrupted bitmap index file (wrong header)"); index->version = ntohs(header->version); if (index->version != 1) return error("Unsupported version for bitmap index file (%d)", index->version); /* Parse known bitmap format options */ { uint32_t flags = ntohs(header->options); if ((flags & BITMAP_OPT_FULL_DAG) == 0) return error("Unsupported options for bitmap index file " "(Git requires BITMAP_OPT_FULL_DAG)"); if (flags & BITMAP_OPT_HASH_CACHE) { unsigned char *end = index->map + index->map_size - 20; index->hashes = ((uint32_t *)end) - index->pack->num_objects; } } index->entry_count = ntohl(header->entry_count); index->map_pos += sizeof(*header); return 0; } static struct stored_bitmap *store_bitmap(struct bitmap_index *index, struct ewah_bitmap *root, const unsigned char *sha1, struct stored_bitmap *xor_with, int flags) { struct stored_bitmap *stored; khiter_t hash_pos; int ret; stored = xmalloc(sizeof(struct stored_bitmap)); stored->root = root; stored->xor = xor_with; stored->flags = flags; hashcpy(stored->sha1, sha1); hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret); /* a 0 return code means the insertion succeeded with no changes, * because the SHA1 already existed on the map. 
this is bad, there * shouldn't be duplicated commits in the index */ if (ret == 0) { error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1)); return NULL; } kh_value(index->bitmaps, hash_pos) = stored; return stored; } static inline uint32_t read_be32(const unsigned char *buffer, size_t *pos) { uint32_t result = get_be32(buffer + *pos); (*pos) += sizeof(result); return result; } static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos) { return buffer[(*pos)++]; } #define MAX_XOR_OFFSET 160 static int load_bitmap_entries_v1(struct bitmap_index *index) { uint32_t i; struct stored_bitmap *recent_bitmaps[MAX_XOR_OFFSET] = { NULL }; for (i = 0; i < index->entry_count; ++i) { int xor_offset, flags; struct ewah_bitmap *bitmap = NULL; struct stored_bitmap *xor_bitmap = NULL; uint32_t commit_idx_pos; const unsigned char *sha1; commit_idx_pos = read_be32(index->map, &index->map_pos); xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); sha1 = nth_packed_object_sha1(index->pack, commit_idx_pos); bitmap = read_bitmap_1(index); if (!bitmap) return -1; if (xor_offset > MAX_XOR_OFFSET || xor_offset > i) return error("Corrupted bitmap pack index"); if (xor_offset > 0) { xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET]; if (xor_bitmap == NULL) return error("Invalid XOR offset in bitmap pack index"); } recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap( index, bitmap, sha1, xor_bitmap, flags); } return 0; } static char *pack_bitmap_filename(struct packed_git *p) { size_t len; if (!strip_suffix(p->pack_name, ".pack", &len)) die("BUG: pack_name does not end in .pack"); return xstrfmt("%.*s.bitmap", (int)len, p->pack_name); } static int open_pack_bitmap_1(struct packed_git *packfile) { int fd; struct stat st; char *idx_name; if (open_pack_index(packfile)) return -1; idx_name = pack_bitmap_filename(packfile); fd = git_open_noatime(idx_name); free(idx_name); if (fd < 0) return -1; if (fstat(fd, &st)) { close(fd); return -1; } if (bitmap_git.pack) { warning("ignoring extra bitmap file: %s", packfile->pack_name); close(fd); return -1; } bitmap_git.pack = packfile; bitmap_git.map_size = xsize_t(st.st_size); bitmap_git.map = xmmap(NULL, bitmap_git.map_size, PROT_READ, MAP_PRIVATE, fd, 0); bitmap_git.map_pos = 0; close(fd); if (load_bitmap_header(&bitmap_git) < 0) { munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } return 0; } static int load_pack_bitmap(void) { assert(bitmap_git.map && !bitmap_git.loaded); bitmap_git.bitmaps = kh_init_sha1(); bitmap_git.ext_index.positions = kh_init_sha1_pos(); load_pack_revindex(bitmap_git.pack); if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) || !(bitmap_git.trees = read_bitmap_1(&bitmap_git)) || !(bitmap_git.blobs = read_bitmap_1(&bitmap_git)) || !(bitmap_git.tags = read_bitmap_1(&bitmap_git))) goto failed; if (load_bitmap_entries_v1(&bitmap_git) < 0) goto failed; bitmap_git.loaded = 1; return 0; failed: munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } static int open_pack_bitmap(void) { struct packed_git *p; int ret = -1; assert(!bitmap_git.map && !bitmap_git.loaded); prepare_packed_git(); for (p = packed_git; p; p = p->next) { if (open_pack_bitmap_1(p) == 0) ret = 0; } return ret; } int prepare_bitmap_git(void) { if (bitmap_git.loaded) return 0; if (!open_pack_bitmap()) return load_pack_bitmap(); return -1; } struct include_data { struct bitmap *base; struct bitmap *seen; }; static 
inline int bitmap_position_extended(const unsigned char *sha1) { khash_sha1_pos *positions = bitmap_git.ext_index.positions; khiter_t pos = kh_get_sha1_pos(positions, sha1); if (pos < kh_end(positions)) { int bitmap_pos = kh_value(positions, pos); return bitmap_pos + bitmap_git.pack->num_objects; } return -1; } static inline int bitmap_position_packfile(const unsigned char *sha1) { off_t offset = find_pack_entry_one(sha1, bitmap_git.pack); if (!offset) return -1; return find_revindex_position(bitmap_git.pack, offset); } static int bitmap_position(const unsigned char *sha1) { int pos = bitmap_position_packfile(sha1); return (pos >= 0) ? pos : bitmap_position_extended(sha1); } static int ext_index_add_object(struct object *object, const char *name) { struct eindex *eindex = &bitmap_git.ext_index; khiter_t hash_pos; int hash_ret; int bitmap_pos; hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret); if (hash_ret > 0) { if (eindex->count >= eindex->alloc) { eindex->alloc = (eindex->alloc + 16) * 3 / 2; REALLOC_ARRAY(eindex->objects, eindex->alloc); REALLOC_ARRAY(eindex->hashes, eindex->alloc); } bitmap_pos = eindex->count; eindex->objects[eindex->count] = object; eindex->hashes[eindex->count] = pack_name_hash(name); kh_value(eindex->positions, hash_pos) = bitmap_pos; eindex->count++; } else { bitmap_pos = kh_value(eindex->positions, hash_pos); } return bitmap_pos + bitmap_git.pack->num_objects; } static void show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) { char *name = path_name(path, last); bitmap_pos = ext_index_add_object(object, name); free(name); } bitmap_set(base, bitmap_pos); } static void show_commit(struct commit *commit, void *data) { } static int add_to_include_set(struct include_data *data, const unsigned char *sha1, int bitmap_pos) { khiter_t hash_pos; if (data->seen && bitmap_get(data->seen, bitmap_pos)) return 0; if (bitmap_get(data->base, bitmap_pos)) return 0; hash_pos = kh_get_sha1(bitmap_git.bitmaps, sha1); if (hash_pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, hash_pos); bitmap_or_ewah(data->base, lookup_stored_bitmap(st)); return 0; } bitmap_set(data->base, bitmap_pos); return 1; } static int should_include(struct commit *commit, void *_data) { struct include_data *data = _data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object((struct object *)commit, NULL); if (!add_to_include_set(data, commit->object.oid.hash, bitmap_pos)) { struct commit_list *parent = commit->parents; while (parent) { parent->item->object.flags |= SEEN; parent = parent->next; } return 0; } return 1; } static struct bitmap *find_objects(struct rev_info *revs, struct object_list *roots, struct bitmap *seen) { struct bitmap *base = NULL; int needs_walk = 0; struct object_list *not_mapped = NULL; /* * Go through all the roots for the walk. The ones that have bitmaps * on the bitmap index will be `or`ed together to form an initial * global reachability analysis. * * The ones without bitmaps in the index will be stored in the * `not_mapped_list` for further processing. 
*/ while (roots) { struct object *object = roots->item; roots = roots->next; if (object->type == OBJ_COMMIT) { khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *or_with = lookup_stored_bitmap(st); if (base == NULL) base = ewah_to_bitmap(or_with); else bitmap_or_ewah(base, or_with); object->flags |= SEEN; continue; } } object_list_insert(object, &not_mapped); } /* * Best case scenario: We found bitmaps for all the roots, * so the resulting `or` bitmap has the full reachability analysis */ if (not_mapped == NULL) return base; roots = not_mapped; /* * Let's iterate through all the roots that don't have bitmaps to * check if we can determine them to be reachable from the existing * global bitmap. * * If we cannot find them in the existing global bitmap, we'll need * to push them to an actual walk and run it until we can confirm * they are reachable */ while (roots) { struct object *object = roots->item; int pos; roots = roots->next; pos = bitmap_position(object->oid.hash); if (pos < 0 || base == NULL || !bitmap_get(base, pos)) { object->flags &= ~UNINTERESTING; add_pending_object(revs, object, ""); needs_walk = 1; } else { object->flags |= SEEN; } } if (needs_walk) { struct include_data incdata; if (base == NULL) base = bitmap_new(); incdata.base = base; incdata.seen = seen; revs->include_check = should_include; revs->include_check_data = &incdata; if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, show_commit, show_object, base); } return base; } static void show_extended_objects(struct bitmap *objects, show_reachable_fn show_reach) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i; for (i = 0; i < eindex->count; ++i) { struct object *obj; if (!bitmap_get(objects, bitmap_git.pack->num_objects + i)) continue; obj = eindex->objects[i]; show_reach(obj->oid.hash, obj->type, 0, eindex->hashes[i], NULL, 0); } } static void show_objects_for_type( struct bitmap *objects, struct ewah_bitmap *type_filter, enum object_type object_type, show_reachable_fn show_reach) { size_t pos = 0, i = 0; uint32_t offset; struct ewah_iterator it; eword_t filter; if (bitmap_git.reuse_objects == bitmap_git.pack->num_objects) return; ewah_iterator_init(&it, type_filter); while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i] & filter; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { const unsigned char *sha1; struct revindex_entry *entry; uint32_t hash = 0; if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); if (pos + offset < bitmap_git.reuse_objects) continue; entry = &bitmap_git.pack->revindex[pos + offset]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); if (bitmap_git.hashes) hash = ntohl(bitmap_git.hashes[entry->nr]); show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset); } pos += BITS_IN_EWORD; i++; } } static int in_bitmapped_pack(struct object_list *roots) { while (roots) { struct object *object = roots->item; roots = roots->next; if (find_pack_entry_one(object->oid.hash, bitmap_git.pack) > 0) return 1; } return 0; } int prepare_bitmap_walk(struct rev_info *revs) { unsigned int i; unsigned int pending_nr = revs->pending.nr; struct object_array_entry *pending_e = revs->pending.objects; struct object_list *wants = NULL; struct object_list *haves = NULL; struct bitmap *wants_bitmap = NULL; struct bitmap *haves_bitmap = NULL; 
if (!bitmap_git.loaded) { /* try to open a bitmapped pack, but don't parse it yet * because we may not need to use it */ if (open_pack_bitmap() < 0) return -1; } for (i = 0; i < pending_nr; ++i) { struct object *object = pending_e[i].item; if (object->type == OBJ_NONE) parse_object_or_die(object->oid.hash, NULL); while (object->type == OBJ_TAG) { struct tag *tag = (struct tag *) object; if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); if (!tag->tagged) die("bad tag"); object = parse_object_or_die(tag->tagged->oid.hash, NULL); } if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); } /* * if we have a HAVES list, but none of those haves is contained * in the packfile that has a bitmap, we don't have anything to * optimize here */ if (haves && !in_bitmapped_pack(haves)) return -1; /* if we don't want anything, we're done here */ if (!wants) return -1; /* * now we're going to use bitmaps, so load the actual bitmap entries * from disk. this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ if (!bitmap_git.loaded && load_pack_bitmap() < 0) return -1; revs->pending.nr = 0; revs->pending.alloc = 0; revs->pending.objects = NULL; if (haves) { revs->ignore_missing_links = 1; haves_bitmap = find_objects(revs, haves, NULL); reset_revision_walk(); revs->ignore_missing_links = 0; if (haves_bitmap == NULL) die("BUG: failed to perform bitmap walk"); } wants_bitmap = find_objects(revs, wants, haves_bitmap); if (!wants_bitmap) die("BUG: failed to perform bitmap walk"); if (haves_bitmap) bitmap_and_not(wants_bitmap, haves_bitmap); bitmap_git.result = wants_bitmap; bitmap_free(haves_bitmap); return 0; } int reuse_partial_packfile_from_bitmap(struct packed_git **packfile, uint32_t *entries, off_t *up_to) { /* * Reuse the packfile content if we need more than * 90% of its objects */ static const double REUSE_PERCENT = 0.9; struct bitmap *result = bitmap_git.result; uint32_t reuse_threshold; uint32_t i, reuse_objects = 0; assert(result); for (i = 0; i < result->word_alloc; ++i) { if (result->words[i] != (eword_t)~0) { reuse_objects += ewah_bit_ctz64(~result->words[i]); break; } reuse_objects += BITS_IN_EWORD; } #ifdef GIT_BITMAP_DEBUG { const unsigned char *sha1; struct revindex_entry *entry; entry = &bitmap_git.reverse_index->revindex[reuse_objects]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); fprintf(stderr, "Failed to reuse at %d (%016llx)\n", reuse_objects, result->words[i]); fprintf(stderr, " %s\n", sha1_to_hex(sha1)); } #endif if (!reuse_objects) return -1; if (reuse_objects >= bitmap_git.pack->num_objects) { bitmap_git.reuse_objects = *entries = bitmap_git.pack->num_objects; *up_to = -1; /* reuse the full pack */ *packfile = bitmap_git.pack; return 0; } reuse_threshold = bitmap_popcount(bitmap_git.result) * REUSE_PERCENT; if (reuse_objects < reuse_threshold) return -1; bitmap_git.reuse_objects = *entries = reuse_objects; *up_to = bitmap_git.pack->revindex[reuse_objects].offset; *packfile = bitmap_git.pack; return 0; } void traverse_bitmap_commit_list(show_reachable_fn show_reachable) { assert(bitmap_git.result); show_objects_for_type(bitmap_git.result, bitmap_git.commits, OBJ_COMMIT, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.trees, OBJ_TREE, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.blobs, OBJ_BLOB, show_reachable); 
show_objects_for_type(bitmap_git.result, bitmap_git.tags, OBJ_TAG, show_reachable); show_extended_objects(bitmap_git.result, show_reachable); bitmap_free(bitmap_git.result); bitmap_git.result = NULL; } static uint32_t count_object_type(struct bitmap *objects, enum object_type type) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i = 0, count = 0; struct ewah_iterator it; eword_t filter; switch (type) { case OBJ_COMMIT: ewah_iterator_init(&it, bitmap_git.commits); break; case OBJ_TREE: ewah_iterator_init(&it, bitmap_git.trees); break; case OBJ_BLOB: ewah_iterator_init(&it, bitmap_git.blobs); break; case OBJ_TAG: ewah_iterator_init(&it, bitmap_git.tags); break; default: return 0; } while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i++] & filter; count += ewah_bit_popcount64(word); } for (i = 0; i < eindex->count; ++i) { if (eindex->objects[i]->type == type && bitmap_get(objects, bitmap_git.pack->num_objects + i)) count++; } return count; } void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags) { assert(bitmap_git.result); if (commits) *commits = count_object_type(bitmap_git.result, OBJ_COMMIT); if (trees) *trees = count_object_type(bitmap_git.result, OBJ_TREE); if (blobs) *blobs = count_object_type(bitmap_git.result, OBJ_BLOB); if (tags) *tags = count_object_type(bitmap_git.result, OBJ_TAG); } struct bitmap_test_data { struct bitmap *base; struct progress *prg; size_t seen; }; static void test_show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } static void test_show_commit(struct commit *commit, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } void test_bitmap_walk(struct rev_info *revs) { struct object *root; struct bitmap *result = NULL; khiter_t pos; size_t result_popcnt; struct bitmap_test_data tdata; if (prepare_bitmap_git()) die("failed to load bitmap indexes"); if (revs->pending.nr != 1) die("you must specify exactly one commit to test"); fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n", bitmap_git.version, bitmap_git.entry_count); root = revs->pending.objects[0].item; pos = kh_get_sha1(bitmap_git.bitmaps, root->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *bm = lookup_stored_bitmap(st); fprintf(stderr, "Found bitmap for %s. 
%d bits / %08x checksum\n", oid_to_hex(&root->oid), (int)bm->bit_size, ewah_checksum(bm)); result = ewah_to_bitmap(bm); } if (result == NULL) die("Commit %s doesn't have an indexed bitmap", oid_to_hex(&root->oid)); revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; result_popcnt = bitmap_popcount(result); if (prepare_revision_walk(revs)) die("revision walk setup failed"); tdata.base = bitmap_new(); tdata.prg = start_progress("Verifying bitmap entries", result_popcnt); tdata.seen = 0; traverse_commit_list(revs, &test_show_commit, &test_show_object, &tdata); stop_progress(&tdata.prg); if (bitmap_equals(result, tdata.base)) fprintf(stderr, "OK!\n"); else fprintf(stderr, "Mismatch!\n"); bitmap_free(result); } static int rebuild_bitmap(uint32_t *reposition, struct ewah_bitmap *source, struct bitmap *dest) { uint32_t pos = 0; struct ewah_iterator it; eword_t word; ewah_iterator_init(&it, source); while (ewah_iterator_next(&word, &it)) { uint32_t offset, bit_pos; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); bit_pos = reposition[pos + offset]; if (bit_pos > 0) bitmap_set(dest, bit_pos - 1); else /* can't reuse, we don't have the object */ return -1; } pos += BITS_IN_EWORD; } return 0; } int rebuild_existing_bitmaps(struct packing_data *mapping, khash_sha1 *reused_bitmaps, int show_progress) { uint32_t i, num_objects; uint32_t *reposition; struct bitmap *rebuild; struct stored_bitmap *stored; struct progress *progress = NULL; khiter_t hash_pos; int hash_ret; if (prepare_bitmap_git() < 0) return -1; num_objects = bitmap_git.pack->num_objects; reposition = xcalloc(num_objects, sizeof(uint32_t)); for (i = 0; i < num_objects; ++i) { const unsigned char *sha1; struct revindex_entry *entry; struct object_entry *oe; entry = &bitmap_git.pack->revindex[i]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); oe = packlist_find(mapping, sha1, NULL); if (oe) reposition[i] = oe->in_pack_pos + 1; } rebuild = bitmap_new(); i = 0; if (show_progress) progress = start_progress("Reusing bitmaps", 0); kh_foreach_value(bitmap_git.bitmaps, stored, { if (stored->flags & BITMAP_FLAG_REUSE) { if (!rebuild_bitmap(reposition, lookup_stored_bitmap(stored), rebuild)) { hash_pos = kh_put_sha1(reused_bitmaps, stored->sha1, &hash_ret); kh_value(reused_bitmaps, hash_pos) = bitmap_to_ewah(rebuild); } bitmap_reset(rebuild); display_progress(progress, ++i); } }); stop_progress(&progress); free(reposition); bitmap_free(rebuild); return 0; }
#include "cache.h" #include "commit.h" #include "tag.h" #include "diff.h" #include "revision.h" #include "progress.h" #include "list-objects.h" #include "pack.h" #include "pack-bitmap.h" #include "pack-revindex.h" #include "pack-objects.h" /* * An entry on the bitmap index, representing the bitmap for a given * commit. */ struct stored_bitmap { unsigned char sha1[20]; struct ewah_bitmap *root; struct stored_bitmap *xor; int flags; }; /* * The currently active bitmap index. By design, repositories only have * a single bitmap index available (the index for the biggest packfile in * the repository), since bitmap indexes need full closure. * * If there is more than one bitmap index available (e.g. because of alternates), * the active bitmap index is the largest one. */ static struct bitmap_index { /* Packfile to which this bitmap index belongs to */ struct packed_git *pack; /* * Mark the first `reuse_objects` in the packfile as reused: * they will be sent as-is without using them for repacking * calculations */ uint32_t reuse_objects; /* mmapped buffer of the whole bitmap index */ unsigned char *map; size_t map_size; /* size of the mmaped buffer */ size_t map_pos; /* current position when loading the index */ /* * Type indexes. * * Each bitmap marks which objects in the packfile are of the given * type. This provides type information when yielding the objects from * the packfile during a walk, which allows for better delta bases. */ struct ewah_bitmap *commits; struct ewah_bitmap *trees; struct ewah_bitmap *blobs; struct ewah_bitmap *tags; /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */ khash_sha1 *bitmaps; /* Number of bitmapped commits */ uint32_t entry_count; /* Name-hash cache (or NULL if not present). */ uint32_t *hashes; /* * Extended index. 
* * When trying to perform bitmap operations with objects that are not * packed in `pack`, these objects are added to this "fake index" and * are assumed to appear at the end of the packfile for all operations */ struct eindex { struct object **objects; uint32_t *hashes; uint32_t count, alloc; khash_sha1_pos *positions; } ext_index; /* Bitmap result of the last performed walk */ struct bitmap *result; /* Version of the bitmap index */ unsigned int version; unsigned loaded : 1; } bitmap_git; static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st) { struct ewah_bitmap *parent; struct ewah_bitmap *composed; if (st->xor == NULL) return st->root; composed = ewah_pool_new(); parent = lookup_stored_bitmap(st->xor); ewah_xor(st->root, parent, composed); ewah_pool_free(st->root); st->root = composed; st->xor = NULL; return composed; } /* * Read a bitmap from the current read position on the mmaped * index, and increase the read position accordingly */ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) { struct ewah_bitmap *b = ewah_pool_new(); int bitmap_size = ewah_read_mmap(b, index->map + index->map_pos, index->map_size - index->map_pos); if (bitmap_size < 0) { error("Failed to load bitmap index (corrupted?)"); ewah_pool_free(b); return NULL; } index->map_pos += bitmap_size; return b; } static int load_bitmap_header(struct bitmap_index *index) { struct bitmap_disk_header *header = (void *)index->map; if (index->map_size < sizeof(*header) + 20) return error("Corrupted bitmap index (missing header data)"); if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0) return error("Corrupted bitmap index file (wrong header)"); index->version = ntohs(header->version); if (index->version != 1) return error("Unsupported version for bitmap index file (%d)", index->version); /* Parse known bitmap format options */ { uint32_t flags = ntohs(header->options); if ((flags & BITMAP_OPT_FULL_DAG) == 0) return error("Unsupported options for bitmap index file " "(Git requires BITMAP_OPT_FULL_DAG)"); if (flags & BITMAP_OPT_HASH_CACHE) { unsigned char *end = index->map + index->map_size - 20; index->hashes = ((uint32_t *)end) - index->pack->num_objects; } } index->entry_count = ntohl(header->entry_count); index->map_pos += sizeof(*header); return 0; } static struct stored_bitmap *store_bitmap(struct bitmap_index *index, struct ewah_bitmap *root, const unsigned char *sha1, struct stored_bitmap *xor_with, int flags) { struct stored_bitmap *stored; khiter_t hash_pos; int ret; stored = xmalloc(sizeof(struct stored_bitmap)); stored->root = root; stored->xor = xor_with; stored->flags = flags; hashcpy(stored->sha1, sha1); hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret); /* a 0 return code means the insertion succeeded with no changes, * because the SHA1 already existed on the map. 
this is bad, there * shouldn't be duplicated commits in the index */ if (ret == 0) { error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1)); return NULL; } kh_value(index->bitmaps, hash_pos) = stored; return stored; } static inline uint32_t read_be32(const unsigned char *buffer, size_t *pos) { uint32_t result = get_be32(buffer + *pos); (*pos) += sizeof(result); return result; } static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos) { return buffer[(*pos)++]; } #define MAX_XOR_OFFSET 160 static int load_bitmap_entries_v1(struct bitmap_index *index) { uint32_t i; struct stored_bitmap *recent_bitmaps[MAX_XOR_OFFSET] = { NULL }; for (i = 0; i < index->entry_count; ++i) { int xor_offset, flags; struct ewah_bitmap *bitmap = NULL; struct stored_bitmap *xor_bitmap = NULL; uint32_t commit_idx_pos; const unsigned char *sha1; commit_idx_pos = read_be32(index->map, &index->map_pos); xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); sha1 = nth_packed_object_sha1(index->pack, commit_idx_pos); bitmap = read_bitmap_1(index); if (!bitmap) return -1; if (xor_offset > MAX_XOR_OFFSET || xor_offset > i) return error("Corrupted bitmap pack index"); if (xor_offset > 0) { xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET]; if (xor_bitmap == NULL) return error("Invalid XOR offset in bitmap pack index"); } recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap( index, bitmap, sha1, xor_bitmap, flags); } return 0; } static char *pack_bitmap_filename(struct packed_git *p) { size_t len; if (!strip_suffix(p->pack_name, ".pack", &len)) die("BUG: pack_name does not end in .pack"); return xstrfmt("%.*s.bitmap", (int)len, p->pack_name); } static int open_pack_bitmap_1(struct packed_git *packfile) { int fd; struct stat st; char *idx_name; if (open_pack_index(packfile)) return -1; idx_name = pack_bitmap_filename(packfile); fd = git_open_noatime(idx_name); free(idx_name); if (fd < 0) return -1; if (fstat(fd, &st)) { close(fd); return -1; } if (bitmap_git.pack) { warning("ignoring extra bitmap file: %s", packfile->pack_name); close(fd); return -1; } bitmap_git.pack = packfile; bitmap_git.map_size = xsize_t(st.st_size); bitmap_git.map = xmmap(NULL, bitmap_git.map_size, PROT_READ, MAP_PRIVATE, fd, 0); bitmap_git.map_pos = 0; close(fd); if (load_bitmap_header(&bitmap_git) < 0) { munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } return 0; } static int load_pack_bitmap(void) { assert(bitmap_git.map && !bitmap_git.loaded); bitmap_git.bitmaps = kh_init_sha1(); bitmap_git.ext_index.positions = kh_init_sha1_pos(); load_pack_revindex(bitmap_git.pack); if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) || !(bitmap_git.trees = read_bitmap_1(&bitmap_git)) || !(bitmap_git.blobs = read_bitmap_1(&bitmap_git)) || !(bitmap_git.tags = read_bitmap_1(&bitmap_git))) goto failed; if (load_bitmap_entries_v1(&bitmap_git) < 0) goto failed; bitmap_git.loaded = 1; return 0; failed: munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } static int open_pack_bitmap(void) { struct packed_git *p; int ret = -1; assert(!bitmap_git.map && !bitmap_git.loaded); prepare_packed_git(); for (p = packed_git; p; p = p->next) { if (open_pack_bitmap_1(p) == 0) ret = 0; } return ret; } int prepare_bitmap_git(void) { if (bitmap_git.loaded) return 0; if (!open_pack_bitmap()) return load_pack_bitmap(); return -1; } struct include_data { struct bitmap *base; struct bitmap *seen; }; static 
inline int bitmap_position_extended(const unsigned char *sha1) { khash_sha1_pos *positions = bitmap_git.ext_index.positions; khiter_t pos = kh_get_sha1_pos(positions, sha1); if (pos < kh_end(positions)) { int bitmap_pos = kh_value(positions, pos); return bitmap_pos + bitmap_git.pack->num_objects; } return -1; } static inline int bitmap_position_packfile(const unsigned char *sha1) { off_t offset = find_pack_entry_one(sha1, bitmap_git.pack); if (!offset) return -1; return find_revindex_position(bitmap_git.pack, offset); } static int bitmap_position(const unsigned char *sha1) { int pos = bitmap_position_packfile(sha1); return (pos >= 0) ? pos : bitmap_position_extended(sha1); } static int ext_index_add_object(struct object *object, const char *name) { struct eindex *eindex = &bitmap_git.ext_index; khiter_t hash_pos; int hash_ret; int bitmap_pos; hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret); if (hash_ret > 0) { if (eindex->count >= eindex->alloc) { eindex->alloc = (eindex->alloc + 16) * 3 / 2; REALLOC_ARRAY(eindex->objects, eindex->alloc); REALLOC_ARRAY(eindex->hashes, eindex->alloc); } bitmap_pos = eindex->count; eindex->objects[eindex->count] = object; eindex->hashes[eindex->count] = pack_name_hash(name); kh_value(eindex->positions, hash_pos) = bitmap_pos; eindex->count++; } else { bitmap_pos = kh_value(eindex->positions, hash_pos); } return bitmap_pos + bitmap_git.pack->num_objects; } static void show_object(struct object *object, const char *name, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object(object, name); bitmap_set(base, bitmap_pos); } static void show_commit(struct commit *commit, void *data) { } static int add_to_include_set(struct include_data *data, const unsigned char *sha1, int bitmap_pos) { khiter_t hash_pos; if (data->seen && bitmap_get(data->seen, bitmap_pos)) return 0; if (bitmap_get(data->base, bitmap_pos)) return 0; hash_pos = kh_get_sha1(bitmap_git.bitmaps, sha1); if (hash_pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, hash_pos); bitmap_or_ewah(data->base, lookup_stored_bitmap(st)); return 0; } bitmap_set(data->base, bitmap_pos); return 1; } static int should_include(struct commit *commit, void *_data) { struct include_data *data = _data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object((struct object *)commit, NULL); if (!add_to_include_set(data, commit->object.oid.hash, bitmap_pos)) { struct commit_list *parent = commit->parents; while (parent) { parent->item->object.flags |= SEEN; parent = parent->next; } return 0; } return 1; } static struct bitmap *find_objects(struct rev_info *revs, struct object_list *roots, struct bitmap *seen) { struct bitmap *base = NULL; int needs_walk = 0; struct object_list *not_mapped = NULL; /* * Go through all the roots for the walk. The ones that have bitmaps * on the bitmap index will be `or`ed together to form an initial * global reachability analysis. * * The ones without bitmaps in the index will be stored in the * `not_mapped_list` for further processing. 
*/ while (roots) { struct object *object = roots->item; roots = roots->next; if (object->type == OBJ_COMMIT) { khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *or_with = lookup_stored_bitmap(st); if (base == NULL) base = ewah_to_bitmap(or_with); else bitmap_or_ewah(base, or_with); object->flags |= SEEN; continue; } } object_list_insert(object, &not_mapped); } /* * Best case scenario: We found bitmaps for all the roots, * so the resulting `or` bitmap has the full reachability analysis */ if (not_mapped == NULL) return base; roots = not_mapped; /* * Let's iterate through all the roots that don't have bitmaps to * check if we can determine them to be reachable from the existing * global bitmap. * * If we cannot find them in the existing global bitmap, we'll need * to push them to an actual walk and run it until we can confirm * they are reachable */ while (roots) { struct object *object = roots->item; int pos; roots = roots->next; pos = bitmap_position(object->oid.hash); if (pos < 0 || base == NULL || !bitmap_get(base, pos)) { object->flags &= ~UNINTERESTING; add_pending_object(revs, object, ""); needs_walk = 1; } else { object->flags |= SEEN; } } if (needs_walk) { struct include_data incdata; if (base == NULL) base = bitmap_new(); incdata.base = base; incdata.seen = seen; revs->include_check = should_include; revs->include_check_data = &incdata; if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, show_commit, show_object, base); } return base; } static void show_extended_objects(struct bitmap *objects, show_reachable_fn show_reach) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i; for (i = 0; i < eindex->count; ++i) { struct object *obj; if (!bitmap_get(objects, bitmap_git.pack->num_objects + i)) continue; obj = eindex->objects[i]; show_reach(obj->oid.hash, obj->type, 0, eindex->hashes[i], NULL, 0); } } static void show_objects_for_type( struct bitmap *objects, struct ewah_bitmap *type_filter, enum object_type object_type, show_reachable_fn show_reach) { size_t pos = 0, i = 0; uint32_t offset; struct ewah_iterator it; eword_t filter; if (bitmap_git.reuse_objects == bitmap_git.pack->num_objects) return; ewah_iterator_init(&it, type_filter); while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i] & filter; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { const unsigned char *sha1; struct revindex_entry *entry; uint32_t hash = 0; if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); if (pos + offset < bitmap_git.reuse_objects) continue; entry = &bitmap_git.pack->revindex[pos + offset]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); if (bitmap_git.hashes) hash = ntohl(bitmap_git.hashes[entry->nr]); show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset); } pos += BITS_IN_EWORD; i++; } } static int in_bitmapped_pack(struct object_list *roots) { while (roots) { struct object *object = roots->item; roots = roots->next; if (find_pack_entry_one(object->oid.hash, bitmap_git.pack) > 0) return 1; } return 0; } int prepare_bitmap_walk(struct rev_info *revs) { unsigned int i; unsigned int pending_nr = revs->pending.nr; struct object_array_entry *pending_e = revs->pending.objects; struct object_list *wants = NULL; struct object_list *haves = NULL; struct bitmap *wants_bitmap = NULL; struct bitmap *haves_bitmap = NULL; 
if (!bitmap_git.loaded) { /* try to open a bitmapped pack, but don't parse it yet * because we may not need to use it */ if (open_pack_bitmap() < 0) return -1; } for (i = 0; i < pending_nr; ++i) { struct object *object = pending_e[i].item; if (object->type == OBJ_NONE) parse_object_or_die(object->oid.hash, NULL); while (object->type == OBJ_TAG) { struct tag *tag = (struct tag *) object; if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); if (!tag->tagged) die("bad tag"); object = parse_object_or_die(tag->tagged->oid.hash, NULL); } if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); } /* * if we have a HAVES list, but none of those haves is contained * in the packfile that has a bitmap, we don't have anything to * optimize here */ if (haves && !in_bitmapped_pack(haves)) return -1; /* if we don't want anything, we're done here */ if (!wants) return -1; /* * now we're going to use bitmaps, so load the actual bitmap entries * from disk. this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ if (!bitmap_git.loaded && load_pack_bitmap() < 0) return -1; revs->pending.nr = 0; revs->pending.alloc = 0; revs->pending.objects = NULL; if (haves) { revs->ignore_missing_links = 1; haves_bitmap = find_objects(revs, haves, NULL); reset_revision_walk(); revs->ignore_missing_links = 0; if (haves_bitmap == NULL) die("BUG: failed to perform bitmap walk"); } wants_bitmap = find_objects(revs, wants, haves_bitmap); if (!wants_bitmap) die("BUG: failed to perform bitmap walk"); if (haves_bitmap) bitmap_and_not(wants_bitmap, haves_bitmap); bitmap_git.result = wants_bitmap; bitmap_free(haves_bitmap); return 0; } int reuse_partial_packfile_from_bitmap(struct packed_git **packfile, uint32_t *entries, off_t *up_to) { /* * Reuse the packfile content if we need more than * 90% of its objects */ static const double REUSE_PERCENT = 0.9; struct bitmap *result = bitmap_git.result; uint32_t reuse_threshold; uint32_t i, reuse_objects = 0; assert(result); for (i = 0; i < result->word_alloc; ++i) { if (result->words[i] != (eword_t)~0) { reuse_objects += ewah_bit_ctz64(~result->words[i]); break; } reuse_objects += BITS_IN_EWORD; } #ifdef GIT_BITMAP_DEBUG { const unsigned char *sha1; struct revindex_entry *entry; entry = &bitmap_git.reverse_index->revindex[reuse_objects]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); fprintf(stderr, "Failed to reuse at %d (%016llx)\n", reuse_objects, result->words[i]); fprintf(stderr, " %s\n", sha1_to_hex(sha1)); } #endif if (!reuse_objects) return -1; if (reuse_objects >= bitmap_git.pack->num_objects) { bitmap_git.reuse_objects = *entries = bitmap_git.pack->num_objects; *up_to = -1; /* reuse the full pack */ *packfile = bitmap_git.pack; return 0; } reuse_threshold = bitmap_popcount(bitmap_git.result) * REUSE_PERCENT; if (reuse_objects < reuse_threshold) return -1; bitmap_git.reuse_objects = *entries = reuse_objects; *up_to = bitmap_git.pack->revindex[reuse_objects].offset; *packfile = bitmap_git.pack; return 0; } void traverse_bitmap_commit_list(show_reachable_fn show_reachable) { assert(bitmap_git.result); show_objects_for_type(bitmap_git.result, bitmap_git.commits, OBJ_COMMIT, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.trees, OBJ_TREE, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.blobs, OBJ_BLOB, show_reachable); 
show_objects_for_type(bitmap_git.result, bitmap_git.tags, OBJ_TAG, show_reachable); show_extended_objects(bitmap_git.result, show_reachable); bitmap_free(bitmap_git.result); bitmap_git.result = NULL; } static uint32_t count_object_type(struct bitmap *objects, enum object_type type) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i = 0, count = 0; struct ewah_iterator it; eword_t filter; switch (type) { case OBJ_COMMIT: ewah_iterator_init(&it, bitmap_git.commits); break; case OBJ_TREE: ewah_iterator_init(&it, bitmap_git.trees); break; case OBJ_BLOB: ewah_iterator_init(&it, bitmap_git.blobs); break; case OBJ_TAG: ewah_iterator_init(&it, bitmap_git.tags); break; default: return 0; } while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i++] & filter; count += ewah_bit_popcount64(word); } for (i = 0; i < eindex->count; ++i) { if (eindex->objects[i]->type == type && bitmap_get(objects, bitmap_git.pack->num_objects + i)) count++; } return count; } void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags) { assert(bitmap_git.result); if (commits) *commits = count_object_type(bitmap_git.result, OBJ_COMMIT); if (trees) *trees = count_object_type(bitmap_git.result, OBJ_TREE); if (blobs) *blobs = count_object_type(bitmap_git.result, OBJ_BLOB); if (tags) *tags = count_object_type(bitmap_git.result, OBJ_TAG); } struct bitmap_test_data { struct bitmap *base; struct progress *prg; size_t seen; }; static void test_show_object(struct object *object, const char *name, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } static void test_show_commit(struct commit *commit, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } void test_bitmap_walk(struct rev_info *revs) { struct object *root; struct bitmap *result = NULL; khiter_t pos; size_t result_popcnt; struct bitmap_test_data tdata; if (prepare_bitmap_git()) die("failed to load bitmap indexes"); if (revs->pending.nr != 1) die("you must specify exactly one commit to test"); fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n", bitmap_git.version, bitmap_git.entry_count); root = revs->pending.objects[0].item; pos = kh_get_sha1(bitmap_git.bitmaps, root->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *bm = lookup_stored_bitmap(st); fprintf(stderr, "Found bitmap for %s. 
%d bits / %08x checksum\n", oid_to_hex(&root->oid), (int)bm->bit_size, ewah_checksum(bm)); result = ewah_to_bitmap(bm); } if (result == NULL) die("Commit %s doesn't have an indexed bitmap", oid_to_hex(&root->oid)); revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; result_popcnt = bitmap_popcount(result); if (prepare_revision_walk(revs)) die("revision walk setup failed"); tdata.base = bitmap_new(); tdata.prg = start_progress("Verifying bitmap entries", result_popcnt); tdata.seen = 0; traverse_commit_list(revs, &test_show_commit, &test_show_object, &tdata); stop_progress(&tdata.prg); if (bitmap_equals(result, tdata.base)) fprintf(stderr, "OK!\n"); else fprintf(stderr, "Mismatch!\n"); bitmap_free(result); } static int rebuild_bitmap(uint32_t *reposition, struct ewah_bitmap *source, struct bitmap *dest) { uint32_t pos = 0; struct ewah_iterator it; eword_t word; ewah_iterator_init(&it, source); while (ewah_iterator_next(&word, &it)) { uint32_t offset, bit_pos; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); bit_pos = reposition[pos + offset]; if (bit_pos > 0) bitmap_set(dest, bit_pos - 1); else /* can't reuse, we don't have the object */ return -1; } pos += BITS_IN_EWORD; } return 0; } int rebuild_existing_bitmaps(struct packing_data *mapping, khash_sha1 *reused_bitmaps, int show_progress) { uint32_t i, num_objects; uint32_t *reposition; struct bitmap *rebuild; struct stored_bitmap *stored; struct progress *progress = NULL; khiter_t hash_pos; int hash_ret; if (prepare_bitmap_git() < 0) return -1; num_objects = bitmap_git.pack->num_objects; reposition = xcalloc(num_objects, sizeof(uint32_t)); for (i = 0; i < num_objects; ++i) { const unsigned char *sha1; struct revindex_entry *entry; struct object_entry *oe; entry = &bitmap_git.pack->revindex[i]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); oe = packlist_find(mapping, sha1, NULL); if (oe) reposition[i] = oe->in_pack_pos + 1; } rebuild = bitmap_new(); i = 0; if (show_progress) progress = start_progress("Reusing bitmaps", 0); kh_foreach_value(bitmap_git.bitmaps, stored, { if (stored->flags & BITMAP_FLAG_REUSE) { if (!rebuild_bitmap(reposition, lookup_stored_bitmap(stored), rebuild)) { hash_pos = kh_put_sha1(reused_bitmaps, stored->sha1, &hash_ret); kh_value(reused_bitmaps, hash_pos) = bitmap_to_ewah(rebuild); } bitmap_reset(rebuild); display_progress(progress, ++i); } }); stop_progress(&progress); free(reposition); bitmap_free(rebuild); return 0; }
static void show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) { char *name = path_name(path, last); bitmap_pos = ext_index_add_object(object, name); free(name); } bitmap_set(base, bitmap_pos); }
static void show_object(struct object *object, const char *name, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object(object, name); bitmap_set(base, bitmap_pos); }
{'added': [(417, 'static void show_object(struct object *object, const char *name, void *data)'), (424, '\tif (bitmap_pos < 0)'), (893, 'static void test_show_object(struct object *object, const char *name,'), (894, '\t\t\t void *data)')], 'deleted': [(417, 'static void show_object(struct object *object, struct strbuf *path,'), (418, '\t\t\tconst char *last, void *data)'), (425, '\tif (bitmap_pos < 0) {'), (426, '\t\tchar *name = path_name(path, last);'), (428, '\t\tfree(name);'), (429, '\t}'), (897, 'static void test_show_object(struct object *object,'), (898, '\t\t\t struct strbuf *path,'), (899, '\t\t\t const char *last, void *data)')]}
4
9
744
4,729
https://github.com/git/git
CVE-2016-2315
['CWE-119']
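The record above captures the list-objects callback change in pack-bitmap.c: show_object() drops the (struct strbuf *path, const char *last) pair, together with the path_name() call and its free(), in favour of a single const char *name supplied by the walker. Below is a minimal, self-contained sketch of that "after" shape, not git code; every identifier in it (walk_entry, emit_object, walk) is illustrative.

/*
 * Illustrative sketch only: the walker owns the path buffer and hands each
 * callback a borrowed, fully built name, so callbacks in the style of the
 * record's show_object() never allocate or free per-object names.
 */
#include <stdio.h>
#include <string.h>

struct walk_entry {
	const char *dir;	/* directory part of the path */
	const char *base;	/* final path component */
	int id;			/* stand-in for the object */
};

/* "after"-style callback: receives a ready-made name it does not own */
typedef void (*show_object_fn)(int id, const char *name, void *data);

static void emit_object(int id, const char *name, void *data)
{
	int *count = data;

	printf("object %d at %s\n", id, name);
	(*count)++;
}

/* the walker concatenates dir + base once, in a buffer it owns */
static void walk(const struct walk_entry *entries, size_t n,
		 show_object_fn show, void *data)
{
	char name[256];
	size_t i;

	for (i = 0; i < n; i++) {
		snprintf(name, sizeof(name), "%s%s",
			 entries[i].dir, entries[i].base);
		show(entries[i].id, name, data);	/* borrowed pointer */
	}
}

int main(void)
{
	const struct walk_entry entries[] = {
		{ "src/", "main.c", 1 },
		{ "docs/", "README", 2 },
	};
	int count = 0;

	walk(entries, 2, emit_object, &count);
	printf("%d objects shown\n", count);
	return 0;
}

The point of this shape is ownership: the name is built once, in one place, and callbacks only borrow it for the duration of the call.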
pack-bitmap.c
test_show_object
#include "cache.h" #include "commit.h" #include "tag.h" #include "diff.h" #include "revision.h" #include "progress.h" #include "list-objects.h" #include "pack.h" #include "pack-bitmap.h" #include "pack-revindex.h" #include "pack-objects.h" /* * An entry on the bitmap index, representing the bitmap for a given * commit. */ struct stored_bitmap { unsigned char sha1[20]; struct ewah_bitmap *root; struct stored_bitmap *xor; int flags; }; /* * The currently active bitmap index. By design, repositories only have * a single bitmap index available (the index for the biggest packfile in * the repository), since bitmap indexes need full closure. * * If there is more than one bitmap index available (e.g. because of alternates), * the active bitmap index is the largest one. */ static struct bitmap_index { /* Packfile to which this bitmap index belongs to */ struct packed_git *pack; /* * Mark the first `reuse_objects` in the packfile as reused: * they will be sent as-is without using them for repacking * calculations */ uint32_t reuse_objects; /* mmapped buffer of the whole bitmap index */ unsigned char *map; size_t map_size; /* size of the mmaped buffer */ size_t map_pos; /* current position when loading the index */ /* * Type indexes. * * Each bitmap marks which objects in the packfile are of the given * type. This provides type information when yielding the objects from * the packfile during a walk, which allows for better delta bases. */ struct ewah_bitmap *commits; struct ewah_bitmap *trees; struct ewah_bitmap *blobs; struct ewah_bitmap *tags; /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */ khash_sha1 *bitmaps; /* Number of bitmapped commits */ uint32_t entry_count; /* Name-hash cache (or NULL if not present). */ uint32_t *hashes; /* * Extended index. 
* * When trying to perform bitmap operations with objects that are not * packed in `pack`, these objects are added to this "fake index" and * are assumed to appear at the end of the packfile for all operations */ struct eindex { struct object **objects; uint32_t *hashes; uint32_t count, alloc; khash_sha1_pos *positions; } ext_index; /* Bitmap result of the last performed walk */ struct bitmap *result; /* Version of the bitmap index */ unsigned int version; unsigned loaded : 1; } bitmap_git; static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st) { struct ewah_bitmap *parent; struct ewah_bitmap *composed; if (st->xor == NULL) return st->root; composed = ewah_pool_new(); parent = lookup_stored_bitmap(st->xor); ewah_xor(st->root, parent, composed); ewah_pool_free(st->root); st->root = composed; st->xor = NULL; return composed; } /* * Read a bitmap from the current read position on the mmaped * index, and increase the read position accordingly */ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) { struct ewah_bitmap *b = ewah_pool_new(); int bitmap_size = ewah_read_mmap(b, index->map + index->map_pos, index->map_size - index->map_pos); if (bitmap_size < 0) { error("Failed to load bitmap index (corrupted?)"); ewah_pool_free(b); return NULL; } index->map_pos += bitmap_size; return b; } static int load_bitmap_header(struct bitmap_index *index) { struct bitmap_disk_header *header = (void *)index->map; if (index->map_size < sizeof(*header) + 20) return error("Corrupted bitmap index (missing header data)"); if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0) return error("Corrupted bitmap index file (wrong header)"); index->version = ntohs(header->version); if (index->version != 1) return error("Unsupported version for bitmap index file (%d)", index->version); /* Parse known bitmap format options */ { uint32_t flags = ntohs(header->options); if ((flags & BITMAP_OPT_FULL_DAG) == 0) return error("Unsupported options for bitmap index file " "(Git requires BITMAP_OPT_FULL_DAG)"); if (flags & BITMAP_OPT_HASH_CACHE) { unsigned char *end = index->map + index->map_size - 20; index->hashes = ((uint32_t *)end) - index->pack->num_objects; } } index->entry_count = ntohl(header->entry_count); index->map_pos += sizeof(*header); return 0; } static struct stored_bitmap *store_bitmap(struct bitmap_index *index, struct ewah_bitmap *root, const unsigned char *sha1, struct stored_bitmap *xor_with, int flags) { struct stored_bitmap *stored; khiter_t hash_pos; int ret; stored = xmalloc(sizeof(struct stored_bitmap)); stored->root = root; stored->xor = xor_with; stored->flags = flags; hashcpy(stored->sha1, sha1); hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret); /* a 0 return code means the insertion succeeded with no changes, * because the SHA1 already existed on the map. 
this is bad, there * shouldn't be duplicated commits in the index */ if (ret == 0) { error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1)); return NULL; } kh_value(index->bitmaps, hash_pos) = stored; return stored; } static inline uint32_t read_be32(const unsigned char *buffer, size_t *pos) { uint32_t result = get_be32(buffer + *pos); (*pos) += sizeof(result); return result; } static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos) { return buffer[(*pos)++]; } #define MAX_XOR_OFFSET 160 static int load_bitmap_entries_v1(struct bitmap_index *index) { uint32_t i; struct stored_bitmap *recent_bitmaps[MAX_XOR_OFFSET] = { NULL }; for (i = 0; i < index->entry_count; ++i) { int xor_offset, flags; struct ewah_bitmap *bitmap = NULL; struct stored_bitmap *xor_bitmap = NULL; uint32_t commit_idx_pos; const unsigned char *sha1; commit_idx_pos = read_be32(index->map, &index->map_pos); xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); sha1 = nth_packed_object_sha1(index->pack, commit_idx_pos); bitmap = read_bitmap_1(index); if (!bitmap) return -1; if (xor_offset > MAX_XOR_OFFSET || xor_offset > i) return error("Corrupted bitmap pack index"); if (xor_offset > 0) { xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET]; if (xor_bitmap == NULL) return error("Invalid XOR offset in bitmap pack index"); } recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap( index, bitmap, sha1, xor_bitmap, flags); } return 0; } static char *pack_bitmap_filename(struct packed_git *p) { size_t len; if (!strip_suffix(p->pack_name, ".pack", &len)) die("BUG: pack_name does not end in .pack"); return xstrfmt("%.*s.bitmap", (int)len, p->pack_name); } static int open_pack_bitmap_1(struct packed_git *packfile) { int fd; struct stat st; char *idx_name; if (open_pack_index(packfile)) return -1; idx_name = pack_bitmap_filename(packfile); fd = git_open_noatime(idx_name); free(idx_name); if (fd < 0) return -1; if (fstat(fd, &st)) { close(fd); return -1; } if (bitmap_git.pack) { warning("ignoring extra bitmap file: %s", packfile->pack_name); close(fd); return -1; } bitmap_git.pack = packfile; bitmap_git.map_size = xsize_t(st.st_size); bitmap_git.map = xmmap(NULL, bitmap_git.map_size, PROT_READ, MAP_PRIVATE, fd, 0); bitmap_git.map_pos = 0; close(fd); if (load_bitmap_header(&bitmap_git) < 0) { munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } return 0; } static int load_pack_bitmap(void) { assert(bitmap_git.map && !bitmap_git.loaded); bitmap_git.bitmaps = kh_init_sha1(); bitmap_git.ext_index.positions = kh_init_sha1_pos(); load_pack_revindex(bitmap_git.pack); if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) || !(bitmap_git.trees = read_bitmap_1(&bitmap_git)) || !(bitmap_git.blobs = read_bitmap_1(&bitmap_git)) || !(bitmap_git.tags = read_bitmap_1(&bitmap_git))) goto failed; if (load_bitmap_entries_v1(&bitmap_git) < 0) goto failed; bitmap_git.loaded = 1; return 0; failed: munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } static int open_pack_bitmap(void) { struct packed_git *p; int ret = -1; assert(!bitmap_git.map && !bitmap_git.loaded); prepare_packed_git(); for (p = packed_git; p; p = p->next) { if (open_pack_bitmap_1(p) == 0) ret = 0; } return ret; } int prepare_bitmap_git(void) { if (bitmap_git.loaded) return 0; if (!open_pack_bitmap()) return load_pack_bitmap(); return -1; } struct include_data { struct bitmap *base; struct bitmap *seen; }; static 
inline int bitmap_position_extended(const unsigned char *sha1) { khash_sha1_pos *positions = bitmap_git.ext_index.positions; khiter_t pos = kh_get_sha1_pos(positions, sha1); if (pos < kh_end(positions)) { int bitmap_pos = kh_value(positions, pos); return bitmap_pos + bitmap_git.pack->num_objects; } return -1; } static inline int bitmap_position_packfile(const unsigned char *sha1) { off_t offset = find_pack_entry_one(sha1, bitmap_git.pack); if (!offset) return -1; return find_revindex_position(bitmap_git.pack, offset); } static int bitmap_position(const unsigned char *sha1) { int pos = bitmap_position_packfile(sha1); return (pos >= 0) ? pos : bitmap_position_extended(sha1); } static int ext_index_add_object(struct object *object, const char *name) { struct eindex *eindex = &bitmap_git.ext_index; khiter_t hash_pos; int hash_ret; int bitmap_pos; hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret); if (hash_ret > 0) { if (eindex->count >= eindex->alloc) { eindex->alloc = (eindex->alloc + 16) * 3 / 2; REALLOC_ARRAY(eindex->objects, eindex->alloc); REALLOC_ARRAY(eindex->hashes, eindex->alloc); } bitmap_pos = eindex->count; eindex->objects[eindex->count] = object; eindex->hashes[eindex->count] = pack_name_hash(name); kh_value(eindex->positions, hash_pos) = bitmap_pos; eindex->count++; } else { bitmap_pos = kh_value(eindex->positions, hash_pos); } return bitmap_pos + bitmap_git.pack->num_objects; } static void show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) { char *name = path_name(path, last); bitmap_pos = ext_index_add_object(object, name); free(name); } bitmap_set(base, bitmap_pos); } static void show_commit(struct commit *commit, void *data) { } static int add_to_include_set(struct include_data *data, const unsigned char *sha1, int bitmap_pos) { khiter_t hash_pos; if (data->seen && bitmap_get(data->seen, bitmap_pos)) return 0; if (bitmap_get(data->base, bitmap_pos)) return 0; hash_pos = kh_get_sha1(bitmap_git.bitmaps, sha1); if (hash_pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, hash_pos); bitmap_or_ewah(data->base, lookup_stored_bitmap(st)); return 0; } bitmap_set(data->base, bitmap_pos); return 1; } static int should_include(struct commit *commit, void *_data) { struct include_data *data = _data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object((struct object *)commit, NULL); if (!add_to_include_set(data, commit->object.oid.hash, bitmap_pos)) { struct commit_list *parent = commit->parents; while (parent) { parent->item->object.flags |= SEEN; parent = parent->next; } return 0; } return 1; } static struct bitmap *find_objects(struct rev_info *revs, struct object_list *roots, struct bitmap *seen) { struct bitmap *base = NULL; int needs_walk = 0; struct object_list *not_mapped = NULL; /* * Go through all the roots for the walk. The ones that have bitmaps * on the bitmap index will be `or`ed together to form an initial * global reachability analysis. * * The ones without bitmaps in the index will be stored in the * `not_mapped_list` for further processing. 
*/ while (roots) { struct object *object = roots->item; roots = roots->next; if (object->type == OBJ_COMMIT) { khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *or_with = lookup_stored_bitmap(st); if (base == NULL) base = ewah_to_bitmap(or_with); else bitmap_or_ewah(base, or_with); object->flags |= SEEN; continue; } } object_list_insert(object, &not_mapped); } /* * Best case scenario: We found bitmaps for all the roots, * so the resulting `or` bitmap has the full reachability analysis */ if (not_mapped == NULL) return base; roots = not_mapped; /* * Let's iterate through all the roots that don't have bitmaps to * check if we can determine them to be reachable from the existing * global bitmap. * * If we cannot find them in the existing global bitmap, we'll need * to push them to an actual walk and run it until we can confirm * they are reachable */ while (roots) { struct object *object = roots->item; int pos; roots = roots->next; pos = bitmap_position(object->oid.hash); if (pos < 0 || base == NULL || !bitmap_get(base, pos)) { object->flags &= ~UNINTERESTING; add_pending_object(revs, object, ""); needs_walk = 1; } else { object->flags |= SEEN; } } if (needs_walk) { struct include_data incdata; if (base == NULL) base = bitmap_new(); incdata.base = base; incdata.seen = seen; revs->include_check = should_include; revs->include_check_data = &incdata; if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, show_commit, show_object, base); } return base; } static void show_extended_objects(struct bitmap *objects, show_reachable_fn show_reach) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i; for (i = 0; i < eindex->count; ++i) { struct object *obj; if (!bitmap_get(objects, bitmap_git.pack->num_objects + i)) continue; obj = eindex->objects[i]; show_reach(obj->oid.hash, obj->type, 0, eindex->hashes[i], NULL, 0); } } static void show_objects_for_type( struct bitmap *objects, struct ewah_bitmap *type_filter, enum object_type object_type, show_reachable_fn show_reach) { size_t pos = 0, i = 0; uint32_t offset; struct ewah_iterator it; eword_t filter; if (bitmap_git.reuse_objects == bitmap_git.pack->num_objects) return; ewah_iterator_init(&it, type_filter); while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i] & filter; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { const unsigned char *sha1; struct revindex_entry *entry; uint32_t hash = 0; if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); if (pos + offset < bitmap_git.reuse_objects) continue; entry = &bitmap_git.pack->revindex[pos + offset]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); if (bitmap_git.hashes) hash = ntohl(bitmap_git.hashes[entry->nr]); show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset); } pos += BITS_IN_EWORD; i++; } } static int in_bitmapped_pack(struct object_list *roots) { while (roots) { struct object *object = roots->item; roots = roots->next; if (find_pack_entry_one(object->oid.hash, bitmap_git.pack) > 0) return 1; } return 0; } int prepare_bitmap_walk(struct rev_info *revs) { unsigned int i; unsigned int pending_nr = revs->pending.nr; struct object_array_entry *pending_e = revs->pending.objects; struct object_list *wants = NULL; struct object_list *haves = NULL; struct bitmap *wants_bitmap = NULL; struct bitmap *haves_bitmap = NULL; 
if (!bitmap_git.loaded) { /* try to open a bitmapped pack, but don't parse it yet * because we may not need to use it */ if (open_pack_bitmap() < 0) return -1; } for (i = 0; i < pending_nr; ++i) { struct object *object = pending_e[i].item; if (object->type == OBJ_NONE) parse_object_or_die(object->oid.hash, NULL); while (object->type == OBJ_TAG) { struct tag *tag = (struct tag *) object; if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); if (!tag->tagged) die("bad tag"); object = parse_object_or_die(tag->tagged->oid.hash, NULL); } if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); } /* * if we have a HAVES list, but none of those haves is contained * in the packfile that has a bitmap, we don't have anything to * optimize here */ if (haves && !in_bitmapped_pack(haves)) return -1; /* if we don't want anything, we're done here */ if (!wants) return -1; /* * now we're going to use bitmaps, so load the actual bitmap entries * from disk. this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ if (!bitmap_git.loaded && load_pack_bitmap() < 0) return -1; revs->pending.nr = 0; revs->pending.alloc = 0; revs->pending.objects = NULL; if (haves) { revs->ignore_missing_links = 1; haves_bitmap = find_objects(revs, haves, NULL); reset_revision_walk(); revs->ignore_missing_links = 0; if (haves_bitmap == NULL) die("BUG: failed to perform bitmap walk"); } wants_bitmap = find_objects(revs, wants, haves_bitmap); if (!wants_bitmap) die("BUG: failed to perform bitmap walk"); if (haves_bitmap) bitmap_and_not(wants_bitmap, haves_bitmap); bitmap_git.result = wants_bitmap; bitmap_free(haves_bitmap); return 0; } int reuse_partial_packfile_from_bitmap(struct packed_git **packfile, uint32_t *entries, off_t *up_to) { /* * Reuse the packfile content if we need more than * 90% of its objects */ static const double REUSE_PERCENT = 0.9; struct bitmap *result = bitmap_git.result; uint32_t reuse_threshold; uint32_t i, reuse_objects = 0; assert(result); for (i = 0; i < result->word_alloc; ++i) { if (result->words[i] != (eword_t)~0) { reuse_objects += ewah_bit_ctz64(~result->words[i]); break; } reuse_objects += BITS_IN_EWORD; } #ifdef GIT_BITMAP_DEBUG { const unsigned char *sha1; struct revindex_entry *entry; entry = &bitmap_git.reverse_index->revindex[reuse_objects]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); fprintf(stderr, "Failed to reuse at %d (%016llx)\n", reuse_objects, result->words[i]); fprintf(stderr, " %s\n", sha1_to_hex(sha1)); } #endif if (!reuse_objects) return -1; if (reuse_objects >= bitmap_git.pack->num_objects) { bitmap_git.reuse_objects = *entries = bitmap_git.pack->num_objects; *up_to = -1; /* reuse the full pack */ *packfile = bitmap_git.pack; return 0; } reuse_threshold = bitmap_popcount(bitmap_git.result) * REUSE_PERCENT; if (reuse_objects < reuse_threshold) return -1; bitmap_git.reuse_objects = *entries = reuse_objects; *up_to = bitmap_git.pack->revindex[reuse_objects].offset; *packfile = bitmap_git.pack; return 0; } void traverse_bitmap_commit_list(show_reachable_fn show_reachable) { assert(bitmap_git.result); show_objects_for_type(bitmap_git.result, bitmap_git.commits, OBJ_COMMIT, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.trees, OBJ_TREE, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.blobs, OBJ_BLOB, show_reachable); 
show_objects_for_type(bitmap_git.result, bitmap_git.tags, OBJ_TAG, show_reachable); show_extended_objects(bitmap_git.result, show_reachable); bitmap_free(bitmap_git.result); bitmap_git.result = NULL; } static uint32_t count_object_type(struct bitmap *objects, enum object_type type) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i = 0, count = 0; struct ewah_iterator it; eword_t filter; switch (type) { case OBJ_COMMIT: ewah_iterator_init(&it, bitmap_git.commits); break; case OBJ_TREE: ewah_iterator_init(&it, bitmap_git.trees); break; case OBJ_BLOB: ewah_iterator_init(&it, bitmap_git.blobs); break; case OBJ_TAG: ewah_iterator_init(&it, bitmap_git.tags); break; default: return 0; } while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i++] & filter; count += ewah_bit_popcount64(word); } for (i = 0; i < eindex->count; ++i) { if (eindex->objects[i]->type == type && bitmap_get(objects, bitmap_git.pack->num_objects + i)) count++; } return count; } void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags) { assert(bitmap_git.result); if (commits) *commits = count_object_type(bitmap_git.result, OBJ_COMMIT); if (trees) *trees = count_object_type(bitmap_git.result, OBJ_TREE); if (blobs) *blobs = count_object_type(bitmap_git.result, OBJ_BLOB); if (tags) *tags = count_object_type(bitmap_git.result, OBJ_TAG); } struct bitmap_test_data { struct bitmap *base; struct progress *prg; size_t seen; }; static void test_show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } static void test_show_commit(struct commit *commit, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } void test_bitmap_walk(struct rev_info *revs) { struct object *root; struct bitmap *result = NULL; khiter_t pos; size_t result_popcnt; struct bitmap_test_data tdata; if (prepare_bitmap_git()) die("failed to load bitmap indexes"); if (revs->pending.nr != 1) die("you must specify exactly one commit to test"); fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n", bitmap_git.version, bitmap_git.entry_count); root = revs->pending.objects[0].item; pos = kh_get_sha1(bitmap_git.bitmaps, root->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *bm = lookup_stored_bitmap(st); fprintf(stderr, "Found bitmap for %s. 
%d bits / %08x checksum\n", oid_to_hex(&root->oid), (int)bm->bit_size, ewah_checksum(bm)); result = ewah_to_bitmap(bm); } if (result == NULL) die("Commit %s doesn't have an indexed bitmap", oid_to_hex(&root->oid)); revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; result_popcnt = bitmap_popcount(result); if (prepare_revision_walk(revs)) die("revision walk setup failed"); tdata.base = bitmap_new(); tdata.prg = start_progress("Verifying bitmap entries", result_popcnt); tdata.seen = 0; traverse_commit_list(revs, &test_show_commit, &test_show_object, &tdata); stop_progress(&tdata.prg); if (bitmap_equals(result, tdata.base)) fprintf(stderr, "OK!\n"); else fprintf(stderr, "Mismatch!\n"); bitmap_free(result); } static int rebuild_bitmap(uint32_t *reposition, struct ewah_bitmap *source, struct bitmap *dest) { uint32_t pos = 0; struct ewah_iterator it; eword_t word; ewah_iterator_init(&it, source); while (ewah_iterator_next(&word, &it)) { uint32_t offset, bit_pos; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); bit_pos = reposition[pos + offset]; if (bit_pos > 0) bitmap_set(dest, bit_pos - 1); else /* can't reuse, we don't have the object */ return -1; } pos += BITS_IN_EWORD; } return 0; } int rebuild_existing_bitmaps(struct packing_data *mapping, khash_sha1 *reused_bitmaps, int show_progress) { uint32_t i, num_objects; uint32_t *reposition; struct bitmap *rebuild; struct stored_bitmap *stored; struct progress *progress = NULL; khiter_t hash_pos; int hash_ret; if (prepare_bitmap_git() < 0) return -1; num_objects = bitmap_git.pack->num_objects; reposition = xcalloc(num_objects, sizeof(uint32_t)); for (i = 0; i < num_objects; ++i) { const unsigned char *sha1; struct revindex_entry *entry; struct object_entry *oe; entry = &bitmap_git.pack->revindex[i]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); oe = packlist_find(mapping, sha1, NULL); if (oe) reposition[i] = oe->in_pack_pos + 1; } rebuild = bitmap_new(); i = 0; if (show_progress) progress = start_progress("Reusing bitmaps", 0); kh_foreach_value(bitmap_git.bitmaps, stored, { if (stored->flags & BITMAP_FLAG_REUSE) { if (!rebuild_bitmap(reposition, lookup_stored_bitmap(stored), rebuild)) { hash_pos = kh_put_sha1(reused_bitmaps, stored->sha1, &hash_ret); kh_value(reused_bitmaps, hash_pos) = bitmap_to_ewah(rebuild); } bitmap_reset(rebuild); display_progress(progress, ++i); } }); stop_progress(&progress); free(reposition); bitmap_free(rebuild); return 0; }
#include "cache.h" #include "commit.h" #include "tag.h" #include "diff.h" #include "revision.h" #include "progress.h" #include "list-objects.h" #include "pack.h" #include "pack-bitmap.h" #include "pack-revindex.h" #include "pack-objects.h" /* * An entry on the bitmap index, representing the bitmap for a given * commit. */ struct stored_bitmap { unsigned char sha1[20]; struct ewah_bitmap *root; struct stored_bitmap *xor; int flags; }; /* * The currently active bitmap index. By design, repositories only have * a single bitmap index available (the index for the biggest packfile in * the repository), since bitmap indexes need full closure. * * If there is more than one bitmap index available (e.g. because of alternates), * the active bitmap index is the largest one. */ static struct bitmap_index { /* Packfile to which this bitmap index belongs to */ struct packed_git *pack; /* * Mark the first `reuse_objects` in the packfile as reused: * they will be sent as-is without using them for repacking * calculations */ uint32_t reuse_objects; /* mmapped buffer of the whole bitmap index */ unsigned char *map; size_t map_size; /* size of the mmaped buffer */ size_t map_pos; /* current position when loading the index */ /* * Type indexes. * * Each bitmap marks which objects in the packfile are of the given * type. This provides type information when yielding the objects from * the packfile during a walk, which allows for better delta bases. */ struct ewah_bitmap *commits; struct ewah_bitmap *trees; struct ewah_bitmap *blobs; struct ewah_bitmap *tags; /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */ khash_sha1 *bitmaps; /* Number of bitmapped commits */ uint32_t entry_count; /* Name-hash cache (or NULL if not present). */ uint32_t *hashes; /* * Extended index. 
* * When trying to perform bitmap operations with objects that are not * packed in `pack`, these objects are added to this "fake index" and * are assumed to appear at the end of the packfile for all operations */ struct eindex { struct object **objects; uint32_t *hashes; uint32_t count, alloc; khash_sha1_pos *positions; } ext_index; /* Bitmap result of the last performed walk */ struct bitmap *result; /* Version of the bitmap index */ unsigned int version; unsigned loaded : 1; } bitmap_git; static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st) { struct ewah_bitmap *parent; struct ewah_bitmap *composed; if (st->xor == NULL) return st->root; composed = ewah_pool_new(); parent = lookup_stored_bitmap(st->xor); ewah_xor(st->root, parent, composed); ewah_pool_free(st->root); st->root = composed; st->xor = NULL; return composed; } /* * Read a bitmap from the current read position on the mmaped * index, and increase the read position accordingly */ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) { struct ewah_bitmap *b = ewah_pool_new(); int bitmap_size = ewah_read_mmap(b, index->map + index->map_pos, index->map_size - index->map_pos); if (bitmap_size < 0) { error("Failed to load bitmap index (corrupted?)"); ewah_pool_free(b); return NULL; } index->map_pos += bitmap_size; return b; } static int load_bitmap_header(struct bitmap_index *index) { struct bitmap_disk_header *header = (void *)index->map; if (index->map_size < sizeof(*header) + 20) return error("Corrupted bitmap index (missing header data)"); if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0) return error("Corrupted bitmap index file (wrong header)"); index->version = ntohs(header->version); if (index->version != 1) return error("Unsupported version for bitmap index file (%d)", index->version); /* Parse known bitmap format options */ { uint32_t flags = ntohs(header->options); if ((flags & BITMAP_OPT_FULL_DAG) == 0) return error("Unsupported options for bitmap index file " "(Git requires BITMAP_OPT_FULL_DAG)"); if (flags & BITMAP_OPT_HASH_CACHE) { unsigned char *end = index->map + index->map_size - 20; index->hashes = ((uint32_t *)end) - index->pack->num_objects; } } index->entry_count = ntohl(header->entry_count); index->map_pos += sizeof(*header); return 0; } static struct stored_bitmap *store_bitmap(struct bitmap_index *index, struct ewah_bitmap *root, const unsigned char *sha1, struct stored_bitmap *xor_with, int flags) { struct stored_bitmap *stored; khiter_t hash_pos; int ret; stored = xmalloc(sizeof(struct stored_bitmap)); stored->root = root; stored->xor = xor_with; stored->flags = flags; hashcpy(stored->sha1, sha1); hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret); /* a 0 return code means the insertion succeeded with no changes, * because the SHA1 already existed on the map. 
this is bad, there * shouldn't be duplicated commits in the index */ if (ret == 0) { error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1)); return NULL; } kh_value(index->bitmaps, hash_pos) = stored; return stored; } static inline uint32_t read_be32(const unsigned char *buffer, size_t *pos) { uint32_t result = get_be32(buffer + *pos); (*pos) += sizeof(result); return result; } static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos) { return buffer[(*pos)++]; } #define MAX_XOR_OFFSET 160 static int load_bitmap_entries_v1(struct bitmap_index *index) { uint32_t i; struct stored_bitmap *recent_bitmaps[MAX_XOR_OFFSET] = { NULL }; for (i = 0; i < index->entry_count; ++i) { int xor_offset, flags; struct ewah_bitmap *bitmap = NULL; struct stored_bitmap *xor_bitmap = NULL; uint32_t commit_idx_pos; const unsigned char *sha1; commit_idx_pos = read_be32(index->map, &index->map_pos); xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); sha1 = nth_packed_object_sha1(index->pack, commit_idx_pos); bitmap = read_bitmap_1(index); if (!bitmap) return -1; if (xor_offset > MAX_XOR_OFFSET || xor_offset > i) return error("Corrupted bitmap pack index"); if (xor_offset > 0) { xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET]; if (xor_bitmap == NULL) return error("Invalid XOR offset in bitmap pack index"); } recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap( index, bitmap, sha1, xor_bitmap, flags); } return 0; } static char *pack_bitmap_filename(struct packed_git *p) { size_t len; if (!strip_suffix(p->pack_name, ".pack", &len)) die("BUG: pack_name does not end in .pack"); return xstrfmt("%.*s.bitmap", (int)len, p->pack_name); } static int open_pack_bitmap_1(struct packed_git *packfile) { int fd; struct stat st; char *idx_name; if (open_pack_index(packfile)) return -1; idx_name = pack_bitmap_filename(packfile); fd = git_open_noatime(idx_name); free(idx_name); if (fd < 0) return -1; if (fstat(fd, &st)) { close(fd); return -1; } if (bitmap_git.pack) { warning("ignoring extra bitmap file: %s", packfile->pack_name); close(fd); return -1; } bitmap_git.pack = packfile; bitmap_git.map_size = xsize_t(st.st_size); bitmap_git.map = xmmap(NULL, bitmap_git.map_size, PROT_READ, MAP_PRIVATE, fd, 0); bitmap_git.map_pos = 0; close(fd); if (load_bitmap_header(&bitmap_git) < 0) { munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } return 0; } static int load_pack_bitmap(void) { assert(bitmap_git.map && !bitmap_git.loaded); bitmap_git.bitmaps = kh_init_sha1(); bitmap_git.ext_index.positions = kh_init_sha1_pos(); load_pack_revindex(bitmap_git.pack); if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) || !(bitmap_git.trees = read_bitmap_1(&bitmap_git)) || !(bitmap_git.blobs = read_bitmap_1(&bitmap_git)) || !(bitmap_git.tags = read_bitmap_1(&bitmap_git))) goto failed; if (load_bitmap_entries_v1(&bitmap_git) < 0) goto failed; bitmap_git.loaded = 1; return 0; failed: munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } static int open_pack_bitmap(void) { struct packed_git *p; int ret = -1; assert(!bitmap_git.map && !bitmap_git.loaded); prepare_packed_git(); for (p = packed_git; p; p = p->next) { if (open_pack_bitmap_1(p) == 0) ret = 0; } return ret; } int prepare_bitmap_git(void) { if (bitmap_git.loaded) return 0; if (!open_pack_bitmap()) return load_pack_bitmap(); return -1; } struct include_data { struct bitmap *base; struct bitmap *seen; }; static 
inline int bitmap_position_extended(const unsigned char *sha1) { khash_sha1_pos *positions = bitmap_git.ext_index.positions; khiter_t pos = kh_get_sha1_pos(positions, sha1); if (pos < kh_end(positions)) { int bitmap_pos = kh_value(positions, pos); return bitmap_pos + bitmap_git.pack->num_objects; } return -1; } static inline int bitmap_position_packfile(const unsigned char *sha1) { off_t offset = find_pack_entry_one(sha1, bitmap_git.pack); if (!offset) return -1; return find_revindex_position(bitmap_git.pack, offset); } static int bitmap_position(const unsigned char *sha1) { int pos = bitmap_position_packfile(sha1); return (pos >= 0) ? pos : bitmap_position_extended(sha1); } static int ext_index_add_object(struct object *object, const char *name) { struct eindex *eindex = &bitmap_git.ext_index; khiter_t hash_pos; int hash_ret; int bitmap_pos; hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret); if (hash_ret > 0) { if (eindex->count >= eindex->alloc) { eindex->alloc = (eindex->alloc + 16) * 3 / 2; REALLOC_ARRAY(eindex->objects, eindex->alloc); REALLOC_ARRAY(eindex->hashes, eindex->alloc); } bitmap_pos = eindex->count; eindex->objects[eindex->count] = object; eindex->hashes[eindex->count] = pack_name_hash(name); kh_value(eindex->positions, hash_pos) = bitmap_pos; eindex->count++; } else { bitmap_pos = kh_value(eindex->positions, hash_pos); } return bitmap_pos + bitmap_git.pack->num_objects; } static void show_object(struct object *object, const char *name, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object(object, name); bitmap_set(base, bitmap_pos); } static void show_commit(struct commit *commit, void *data) { } static int add_to_include_set(struct include_data *data, const unsigned char *sha1, int bitmap_pos) { khiter_t hash_pos; if (data->seen && bitmap_get(data->seen, bitmap_pos)) return 0; if (bitmap_get(data->base, bitmap_pos)) return 0; hash_pos = kh_get_sha1(bitmap_git.bitmaps, sha1); if (hash_pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, hash_pos); bitmap_or_ewah(data->base, lookup_stored_bitmap(st)); return 0; } bitmap_set(data->base, bitmap_pos); return 1; } static int should_include(struct commit *commit, void *_data) { struct include_data *data = _data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object((struct object *)commit, NULL); if (!add_to_include_set(data, commit->object.oid.hash, bitmap_pos)) { struct commit_list *parent = commit->parents; while (parent) { parent->item->object.flags |= SEEN; parent = parent->next; } return 0; } return 1; } static struct bitmap *find_objects(struct rev_info *revs, struct object_list *roots, struct bitmap *seen) { struct bitmap *base = NULL; int needs_walk = 0; struct object_list *not_mapped = NULL; /* * Go through all the roots for the walk. The ones that have bitmaps * on the bitmap index will be `or`ed together to form an initial * global reachability analysis. * * The ones without bitmaps in the index will be stored in the * `not_mapped_list` for further processing. 
*/ while (roots) { struct object *object = roots->item; roots = roots->next; if (object->type == OBJ_COMMIT) { khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *or_with = lookup_stored_bitmap(st); if (base == NULL) base = ewah_to_bitmap(or_with); else bitmap_or_ewah(base, or_with); object->flags |= SEEN; continue; } } object_list_insert(object, &not_mapped); } /* * Best case scenario: We found bitmaps for all the roots, * so the resulting `or` bitmap has the full reachability analysis */ if (not_mapped == NULL) return base; roots = not_mapped; /* * Let's iterate through all the roots that don't have bitmaps to * check if we can determine them to be reachable from the existing * global bitmap. * * If we cannot find them in the existing global bitmap, we'll need * to push them to an actual walk and run it until we can confirm * they are reachable */ while (roots) { struct object *object = roots->item; int pos; roots = roots->next; pos = bitmap_position(object->oid.hash); if (pos < 0 || base == NULL || !bitmap_get(base, pos)) { object->flags &= ~UNINTERESTING; add_pending_object(revs, object, ""); needs_walk = 1; } else { object->flags |= SEEN; } } if (needs_walk) { struct include_data incdata; if (base == NULL) base = bitmap_new(); incdata.base = base; incdata.seen = seen; revs->include_check = should_include; revs->include_check_data = &incdata; if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, show_commit, show_object, base); } return base; } static void show_extended_objects(struct bitmap *objects, show_reachable_fn show_reach) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i; for (i = 0; i < eindex->count; ++i) { struct object *obj; if (!bitmap_get(objects, bitmap_git.pack->num_objects + i)) continue; obj = eindex->objects[i]; show_reach(obj->oid.hash, obj->type, 0, eindex->hashes[i], NULL, 0); } } static void show_objects_for_type( struct bitmap *objects, struct ewah_bitmap *type_filter, enum object_type object_type, show_reachable_fn show_reach) { size_t pos = 0, i = 0; uint32_t offset; struct ewah_iterator it; eword_t filter; if (bitmap_git.reuse_objects == bitmap_git.pack->num_objects) return; ewah_iterator_init(&it, type_filter); while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i] & filter; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { const unsigned char *sha1; struct revindex_entry *entry; uint32_t hash = 0; if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); if (pos + offset < bitmap_git.reuse_objects) continue; entry = &bitmap_git.pack->revindex[pos + offset]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); if (bitmap_git.hashes) hash = ntohl(bitmap_git.hashes[entry->nr]); show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset); } pos += BITS_IN_EWORD; i++; } } static int in_bitmapped_pack(struct object_list *roots) { while (roots) { struct object *object = roots->item; roots = roots->next; if (find_pack_entry_one(object->oid.hash, bitmap_git.pack) > 0) return 1; } return 0; } int prepare_bitmap_walk(struct rev_info *revs) { unsigned int i; unsigned int pending_nr = revs->pending.nr; struct object_array_entry *pending_e = revs->pending.objects; struct object_list *wants = NULL; struct object_list *haves = NULL; struct bitmap *wants_bitmap = NULL; struct bitmap *haves_bitmap = NULL; 
if (!bitmap_git.loaded) { /* try to open a bitmapped pack, but don't parse it yet * because we may not need to use it */ if (open_pack_bitmap() < 0) return -1; } for (i = 0; i < pending_nr; ++i) { struct object *object = pending_e[i].item; if (object->type == OBJ_NONE) parse_object_or_die(object->oid.hash, NULL); while (object->type == OBJ_TAG) { struct tag *tag = (struct tag *) object; if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); if (!tag->tagged) die("bad tag"); object = parse_object_or_die(tag->tagged->oid.hash, NULL); } if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); } /* * if we have a HAVES list, but none of those haves is contained * in the packfile that has a bitmap, we don't have anything to * optimize here */ if (haves && !in_bitmapped_pack(haves)) return -1; /* if we don't want anything, we're done here */ if (!wants) return -1; /* * now we're going to use bitmaps, so load the actual bitmap entries * from disk. this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ if (!bitmap_git.loaded && load_pack_bitmap() < 0) return -1; revs->pending.nr = 0; revs->pending.alloc = 0; revs->pending.objects = NULL; if (haves) { revs->ignore_missing_links = 1; haves_bitmap = find_objects(revs, haves, NULL); reset_revision_walk(); revs->ignore_missing_links = 0; if (haves_bitmap == NULL) die("BUG: failed to perform bitmap walk"); } wants_bitmap = find_objects(revs, wants, haves_bitmap); if (!wants_bitmap) die("BUG: failed to perform bitmap walk"); if (haves_bitmap) bitmap_and_not(wants_bitmap, haves_bitmap); bitmap_git.result = wants_bitmap; bitmap_free(haves_bitmap); return 0; } int reuse_partial_packfile_from_bitmap(struct packed_git **packfile, uint32_t *entries, off_t *up_to) { /* * Reuse the packfile content if we need more than * 90% of its objects */ static const double REUSE_PERCENT = 0.9; struct bitmap *result = bitmap_git.result; uint32_t reuse_threshold; uint32_t i, reuse_objects = 0; assert(result); for (i = 0; i < result->word_alloc; ++i) { if (result->words[i] != (eword_t)~0) { reuse_objects += ewah_bit_ctz64(~result->words[i]); break; } reuse_objects += BITS_IN_EWORD; } #ifdef GIT_BITMAP_DEBUG { const unsigned char *sha1; struct revindex_entry *entry; entry = &bitmap_git.reverse_index->revindex[reuse_objects]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); fprintf(stderr, "Failed to reuse at %d (%016llx)\n", reuse_objects, result->words[i]); fprintf(stderr, " %s\n", sha1_to_hex(sha1)); } #endif if (!reuse_objects) return -1; if (reuse_objects >= bitmap_git.pack->num_objects) { bitmap_git.reuse_objects = *entries = bitmap_git.pack->num_objects; *up_to = -1; /* reuse the full pack */ *packfile = bitmap_git.pack; return 0; } reuse_threshold = bitmap_popcount(bitmap_git.result) * REUSE_PERCENT; if (reuse_objects < reuse_threshold) return -1; bitmap_git.reuse_objects = *entries = reuse_objects; *up_to = bitmap_git.pack->revindex[reuse_objects].offset; *packfile = bitmap_git.pack; return 0; } void traverse_bitmap_commit_list(show_reachable_fn show_reachable) { assert(bitmap_git.result); show_objects_for_type(bitmap_git.result, bitmap_git.commits, OBJ_COMMIT, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.trees, OBJ_TREE, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.blobs, OBJ_BLOB, show_reachable); 
show_objects_for_type(bitmap_git.result, bitmap_git.tags, OBJ_TAG, show_reachable); show_extended_objects(bitmap_git.result, show_reachable); bitmap_free(bitmap_git.result); bitmap_git.result = NULL; } static uint32_t count_object_type(struct bitmap *objects, enum object_type type) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i = 0, count = 0; struct ewah_iterator it; eword_t filter; switch (type) { case OBJ_COMMIT: ewah_iterator_init(&it, bitmap_git.commits); break; case OBJ_TREE: ewah_iterator_init(&it, bitmap_git.trees); break; case OBJ_BLOB: ewah_iterator_init(&it, bitmap_git.blobs); break; case OBJ_TAG: ewah_iterator_init(&it, bitmap_git.tags); break; default: return 0; } while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i++] & filter; count += ewah_bit_popcount64(word); } for (i = 0; i < eindex->count; ++i) { if (eindex->objects[i]->type == type && bitmap_get(objects, bitmap_git.pack->num_objects + i)) count++; } return count; } void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags) { assert(bitmap_git.result); if (commits) *commits = count_object_type(bitmap_git.result, OBJ_COMMIT); if (trees) *trees = count_object_type(bitmap_git.result, OBJ_TREE); if (blobs) *blobs = count_object_type(bitmap_git.result, OBJ_BLOB); if (tags) *tags = count_object_type(bitmap_git.result, OBJ_TAG); } struct bitmap_test_data { struct bitmap *base; struct progress *prg; size_t seen; }; static void test_show_object(struct object *object, const char *name, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } static void test_show_commit(struct commit *commit, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } void test_bitmap_walk(struct rev_info *revs) { struct object *root; struct bitmap *result = NULL; khiter_t pos; size_t result_popcnt; struct bitmap_test_data tdata; if (prepare_bitmap_git()) die("failed to load bitmap indexes"); if (revs->pending.nr != 1) die("you must specify exactly one commit to test"); fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n", bitmap_git.version, bitmap_git.entry_count); root = revs->pending.objects[0].item; pos = kh_get_sha1(bitmap_git.bitmaps, root->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *bm = lookup_stored_bitmap(st); fprintf(stderr, "Found bitmap for %s. 
%d bits / %08x checksum\n", oid_to_hex(&root->oid), (int)bm->bit_size, ewah_checksum(bm)); result = ewah_to_bitmap(bm); } if (result == NULL) die("Commit %s doesn't have an indexed bitmap", oid_to_hex(&root->oid)); revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; result_popcnt = bitmap_popcount(result); if (prepare_revision_walk(revs)) die("revision walk setup failed"); tdata.base = bitmap_new(); tdata.prg = start_progress("Verifying bitmap entries", result_popcnt); tdata.seen = 0; traverse_commit_list(revs, &test_show_commit, &test_show_object, &tdata); stop_progress(&tdata.prg); if (bitmap_equals(result, tdata.base)) fprintf(stderr, "OK!\n"); else fprintf(stderr, "Mismatch!\n"); bitmap_free(result); } static int rebuild_bitmap(uint32_t *reposition, struct ewah_bitmap *source, struct bitmap *dest) { uint32_t pos = 0; struct ewah_iterator it; eword_t word; ewah_iterator_init(&it, source); while (ewah_iterator_next(&word, &it)) { uint32_t offset, bit_pos; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); bit_pos = reposition[pos + offset]; if (bit_pos > 0) bitmap_set(dest, bit_pos - 1); else /* can't reuse, we don't have the object */ return -1; } pos += BITS_IN_EWORD; } return 0; } int rebuild_existing_bitmaps(struct packing_data *mapping, khash_sha1 *reused_bitmaps, int show_progress) { uint32_t i, num_objects; uint32_t *reposition; struct bitmap *rebuild; struct stored_bitmap *stored; struct progress *progress = NULL; khiter_t hash_pos; int hash_ret; if (prepare_bitmap_git() < 0) return -1; num_objects = bitmap_git.pack->num_objects; reposition = xcalloc(num_objects, sizeof(uint32_t)); for (i = 0; i < num_objects; ++i) { const unsigned char *sha1; struct revindex_entry *entry; struct object_entry *oe; entry = &bitmap_git.pack->revindex[i]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); oe = packlist_find(mapping, sha1, NULL); if (oe) reposition[i] = oe->in_pack_pos + 1; } rebuild = bitmap_new(); i = 0; if (show_progress) progress = start_progress("Reusing bitmaps", 0); kh_foreach_value(bitmap_git.bitmaps, stored, { if (stored->flags & BITMAP_FLAG_REUSE) { if (!rebuild_bitmap(reposition, lookup_stored_bitmap(stored), rebuild)) { hash_pos = kh_put_sha1(reused_bitmaps, stored->sha1, &hash_ret); kh_value(reused_bitmaps, hash_pos) = bitmap_to_ewah(rebuild); } bitmap_reset(rebuild); display_progress(progress, ++i); } }); stop_progress(&progress); free(reposition); bitmap_free(rebuild); return 0; }
static void test_show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); }
static void test_show_object(struct object *object, const char *name, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); }
{'added': [(417, 'static void show_object(struct object *object, const char *name, void *data)'), (424, '\tif (bitmap_pos < 0)'), (893, 'static void test_show_object(struct object *object, const char *name,'), (894, '\t\t\t void *data)')], 'deleted': [(417, 'static void show_object(struct object *object, struct strbuf *path,'), (418, '\t\t\tconst char *last, void *data)'), (425, '\tif (bitmap_pos < 0) {'), (426, '\t\tchar *name = path_name(path, last);'), (428, '\t\tfree(name);'), (429, '\t}'), (897, 'static void test_show_object(struct object *object,'), (898, '\t\t\t struct strbuf *path,'), (899, '\t\t\t const char *last, void *data)')]}
4
9
744
4,729
https://github.com/git/git
CVE-2016-2315
['CWE-119']
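The test_show_object() record above belongs to the same commit and makes the same signature change. For contrast, here is an illustrative sketch of the "before" shape it replaces, where every callback had to assemble and release its own name from a (path, last) pair. build_name() below is a stand-in, not git's path_name(); given the record's CWE-119 tag, one plausible reading of the refactor is that centralising name construction in the walker removes this per-callback allocation and copying.

/*
 * Illustrative sketch only: old-style callback that must build and free its
 * own name, in the manner of the record's "before" show_object()/
 * test_show_object() calling path_name() and free().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for the old path_name(): caller owns the returned buffer */
static char *build_name(const char *dir, const char *last)
{
	size_t dlen = strlen(dir), llen = strlen(last);
	char *name = malloc(dlen + llen + 1);

	if (!name)
		return NULL;
	memcpy(name, dir, dlen);
	memcpy(name + dlen, last, llen + 1);	/* copies the trailing NUL */
	return name;
}

/* old-style callback: gets the pieces, must assemble and free the name */
static void old_show_object(int id, const char *dir, const char *last)
{
	char *name = build_name(dir, last);

	if (!name)
		return;
	printf("object %d at %s\n", id, name);
	free(name);	/* every callback repeats this ownership dance */
}

int main(void)
{
	old_show_object(1, "src/", "main.c");
	old_show_object(2, "docs/", "README");
	return 0;
}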
tlb.c
flush_tlb_mm_range
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> #include <linux/debugfs.h> /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ struct flush_tlb_info { struct mm_struct *flush_mm; unsigned long flush_start; unsigned long flush_end; }; /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. */ void leave_mm(int cpu) { struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); load_cr3(swapper_pg_dir); /* * This gets called in the idle path where RCU * functions differently. Tracing normally * uses RCU, so we have to call the tracepoint * specially here. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } } EXPORT_SYMBOL_GPL(leave_mm); /* * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) set cpu_tlbstate to TLBSTATE_OK * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm * if cpu0 was in lazy tlb mode. * 1a2) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. This is not synchronized with * the other cpus, but flush_tlb_func ignore flush ipis for the wrong * mm, and in the worst case we perform a superfluous tlb flush. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles flush ipis. * 1b1) set cpu_tlbstate to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. */ /* * TLB flush funcation: * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. 
*/ static void flush_tlb_func(void *info) { struct flush_tlb_info *f = info; inc_irq_stat(irq_tlb_count); if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; if (!f->flush_end) f->flush_end = f->flush_start + PAGE_SIZE; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) { local_flush_tlb(); trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); } else { unsigned long addr; unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); addr += PAGE_SIZE; } trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); } } else leave_mm(smp_processor_id()); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) { struct flush_tlb_info info; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func, &info, 1); return; } smp_call_function_many(cpumask, flush_tlb_func, &info, 1); } void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } /* * See Documentation/x86/tlb.txt for details. We choose 33 * because it is large enough to cover the vast majority (at * least 95%) of allocations, and is small enough that we are * confident it will not cause too much overhead. Each single * flush is about 100 ns, so this caps the maximum overhead at * _about_ 3,000 ns. * * This is in units of pages. 
*/ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) goto out; if (!current->mm) { leave_mm(smp_processor_id()); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) __flush_tlb_one(start); else leave_mm(smp_processor_id()); } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); } static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } static void do_kernel_range_flush(void *info) { struct flush_tlb_info *f = info; unsigned long addr; /* flush range by one by one 'invlpg' */ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) __flush_tlb_single(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* Balance as user space task's flush, a bit conservative */ if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info info; info.flush_start = start; info.flush_end = end; on_each_cpu(do_kernel_range_flush, &info, 1); } } static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; unsigned int len; len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t tlbflush_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; ssize_t len; int ceiling; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoint(buf, 0, &ceiling)) return -EINVAL; if (ceiling < 0) return -EINVAL; tlb_single_page_flush_ceiling = ceiling; return count; } static const struct file_operations fops_tlbflush = { .read = tlbflush_read_file, .write = tlbflush_write_file, .llseek = default_llseek, }; static int __init create_tlb_single_page_flush_ceiling(void) { debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, arch_debugfs_dir, NULL, &fops_tlbflush); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> #include <linux/debugfs.h> /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ struct flush_tlb_info { struct mm_struct *flush_mm; unsigned long flush_start; unsigned long flush_end; }; /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. */ void leave_mm(int cpu) { struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); load_cr3(swapper_pg_dir); /* * This gets called in the idle path where RCU * functions differently. Tracing normally * uses RCU, so we have to call the tracepoint * specially here. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } } EXPORT_SYMBOL_GPL(leave_mm); /* * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) set cpu_tlbstate to TLBSTATE_OK * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm * if cpu0 was in lazy tlb mode. * 1a2) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. This is not synchronized with * the other cpus, but flush_tlb_func ignore flush ipis for the wrong * mm, and in the worst case we perform a superfluous tlb flush. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles flush ipis. * 1b1) set cpu_tlbstate to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. */ /* * TLB flush funcation: * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. 
*/ static void flush_tlb_func(void *info) { struct flush_tlb_info *f = info; inc_irq_stat(irq_tlb_count); if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; if (!f->flush_end) f->flush_end = f->flush_start + PAGE_SIZE; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) { local_flush_tlb(); trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); } else { unsigned long addr; unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); addr += PAGE_SIZE; } trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); } } else leave_mm(smp_processor_id()); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) { struct flush_tlb_info info; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func, &info, 1); return; } smp_call_function_many(cpumask, flush_tlb_func, &info, 1); } void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); /* This is an implicit full barrier that synchronizes with switch_mm. */ local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } /* * See Documentation/x86/tlb.txt for details. We choose 33 * because it is large enough to cover the vast majority (at * least 95%) of allocations, and is small enough that we are * confident it will not cause too much overhead. Each single * flush is about 100 ns, so this caps the maximum overhead at * _about_ 3,000 ns. * * This is in units of pages. */ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) { /* Synchronize with switch_mm. */ smp_mb(); goto out; } if (!current->mm) { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; /* * Both branches below are implicit full barriers (MOV to CR or * INVLPG) that synchronize with switch_mm. 
*/ if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) { /* * Implicit full barrier (INVLPG) that synchronizes * with switch_mm. */ __flush_tlb_one(start); } else { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); } } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); } static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } static void do_kernel_range_flush(void *info) { struct flush_tlb_info *f = info; unsigned long addr; /* flush range by one by one 'invlpg' */ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) __flush_tlb_single(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* Balance as user space task's flush, a bit conservative */ if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info info; info.flush_start = start; info.flush_end = end; on_each_cpu(do_kernel_range_flush, &info, 1); } } static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; unsigned int len; len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t tlbflush_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; ssize_t len; int ceiling; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoint(buf, 0, &ceiling)) return -EINVAL; if (ceiling < 0) return -EINVAL; tlb_single_page_flush_ceiling = ceiling; return count; } static const struct file_operations fops_tlbflush = { .read = tlbflush_read_file, .write = tlbflush_write_file, .llseek = default_llseek, }; static int __init create_tlb_single_page_flush_ceiling(void) { debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, arch_debugfs_dir, NULL, &fops_tlbflush); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) goto out; if (!current->mm) { leave_mm(smp_processor_id()); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); }
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) { /* Synchronize with switch_mm. */ smp_mb(); goto out; } if (!current->mm) { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; /* * Both branches below are implicit full barriers (MOV to CR or * INVLPG) that synchronize with switch_mm. */ if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); }
{'added': [(164, ''), (165, '\t/* This is an implicit full barrier that synchronizes with switch_mm. */'), (167, ''), (194, '\tif (current->active_mm != mm) {'), (195, '\t\t/* Synchronize with switch_mm. */'), (196, '\t\tsmp_mb();'), (197, ''), (199, '\t}'), (203, ''), (204, '\t\t/* Synchronize with switch_mm. */'), (205, '\t\tsmp_mb();'), (206, ''), (213, '\t/*'), (214, '\t * Both branches below are implicit full barriers (MOV to CR or'), (215, '\t * INVLPG) that synchronize with switch_mm.'), (216, '\t */'), (246, '\t\tif (current->mm) {'), (247, '\t\t\t/*'), (248, '\t\t\t * Implicit full barrier (INVLPG) that synchronizes'), (249, '\t\t\t * with switch_mm.'), (250, '\t\t\t */'), (252, '\t\t} else {'), (254, ''), (255, '\t\t\t/* Synchronize with switch_mm. */'), (256, '\t\t\tsmp_mb();'), (257, '\t\t}')], 'deleted': [(191, '\tif (current->active_mm != mm)'), (231, '\t\tif (current->mm)'), (233, '\t\telse')]}
26
3
211
1,177
https://github.com/torvalds/linux
CVE-2016-2069
['CWE-362']
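The CVE-2016-2069 record above adds smp_mb() on the early-exit paths of flush_tlb_mm_range and documents that the flushing branches already act as implicit full barriers, so that the later read of mm_cpumask() cannot be satisfied before the accesses preceding the call; switch_mm on another CPU performs the mirror-image sequence. The following user-space C11 program is an analogy, not kernel code: it is the classic store-then-load (store-buffering) pattern such a barrier guards against, and all names in it are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpu_runs_mm;     /* "switch_mm has installed the mm here" */
static atomic_int flush_pending;   /* "a range flush has been requested"    */
static int seen_by_flusher, seen_by_switcher;

static void *flusher(void *arg)
{
	(void)arg;
	atomic_store_explicit(&flush_pending, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* analogue of the smp_mb() the fix adds */
	seen_by_flusher = atomic_load_explicit(&cpu_runs_mm, memory_order_relaxed);
	return NULL;
}

static void *switcher(void *arg)
{
	(void)arg;
	atomic_store_explicit(&cpu_runs_mm, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* switch_mm's side of the pairing */
	seen_by_switcher = atomic_load_explicit(&flush_pending, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, flusher, NULL);
	pthread_create(&b, NULL, switcher, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With both fences in place, at least one side must observe the other;
	 * drop either fence and the 0/0 outcome (a missed flush) becomes legal. */
	printf("flusher saw %d, switcher saw %d\n", seen_by_flusher, seen_by_switcher);
	return 0;
}

Built with cc -pthread, the two seq_cst fences guarantee that at least one thread observes the other's store; removing either fence permits both loads to return 0, which corresponds to the lost-flush window the recorded fix closes.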
tlb.c
flush_tlb_page
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> #include <linux/debugfs.h> /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ struct flush_tlb_info { struct mm_struct *flush_mm; unsigned long flush_start; unsigned long flush_end; }; /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. */ void leave_mm(int cpu) { struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); load_cr3(swapper_pg_dir); /* * This gets called in the idle path where RCU * functions differently. Tracing normally * uses RCU, so we have to call the tracepoint * specially here. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } } EXPORT_SYMBOL_GPL(leave_mm); /* * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) set cpu_tlbstate to TLBSTATE_OK * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm * if cpu0 was in lazy tlb mode. * 1a2) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. This is not synchronized with * the other cpus, but flush_tlb_func ignore flush ipis for the wrong * mm, and in the worst case we perform a superfluous tlb flush. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles flush ipis. * 1b1) set cpu_tlbstate to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. */ /* * TLB flush funcation: * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. 
*/ static void flush_tlb_func(void *info) { struct flush_tlb_info *f = info; inc_irq_stat(irq_tlb_count); if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; if (!f->flush_end) f->flush_end = f->flush_start + PAGE_SIZE; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) { local_flush_tlb(); trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); } else { unsigned long addr; unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); addr += PAGE_SIZE; } trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); } } else leave_mm(smp_processor_id()); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) { struct flush_tlb_info info; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func, &info, 1); return; } smp_call_function_many(cpumask, flush_tlb_func, &info, 1); } void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } /* * See Documentation/x86/tlb.txt for details. We choose 33 * because it is large enough to cover the vast majority (at * least 95%) of allocations, and is small enough that we are * confident it will not cause too much overhead. Each single * flush is about 100 ns, so this caps the maximum overhead at * _about_ 3,000 ns. * * This is in units of pages. 
*/ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) goto out; if (!current->mm) { leave_mm(smp_processor_id()); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) __flush_tlb_one(start); else leave_mm(smp_processor_id()); } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); } static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } static void do_kernel_range_flush(void *info) { struct flush_tlb_info *f = info; unsigned long addr; /* flush range by one by one 'invlpg' */ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) __flush_tlb_single(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* Balance as user space task's flush, a bit conservative */ if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info info; info.flush_start = start; info.flush_end = end; on_each_cpu(do_kernel_range_flush, &info, 1); } } static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; unsigned int len; len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t tlbflush_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; ssize_t len; int ceiling; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoint(buf, 0, &ceiling)) return -EINVAL; if (ceiling < 0) return -EINVAL; tlb_single_page_flush_ceiling = ceiling; return count; } static const struct file_operations fops_tlbflush = { .read = tlbflush_read_file, .write = tlbflush_write_file, .llseek = default_llseek, }; static int __init create_tlb_single_page_flush_ceiling(void) { debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, arch_debugfs_dir, NULL, &fops_tlbflush); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> #include <linux/debugfs.h> /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ struct flush_tlb_info { struct mm_struct *flush_mm; unsigned long flush_start; unsigned long flush_end; }; /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. */ void leave_mm(int cpu) { struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); load_cr3(swapper_pg_dir); /* * This gets called in the idle path where RCU * functions differently. Tracing normally * uses RCU, so we have to call the tracepoint * specially here. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } } EXPORT_SYMBOL_GPL(leave_mm); /* * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) set cpu_tlbstate to TLBSTATE_OK * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm * if cpu0 was in lazy tlb mode. * 1a2) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. This is not synchronized with * the other cpus, but flush_tlb_func ignore flush ipis for the wrong * mm, and in the worst case we perform a superfluous tlb flush. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles flush ipis. * 1b1) set cpu_tlbstate to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. */ /* * TLB flush funcation: * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. 
*/ static void flush_tlb_func(void *info) { struct flush_tlb_info *f = info; inc_irq_stat(irq_tlb_count); if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; if (!f->flush_end) f->flush_end = f->flush_start + PAGE_SIZE; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) { local_flush_tlb(); trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); } else { unsigned long addr; unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); addr += PAGE_SIZE; } trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); } } else leave_mm(smp_processor_id()); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) { struct flush_tlb_info info; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func, &info, 1); return; } smp_call_function_many(cpumask, flush_tlb_func, &info, 1); } void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); /* This is an implicit full barrier that synchronizes with switch_mm. */ local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } /* * See Documentation/x86/tlb.txt for details. We choose 33 * because it is large enough to cover the vast majority (at * least 95%) of allocations, and is small enough that we are * confident it will not cause too much overhead. Each single * flush is about 100 ns, so this caps the maximum overhead at * _about_ 3,000 ns. * * This is in units of pages. */ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) { /* Synchronize with switch_mm. */ smp_mb(); goto out; } if (!current->mm) { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; /* * Both branches below are implicit full barriers (MOV to CR or * INVLPG) that synchronize with switch_mm. 
*/ if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) { /* * Implicit full barrier (INVLPG) that synchronizes * with switch_mm. */ __flush_tlb_one(start); } else { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); } } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); } static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } static void do_kernel_range_flush(void *info) { struct flush_tlb_info *f = info; unsigned long addr; /* flush range by one by one 'invlpg' */ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) __flush_tlb_single(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* Balance as user space task's flush, a bit conservative */ if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info info; info.flush_start = start; info.flush_end = end; on_each_cpu(do_kernel_range_flush, &info, 1); } } static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; unsigned int len; len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t tlbflush_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; ssize_t len; int ceiling; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoint(buf, 0, &ceiling)) return -EINVAL; if (ceiling < 0) return -EINVAL; tlb_single_page_flush_ceiling = ceiling; return count; } static const struct file_operations fops_tlbflush = { .read = tlbflush_read_file, .write = tlbflush_write_file, .llseek = default_llseek, }; static int __init create_tlb_single_page_flush_ceiling(void) { debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, arch_debugfs_dir, NULL, &fops_tlbflush); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) __flush_tlb_one(start); else leave_mm(smp_processor_id()); } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) { /* * Implicit full barrier (INVLPG) that synchronizes * with switch_mm. */ __flush_tlb_one(start); } else { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); } } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); }
{'added': [(164, ''), (165, '\t/* This is an implicit full barrier that synchronizes with switch_mm. */'), (167, ''), (194, '\tif (current->active_mm != mm) {'), (195, '\t\t/* Synchronize with switch_mm. */'), (196, '\t\tsmp_mb();'), (197, ''), (199, '\t}'), (203, ''), (204, '\t\t/* Synchronize with switch_mm. */'), (205, '\t\tsmp_mb();'), (206, ''), (213, '\t/*'), (214, '\t * Both branches below are implicit full barriers (MOV to CR or'), (215, '\t * INVLPG) that synchronize with switch_mm.'), (216, '\t */'), (246, '\t\tif (current->mm) {'), (247, '\t\t\t/*'), (248, '\t\t\t * Implicit full barrier (INVLPG) that synchronizes'), (249, '\t\t\t * with switch_mm.'), (250, '\t\t\t */'), (252, '\t\t} else {'), (254, ''), (255, '\t\t\t/* Synchronize with switch_mm. */'), (256, '\t\t\tsmp_mb();'), (257, '\t\t}')], 'deleted': [(191, '\tif (current->active_mm != mm)'), (231, '\t\tif (current->mm)'), (233, '\t\telse')]}
26
3
211
1,177
https://github.com/torvalds/linux
CVE-2016-2069
['CWE-362']
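The flush_tlb_page record above relies on the same pairing with switch_mm: per the comments added in the diff, the INVLPG path needs no explicit smp_mb() because the instruction itself acts as a full barrier, while the leave_mm() path gains an explicit one. As a rough sketch of what a single-page flush helper looks like at the instruction level, assuming an x86 target, GCC-style inline asm, and a privileged (kernel) context in which INVLPG may execute at all; the helper name is hypothetical and this is not the kernel's definition:

/* Hypothetical single-page flush helper (x86, GCC-style inline asm). */
static inline void my_flush_tlb_one(unsigned long addr)
{
	/* The "memory" clobber only stops the compiler from caching or
	 * reordering memory accesses across the flush; the CPU-level ordering
	 * the fix depends on comes from INVLPG itself, as stated in the
	 * comments added by the recorded diff. */
	asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}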
cil_reset_ast.c
cil_reset_classperms_set
#include "cil_internal.h" #include "cil_log.h" #include "cil_list.h" #include "cil_reset_ast.h" #include "cil_symtab.h" static inline void cil_reset_classperms_list(struct cil_list *cp_list); static inline void cil_reset_level(struct cil_level *level); static inline void cil_reset_levelrange(struct cil_levelrange *levelrange); static inline void cil_reset_context(struct cil_context *context); static int __class_reset_perm_values(__attribute__((unused)) hashtab_key_t k, hashtab_datum_t d, void *args) { struct cil_perm *perm = (struct cil_perm *)d; perm->value -= *((int *)args); return SEPOL_OK; } static void cil_reset_class(struct cil_class *class) { if (class->common != NULL) { /* Must assume that the common has been destroyed */ int num_common_perms = class->num_perms - class->perms.nprim; cil_symtab_map(&class->perms, __class_reset_perm_values, &num_common_perms); /* during a re-resolve, we need to reset the common, so a classcommon * statement isn't seen as a duplicate */ class->num_perms = class->perms.nprim; class->common = NULL; /* Must make this NULL or there will be an error when re-resolving */ } class->ordered = CIL_FALSE; } static void cil_reset_perm(struct cil_perm *perm) { cil_list_destroy(&perm->classperms, CIL_FALSE); } static inline void cil_reset_classperms(struct cil_classperms *cp) { if (cp == NULL) { return; } cil_list_destroy(&cp->perms, CIL_FALSE); } static void cil_reset_classpermission(struct cil_classpermission *cp) { if (cp == NULL) { return; } cil_list_destroy(&cp->classperms, CIL_FALSE); } static void cil_reset_classperms_set(struct cil_classperms_set *cp_set) { cil_reset_classpermission(cp_set->set); } static inline void cil_reset_classperms_list(struct cil_list *cp_list) { struct cil_list_item *curr; if (cp_list == NULL) { return; } cil_list_for_each(curr, cp_list) { if (curr->flavor == CIL_CLASSPERMS) { /* KERNEL or MAP */ cil_reset_classperms(curr->data); } else if (curr->flavor == CIL_CLASSPERMS_SET) { /* SET */ cil_reset_classperms_set(curr->data); } } } static void cil_reset_classpermissionset(struct cil_classpermissionset *cps) { cil_reset_classperms_list(cps->classperms); } static void cil_reset_classmapping(struct cil_classmapping *cm) { cil_reset_classperms_list(cm->classperms); } static void cil_reset_alias(struct cil_alias *alias) { /* reset actual to NULL during a re-resolve */ alias->actual = NULL; } static void cil_reset_user(struct cil_user *user) { /* reset the bounds to NULL during a re-resolve */ user->bounds = NULL; user->dftlevel = NULL; user->range = NULL; } static void cil_reset_userattr(struct cil_userattribute *attr) { struct cil_list_item *expr = NULL; struct cil_list_item *next = NULL; /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a userattribute statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ expr = attr->expr_list->head; while (expr != NULL) { next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } } static void cil_reset_userattributeset(struct cil_userattributeset *uas) { cil_list_destroy(&uas->datum_expr, CIL_FALSE); } static void cil_reset_selinuxuser(struct cil_selinuxuser *selinuxuser) { if (selinuxuser->range_str == NULL) { cil_reset_levelrange(selinuxuser->range); } } static void cil_reset_role(struct cil_role *role) { /* reset the bounds to NULL during 
a re-resolve */ role->bounds = NULL; } static void cil_reset_roleattr(struct cil_roleattribute *attr) { /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a attributeroles statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ struct cil_list_item *expr = attr->expr_list->head; while (expr != NULL) { struct cil_list_item *next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } } static void cil_reset_roleattributeset(struct cil_roleattributeset *ras) { cil_list_destroy(&ras->datum_expr, CIL_FALSE); } static void cil_reset_type(struct cil_type *type) { /* reset the bounds to NULL during a re-resolve */ type->bounds = NULL; } static void cil_reset_typeattr(struct cil_typeattribute *attr) { /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a attributetypes statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ struct cil_list_item *expr = attr->expr_list->head; while (expr != NULL) { struct cil_list_item *next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } attr->used = CIL_FALSE; attr->keep = CIL_FALSE; } static void cil_reset_typeattributeset(struct cil_typeattributeset *tas) { cil_list_destroy(&tas->datum_expr, CIL_FALSE); } static void cil_reset_avrule(struct cil_avrule *rule) { cil_reset_classperms_list(rule->perms.classperms); } static void cil_reset_rangetransition(struct cil_rangetransition *rangetrans) { if (rangetrans->range_str == NULL) { cil_reset_levelrange(rangetrans->range); } } static void cil_reset_sens(struct cil_sens *sens) { /* during a re-resolve, we need to reset the categories associated with * this sensitivity from a (sensitivitycategory) statement */ cil_list_destroy(&sens->cats_list, CIL_FALSE); sens->ordered = CIL_FALSE; } static void cil_reset_cat(struct cil_cat *cat) { cat->ordered = CIL_FALSE; } static inline void cil_reset_cats(struct cil_cats *cats) { if (cats != NULL) { cats->evaluated = CIL_FALSE; cil_list_destroy(&cats->datum_expr, CIL_FALSE); } } static void cil_reset_senscat(struct cil_senscat *senscat) { cil_reset_cats(senscat->cats); } static void cil_reset_catset(struct cil_catset *catset) { cil_reset_cats(catset->cats); } static inline void cil_reset_level(struct cil_level *level) { cil_reset_cats(level->cats); } static inline void cil_reset_levelrange(struct cil_levelrange *levelrange) { if (levelrange->low_str == NULL) { cil_reset_level(levelrange->low); } if (levelrange->high_str == NULL) { cil_reset_level(levelrange->high); } } static inline void cil_reset_userlevel(struct cil_userlevel *userlevel) { if (userlevel->level_str == NULL) { cil_reset_level(userlevel->level); } } static inline void cil_reset_userrange(struct cil_userrange *userrange) { if (userrange->range_str == NULL) { cil_reset_levelrange(userrange->range); } } static inline void cil_reset_context(struct cil_context *context) { if (context->range_str == NULL) { cil_reset_levelrange(context->range); } } static void cil_reset_sidcontext(struct cil_sidcontext *sidcontext) { if (sidcontext->context_str == NULL) { cil_reset_context(sidcontext->context); } } static 
void cil_reset_filecon(struct cil_filecon *filecon) { if (filecon->context_str == NULL && filecon->context != NULL) { cil_reset_context(filecon->context); } } static void cil_reset_ibpkeycon(struct cil_ibpkeycon *ibpkeycon) { if (!ibpkeycon->context_str) cil_reset_context(ibpkeycon->context); } static void cil_reset_portcon(struct cil_portcon *portcon) { if (portcon->context_str == NULL) { cil_reset_context(portcon->context); } } static void cil_reset_nodecon(struct cil_nodecon *nodecon) { if (nodecon->context_str == NULL) { cil_reset_context(nodecon->context); } } static void cil_reset_genfscon(struct cil_genfscon *genfscon) { if (genfscon->context_str == NULL) { cil_reset_context(genfscon->context); } } static void cil_reset_netifcon(struct cil_netifcon *netifcon) { if (netifcon->if_context_str == NULL) { cil_reset_context(netifcon->if_context); } if (netifcon->packet_context_str == NULL) { cil_reset_context(netifcon->packet_context); } } static void cil_reset_ibendportcon(struct cil_ibendportcon *ibendportcon) { if (!ibendportcon->context_str) { cil_reset_context(ibendportcon->context); } } static void cil_reset_pirqcon(struct cil_pirqcon *pirqcon) { if (pirqcon->context_str == NULL) { cil_reset_context(pirqcon->context); } } static void cil_reset_iomemcon(struct cil_iomemcon *iomemcon) { if (iomemcon->context_str == NULL) { cil_reset_context(iomemcon->context); } } static void cil_reset_ioportcon(struct cil_ioportcon *ioportcon) { if (ioportcon->context_str == NULL) { cil_reset_context(ioportcon->context); } } static void cil_reset_pcidevicecon(struct cil_pcidevicecon *pcidevicecon) { if (pcidevicecon->context_str == NULL) { cil_reset_context(pcidevicecon->context); } } static void cil_reset_devicetreecon(struct cil_devicetreecon *devicetreecon) { if (devicetreecon->context_str == NULL) { cil_reset_context(devicetreecon->context); } } static void cil_reset_fsuse(struct cil_fsuse *fsuse) { if (fsuse->context_str == NULL) { cil_reset_context(fsuse->context); } } static void cil_reset_sid(struct cil_sid *sid) { /* reset the context to NULL during a re-resolve */ sid->context = NULL; sid->ordered = CIL_FALSE; } static void cil_reset_constrain(struct cil_constrain *con) { cil_reset_classperms_list(con->classperms); cil_list_destroy(&con->datum_expr, CIL_FALSE); } static void cil_reset_validatetrans(struct cil_validatetrans *vt) { cil_list_destroy(&vt->datum_expr, CIL_FALSE); } static void cil_reset_default(struct cil_default *def) { cil_list_destroy(&def->class_datums, CIL_FALSE); } static void cil_reset_defaultrange(struct cil_defaultrange *def) { cil_list_destroy(&def->class_datums, CIL_FALSE); } static void cil_reset_booleanif(struct cil_booleanif *bif) { cil_list_destroy(&bif->datum_expr, CIL_FALSE); } int __cil_reset_node(struct cil_tree_node *node, __attribute__((unused)) uint32_t *finished, __attribute__((unused)) void *extra_args) { switch (node->flavor) { case CIL_CLASS: cil_reset_class(node->data); break; case CIL_PERM: case CIL_MAP_PERM: cil_reset_perm(node->data); break; case CIL_CLASSPERMISSION: cil_reset_classpermission(node->data); break; case CIL_CLASSPERMISSIONSET: cil_reset_classpermissionset(node->data); break; case CIL_CLASSMAPPING: cil_reset_classmapping(node->data); break; case CIL_TYPEALIAS: case CIL_SENSALIAS: case CIL_CATALIAS: cil_reset_alias(node->data); break; case CIL_USERRANGE: cil_reset_userrange(node->data); break; case CIL_USERLEVEL: cil_reset_userlevel(node->data); break; case CIL_USER: cil_reset_user(node->data); break; case CIL_USERATTRIBUTE: 
cil_reset_userattr(node->data); break; case CIL_USERATTRIBUTESET: cil_reset_userattributeset(node->data); break; case CIL_SELINUXUSERDEFAULT: case CIL_SELINUXUSER: cil_reset_selinuxuser(node->data); break; case CIL_ROLE: cil_reset_role(node->data); break; case CIL_ROLEATTRIBUTE: cil_reset_roleattr(node->data); break; case CIL_ROLEATTRIBUTESET: cil_reset_roleattributeset(node->data); break; case CIL_TYPE: cil_reset_type(node->data); break; case CIL_TYPEATTRIBUTE: cil_reset_typeattr(node->data); break; case CIL_TYPEATTRIBUTESET: cil_reset_typeattributeset(node->data); break; case CIL_RANGETRANSITION: cil_reset_rangetransition(node->data); break; case CIL_AVRULE: cil_reset_avrule(node->data); break; case CIL_SENS: cil_reset_sens(node->data); break; case CIL_CAT: cil_reset_cat(node->data); break; case CIL_SENSCAT: cil_reset_senscat(node->data); break; case CIL_CATSET: cil_reset_catset(node->data); break; case CIL_LEVEL: cil_reset_level(node->data); break; case CIL_LEVELRANGE: cil_reset_levelrange(node->data); break; case CIL_CONTEXT: cil_reset_context(node->data); break; case CIL_SIDCONTEXT: cil_reset_sidcontext(node->data); break; case CIL_FILECON: cil_reset_filecon(node->data); break; case CIL_IBPKEYCON: cil_reset_ibpkeycon(node->data); break; case CIL_IBENDPORTCON: cil_reset_ibendportcon(node->data); break; case CIL_PORTCON: cil_reset_portcon(node->data); break; case CIL_NODECON: cil_reset_nodecon(node->data); break; case CIL_GENFSCON: cil_reset_genfscon(node->data); break; case CIL_NETIFCON: cil_reset_netifcon(node->data); break; case CIL_PIRQCON: cil_reset_pirqcon(node->data); break; case CIL_IOMEMCON: cil_reset_iomemcon(node->data); break; case CIL_IOPORTCON: cil_reset_ioportcon(node->data); break; case CIL_PCIDEVICECON: cil_reset_pcidevicecon(node->data); break; case CIL_DEVICETREECON: cil_reset_devicetreecon(node->data); break; case CIL_FSUSE: cil_reset_fsuse(node->data); break; case CIL_SID: cil_reset_sid(node->data); break; case CIL_CONSTRAIN: case CIL_MLSCONSTRAIN: cil_reset_constrain(node->data); break; case CIL_VALIDATETRANS: case CIL_MLSVALIDATETRANS: cil_reset_validatetrans(node->data); break; case CIL_DEFAULTUSER: case CIL_DEFAULTROLE: case CIL_DEFAULTTYPE: cil_reset_default(node->data); break; case CIL_DEFAULTRANGE: cil_reset_defaultrange(node->data); break; case CIL_BOOLEANIF: cil_reset_booleanif(node->data); break; case CIL_TUNABLEIF: case CIL_CALL: break; /* Not effected by optional block disabling */ case CIL_MACRO: case CIL_SIDORDER: case CIL_CLASSORDER: case CIL_CATORDER: case CIL_SENSITIVITYORDER: case CIL_EXPANDTYPEATTRIBUTE: break; /* Nothing to reset */ default: break; } return SEPOL_OK; } int cil_reset_ast(struct cil_tree_node *current) { int rc = SEPOL_ERR; rc = cil_tree_walk(current, __cil_reset_node, NULL, NULL, NULL); if (rc != SEPOL_OK) { cil_log(CIL_ERR, "Failed to reset AST\n"); return SEPOL_ERR; } return SEPOL_OK; }
#include "cil_internal.h" #include "cil_log.h" #include "cil_list.h" #include "cil_reset_ast.h" #include "cil_symtab.h" static inline void cil_reset_classperms_list(struct cil_list *cp_list); static inline void cil_reset_level(struct cil_level *level); static inline void cil_reset_levelrange(struct cil_levelrange *levelrange); static inline void cil_reset_context(struct cil_context *context); static int __class_reset_perm_values(__attribute__((unused)) hashtab_key_t k, hashtab_datum_t d, void *args) { struct cil_perm *perm = (struct cil_perm *)d; perm->value -= *((int *)args); return SEPOL_OK; } static void cil_reset_class(struct cil_class *class) { if (class->common != NULL) { /* Must assume that the common has been destroyed */ int num_common_perms = class->num_perms - class->perms.nprim; cil_symtab_map(&class->perms, __class_reset_perm_values, &num_common_perms); /* during a re-resolve, we need to reset the common, so a classcommon * statement isn't seen as a duplicate */ class->num_perms = class->perms.nprim; class->common = NULL; /* Must make this NULL or there will be an error when re-resolving */ } class->ordered = CIL_FALSE; } static void cil_reset_perm(struct cil_perm *perm) { cil_list_destroy(&perm->classperms, CIL_FALSE); } static inline void cil_reset_classperms(struct cil_classperms *cp) { if (cp == NULL) { return; } cil_list_destroy(&cp->perms, CIL_FALSE); } static void cil_reset_classpermission(struct cil_classpermission *cp) { if (cp == NULL) { return; } cil_list_destroy(&cp->classperms, CIL_FALSE); } static void cil_reset_classperms_set(struct cil_classperms_set *cp_set) { if (cp_set == NULL) { return; } cp_set->set = NULL; } static inline void cil_reset_classperms_list(struct cil_list *cp_list) { struct cil_list_item *curr; if (cp_list == NULL) { return; } cil_list_for_each(curr, cp_list) { if (curr->flavor == CIL_CLASSPERMS) { /* KERNEL or MAP */ cil_reset_classperms(curr->data); } else if (curr->flavor == CIL_CLASSPERMS_SET) { /* SET */ cil_reset_classperms_set(curr->data); } } } static void cil_reset_classpermissionset(struct cil_classpermissionset *cps) { cil_reset_classperms_list(cps->classperms); } static void cil_reset_classmapping(struct cil_classmapping *cm) { cil_reset_classperms_list(cm->classperms); } static void cil_reset_alias(struct cil_alias *alias) { /* reset actual to NULL during a re-resolve */ alias->actual = NULL; } static void cil_reset_user(struct cil_user *user) { /* reset the bounds to NULL during a re-resolve */ user->bounds = NULL; user->dftlevel = NULL; user->range = NULL; } static void cil_reset_userattr(struct cil_userattribute *attr) { struct cil_list_item *expr = NULL; struct cil_list_item *next = NULL; /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a userattribute statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ expr = attr->expr_list->head; while (expr != NULL) { next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } } static void cil_reset_userattributeset(struct cil_userattributeset *uas) { cil_list_destroy(&uas->datum_expr, CIL_FALSE); } static void cil_reset_selinuxuser(struct cil_selinuxuser *selinuxuser) { if (selinuxuser->range_str == NULL) { cil_reset_levelrange(selinuxuser->range); } } static void cil_reset_role(struct cil_role *role) { /* reset the bounds to 
NULL during a re-resolve */ role->bounds = NULL; } static void cil_reset_roleattr(struct cil_roleattribute *attr) { /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a attributeroles statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ struct cil_list_item *expr = attr->expr_list->head; while (expr != NULL) { struct cil_list_item *next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } } static void cil_reset_roleattributeset(struct cil_roleattributeset *ras) { cil_list_destroy(&ras->datum_expr, CIL_FALSE); } static void cil_reset_type(struct cil_type *type) { /* reset the bounds to NULL during a re-resolve */ type->bounds = NULL; } static void cil_reset_typeattr(struct cil_typeattribute *attr) { /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a attributetypes statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ struct cil_list_item *expr = attr->expr_list->head; while (expr != NULL) { struct cil_list_item *next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } attr->used = CIL_FALSE; attr->keep = CIL_FALSE; } static void cil_reset_typeattributeset(struct cil_typeattributeset *tas) { cil_list_destroy(&tas->datum_expr, CIL_FALSE); } static void cil_reset_avrule(struct cil_avrule *rule) { cil_reset_classperms_list(rule->perms.classperms); } static void cil_reset_rangetransition(struct cil_rangetransition *rangetrans) { if (rangetrans->range_str == NULL) { cil_reset_levelrange(rangetrans->range); } } static void cil_reset_sens(struct cil_sens *sens) { /* during a re-resolve, we need to reset the categories associated with * this sensitivity from a (sensitivitycategory) statement */ cil_list_destroy(&sens->cats_list, CIL_FALSE); sens->ordered = CIL_FALSE; } static void cil_reset_cat(struct cil_cat *cat) { cat->ordered = CIL_FALSE; } static inline void cil_reset_cats(struct cil_cats *cats) { if (cats != NULL) { cats->evaluated = CIL_FALSE; cil_list_destroy(&cats->datum_expr, CIL_FALSE); } } static void cil_reset_senscat(struct cil_senscat *senscat) { cil_reset_cats(senscat->cats); } static void cil_reset_catset(struct cil_catset *catset) { cil_reset_cats(catset->cats); } static inline void cil_reset_level(struct cil_level *level) { cil_reset_cats(level->cats); } static inline void cil_reset_levelrange(struct cil_levelrange *levelrange) { if (levelrange->low_str == NULL) { cil_reset_level(levelrange->low); } if (levelrange->high_str == NULL) { cil_reset_level(levelrange->high); } } static inline void cil_reset_userlevel(struct cil_userlevel *userlevel) { if (userlevel->level_str == NULL) { cil_reset_level(userlevel->level); } } static inline void cil_reset_userrange(struct cil_userrange *userrange) { if (userrange->range_str == NULL) { cil_reset_levelrange(userrange->range); } } static inline void cil_reset_context(struct cil_context *context) { if (context->range_str == NULL) { cil_reset_levelrange(context->range); } } static void cil_reset_sidcontext(struct cil_sidcontext *sidcontext) { if (sidcontext->context_str == NULL) { cil_reset_context(sidcontext->context); } 
} static void cil_reset_filecon(struct cil_filecon *filecon) { if (filecon->context_str == NULL && filecon->context != NULL) { cil_reset_context(filecon->context); } } static void cil_reset_ibpkeycon(struct cil_ibpkeycon *ibpkeycon) { if (!ibpkeycon->context_str) cil_reset_context(ibpkeycon->context); } static void cil_reset_portcon(struct cil_portcon *portcon) { if (portcon->context_str == NULL) { cil_reset_context(portcon->context); } } static void cil_reset_nodecon(struct cil_nodecon *nodecon) { if (nodecon->context_str == NULL) { cil_reset_context(nodecon->context); } } static void cil_reset_genfscon(struct cil_genfscon *genfscon) { if (genfscon->context_str == NULL) { cil_reset_context(genfscon->context); } } static void cil_reset_netifcon(struct cil_netifcon *netifcon) { if (netifcon->if_context_str == NULL) { cil_reset_context(netifcon->if_context); } if (netifcon->packet_context_str == NULL) { cil_reset_context(netifcon->packet_context); } } static void cil_reset_ibendportcon(struct cil_ibendportcon *ibendportcon) { if (!ibendportcon->context_str) { cil_reset_context(ibendportcon->context); } } static void cil_reset_pirqcon(struct cil_pirqcon *pirqcon) { if (pirqcon->context_str == NULL) { cil_reset_context(pirqcon->context); } } static void cil_reset_iomemcon(struct cil_iomemcon *iomemcon) { if (iomemcon->context_str == NULL) { cil_reset_context(iomemcon->context); } } static void cil_reset_ioportcon(struct cil_ioportcon *ioportcon) { if (ioportcon->context_str == NULL) { cil_reset_context(ioportcon->context); } } static void cil_reset_pcidevicecon(struct cil_pcidevicecon *pcidevicecon) { if (pcidevicecon->context_str == NULL) { cil_reset_context(pcidevicecon->context); } } static void cil_reset_devicetreecon(struct cil_devicetreecon *devicetreecon) { if (devicetreecon->context_str == NULL) { cil_reset_context(devicetreecon->context); } } static void cil_reset_fsuse(struct cil_fsuse *fsuse) { if (fsuse->context_str == NULL) { cil_reset_context(fsuse->context); } } static void cil_reset_sid(struct cil_sid *sid) { /* reset the context to NULL during a re-resolve */ sid->context = NULL; sid->ordered = CIL_FALSE; } static void cil_reset_constrain(struct cil_constrain *con) { cil_reset_classperms_list(con->classperms); cil_list_destroy(&con->datum_expr, CIL_FALSE); } static void cil_reset_validatetrans(struct cil_validatetrans *vt) { cil_list_destroy(&vt->datum_expr, CIL_FALSE); } static void cil_reset_default(struct cil_default *def) { cil_list_destroy(&def->class_datums, CIL_FALSE); } static void cil_reset_defaultrange(struct cil_defaultrange *def) { cil_list_destroy(&def->class_datums, CIL_FALSE); } static void cil_reset_booleanif(struct cil_booleanif *bif) { cil_list_destroy(&bif->datum_expr, CIL_FALSE); } int __cil_reset_node(struct cil_tree_node *node, __attribute__((unused)) uint32_t *finished, __attribute__((unused)) void *extra_args) { switch (node->flavor) { case CIL_CLASS: cil_reset_class(node->data); break; case CIL_PERM: case CIL_MAP_PERM: cil_reset_perm(node->data); break; case CIL_CLASSPERMISSION: cil_reset_classpermission(node->data); break; case CIL_CLASSPERMISSIONSET: cil_reset_classpermissionset(node->data); break; case CIL_CLASSMAPPING: cil_reset_classmapping(node->data); break; case CIL_TYPEALIAS: case CIL_SENSALIAS: case CIL_CATALIAS: cil_reset_alias(node->data); break; case CIL_USERRANGE: cil_reset_userrange(node->data); break; case CIL_USERLEVEL: cil_reset_userlevel(node->data); break; case CIL_USER: cil_reset_user(node->data); break; case CIL_USERATTRIBUTE: 
cil_reset_userattr(node->data); break; case CIL_USERATTRIBUTESET: cil_reset_userattributeset(node->data); break; case CIL_SELINUXUSERDEFAULT: case CIL_SELINUXUSER: cil_reset_selinuxuser(node->data); break; case CIL_ROLE: cil_reset_role(node->data); break; case CIL_ROLEATTRIBUTE: cil_reset_roleattr(node->data); break; case CIL_ROLEATTRIBUTESET: cil_reset_roleattributeset(node->data); break; case CIL_TYPE: cil_reset_type(node->data); break; case CIL_TYPEATTRIBUTE: cil_reset_typeattr(node->data); break; case CIL_TYPEATTRIBUTESET: cil_reset_typeattributeset(node->data); break; case CIL_RANGETRANSITION: cil_reset_rangetransition(node->data); break; case CIL_AVRULE: cil_reset_avrule(node->data); break; case CIL_SENS: cil_reset_sens(node->data); break; case CIL_CAT: cil_reset_cat(node->data); break; case CIL_SENSCAT: cil_reset_senscat(node->data); break; case CIL_CATSET: cil_reset_catset(node->data); break; case CIL_LEVEL: cil_reset_level(node->data); break; case CIL_LEVELRANGE: cil_reset_levelrange(node->data); break; case CIL_CONTEXT: cil_reset_context(node->data); break; case CIL_SIDCONTEXT: cil_reset_sidcontext(node->data); break; case CIL_FILECON: cil_reset_filecon(node->data); break; case CIL_IBPKEYCON: cil_reset_ibpkeycon(node->data); break; case CIL_IBENDPORTCON: cil_reset_ibendportcon(node->data); break; case CIL_PORTCON: cil_reset_portcon(node->data); break; case CIL_NODECON: cil_reset_nodecon(node->data); break; case CIL_GENFSCON: cil_reset_genfscon(node->data); break; case CIL_NETIFCON: cil_reset_netifcon(node->data); break; case CIL_PIRQCON: cil_reset_pirqcon(node->data); break; case CIL_IOMEMCON: cil_reset_iomemcon(node->data); break; case CIL_IOPORTCON: cil_reset_ioportcon(node->data); break; case CIL_PCIDEVICECON: cil_reset_pcidevicecon(node->data); break; case CIL_DEVICETREECON: cil_reset_devicetreecon(node->data); break; case CIL_FSUSE: cil_reset_fsuse(node->data); break; case CIL_SID: cil_reset_sid(node->data); break; case CIL_CONSTRAIN: case CIL_MLSCONSTRAIN: cil_reset_constrain(node->data); break; case CIL_VALIDATETRANS: case CIL_MLSVALIDATETRANS: cil_reset_validatetrans(node->data); break; case CIL_DEFAULTUSER: case CIL_DEFAULTROLE: case CIL_DEFAULTTYPE: cil_reset_default(node->data); break; case CIL_DEFAULTRANGE: cil_reset_defaultrange(node->data); break; case CIL_BOOLEANIF: cil_reset_booleanif(node->data); break; case CIL_TUNABLEIF: case CIL_CALL: break; /* Not effected by optional block disabling */ case CIL_MACRO: case CIL_SIDORDER: case CIL_CLASSORDER: case CIL_CATORDER: case CIL_SENSITIVITYORDER: case CIL_EXPANDTYPEATTRIBUTE: break; /* Nothing to reset */ default: break; } return SEPOL_OK; } int cil_reset_ast(struct cil_tree_node *current) { int rc = SEPOL_ERR; rc = cil_tree_walk(current, __cil_reset_node, NULL, NULL, NULL); if (rc != SEPOL_OK) { cil_log(CIL_ERR, "Failed to reset AST\n"); return SEPOL_ERR; } return SEPOL_OK; }
static void cil_reset_classperms_set(struct cil_classperms_set *cp_set) { cil_reset_classpermission(cp_set->set); }
static void cil_reset_classperms_set(struct cil_classperms_set *cp_set) { if (cp_set == NULL) { return; } cp_set->set = NULL; }
{'added': [(62, '\tif (cp_set == NULL) {'), (63, '\t\treturn;'), (64, '\t}'), (65, ''), (66, '\tcp_set->set = NULL;')], 'deleted': [(62, '\tcil_reset_classpermission(cp_set->set);')]}
5
1
505
2377
https://github.com/SELinuxProject/selinux
CVE-2021-36086
['CWE-416']
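The record above (CVE-2021-36086, CWE-416) captures a use-after-free fix in libsepol's CIL reset pass: the old cil_reset_classperms_set() followed cp_set->set into a cil_classpermission that may already have been destroyed when its optional block was disabled, whereas the patched version only clears the pointer and leaves re-resolution to rebuild it. A minimal sketch of that reset-by-clearing pattern, using hypothetical struct and function names rather than the CIL types, is shown below.

```c
/* Illustrative sketch (hypothetical struct and function names): during a
 * reset/re-resolve pass, a cached cross-reference is only cleared, never
 * followed, because the object it points at may already have been freed.
 * This mirrors the shape of the CVE-2021-36086 fix to
 * cil_reset_classperms_set(). */
#include <stddef.h>

struct target {
	int resolved;          /* rebuilt by a later resolve pass */
};

struct reference {
	struct target *ptr;    /* may dangle once its defining block is disabled */
};

static void reference_reset(struct reference *ref)
{
	if (ref == NULL) {
		return;
	}
	/* Do not dereference ref->ptr here: it can point at freed memory.
	 * Dropping the reference is enough; resolution repopulates it. */
	ref->ptr = NULL;
}
```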
rose_loopback.c
rose_loopback_timer
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/types.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/timer.h> #include <net/ax25.h> #include <linux/skbuff.h> #include <net/rose.h> #include <linux/init.h> static struct sk_buff_head loopback_queue; static struct timer_list loopback_timer; static void rose_set_loopback_timer(void); void rose_loopback_init(void) { skb_queue_head_init(&loopback_queue); init_timer(&loopback_timer); } static int rose_loopback_running(void) { return timer_pending(&loopback_timer); } int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) { struct sk_buff *skbn; skbn = skb_clone(skb, GFP_ATOMIC); kfree_skb(skb); if (skbn != NULL) { skb_queue_tail(&loopback_queue, skbn); if (!rose_loopback_running()) rose_set_loopback_timer(); } return 1; } static void rose_loopback_timer(unsigned long); static void rose_set_loopback_timer(void) { del_timer(&loopback_timer); loopback_timer.data = 0; loopback_timer.function = &rose_loopback_timer; loopback_timer.expires = jiffies + 10; add_timer(&loopback_timer); } static void rose_loopback_timer(unsigned long param) { struct sk_buff *skb; struct net_device *dev; rose_address *dest; struct sock *sk; unsigned short frametype; unsigned int lci_i, lci_o; while ((skb = skb_dequeue(&loopback_queue)) != NULL) { lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); frametype = skb->data[2]; dest = (rose_address *)(skb->data + 4); lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; skb_reset_transport_header(skb); sk = rose_find_socket(lci_o, rose_loopback_neigh); if (sk) { if (rose_process_rx_frame(sk, skb) == 0) kfree_skb(skb); continue; } if (frametype == ROSE_CALL_REQUEST) { if ((dev = rose_dev_get(dest)) != NULL) { if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) kfree_skb(skb); } else { kfree_skb(skb); } } else { kfree_skb(skb); } } } void __exit rose_loopback_clear(void) { struct sk_buff *skb; del_timer(&loopback_timer); while ((skb = skb_dequeue(&loopback_queue)) != NULL) { skb->sk = NULL; kfree_skb(skb); } }
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/types.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/timer.h> #include <net/ax25.h> #include <linux/skbuff.h> #include <net/rose.h> #include <linux/init.h> static struct sk_buff_head loopback_queue; static struct timer_list loopback_timer; static void rose_set_loopback_timer(void); void rose_loopback_init(void) { skb_queue_head_init(&loopback_queue); init_timer(&loopback_timer); } static int rose_loopback_running(void) { return timer_pending(&loopback_timer); } int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) { struct sk_buff *skbn; skbn = skb_clone(skb, GFP_ATOMIC); kfree_skb(skb); if (skbn != NULL) { skb_queue_tail(&loopback_queue, skbn); if (!rose_loopback_running()) rose_set_loopback_timer(); } return 1; } static void rose_loopback_timer(unsigned long); static void rose_set_loopback_timer(void) { del_timer(&loopback_timer); loopback_timer.data = 0; loopback_timer.function = &rose_loopback_timer; loopback_timer.expires = jiffies + 10; add_timer(&loopback_timer); } static void rose_loopback_timer(unsigned long param) { struct sk_buff *skb; struct net_device *dev; rose_address *dest; struct sock *sk; unsigned short frametype; unsigned int lci_i, lci_o; while ((skb = skb_dequeue(&loopback_queue)) != NULL) { if (skb->len < ROSE_MIN_LEN) { kfree_skb(skb); continue; } lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); frametype = skb->data[2]; if (frametype == ROSE_CALL_REQUEST && (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] != ROSE_CALL_REQ_ADDR_LEN_VAL)) { kfree_skb(skb); continue; } dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF); lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; skb_reset_transport_header(skb); sk = rose_find_socket(lci_o, rose_loopback_neigh); if (sk) { if (rose_process_rx_frame(sk, skb) == 0) kfree_skb(skb); continue; } if (frametype == ROSE_CALL_REQUEST) { if ((dev = rose_dev_get(dest)) != NULL) { if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) kfree_skb(skb); } else { kfree_skb(skb); } } else { kfree_skb(skb); } } } void __exit rose_loopback_clear(void) { struct sk_buff *skb; del_timer(&loopback_timer); while ((skb = skb_dequeue(&loopback_queue)) != NULL) { skb->sk = NULL; kfree_skb(skb); } }
static void rose_loopback_timer(unsigned long param) { struct sk_buff *skb; struct net_device *dev; rose_address *dest; struct sock *sk; unsigned short frametype; unsigned int lci_i, lci_o; while ((skb = skb_dequeue(&loopback_queue)) != NULL) { lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); frametype = skb->data[2]; dest = (rose_address *)(skb->data + 4); lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; skb_reset_transport_header(skb); sk = rose_find_socket(lci_o, rose_loopback_neigh); if (sk) { if (rose_process_rx_frame(sk, skb) == 0) kfree_skb(skb); continue; } if (frametype == ROSE_CALL_REQUEST) { if ((dev = rose_dev_get(dest)) != NULL) { if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) kfree_skb(skb); } else { kfree_skb(skb); } } else { kfree_skb(skb); } } }
static void rose_loopback_timer(unsigned long param) { struct sk_buff *skb; struct net_device *dev; rose_address *dest; struct sock *sk; unsigned short frametype; unsigned int lci_i, lci_o; while ((skb = skb_dequeue(&loopback_queue)) != NULL) { if (skb->len < ROSE_MIN_LEN) { kfree_skb(skb); continue; } lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); frametype = skb->data[2]; if (frametype == ROSE_CALL_REQUEST && (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] != ROSE_CALL_REQ_ADDR_LEN_VAL)) { kfree_skb(skb); continue; } dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF); lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; skb_reset_transport_header(skb); sk = rose_find_socket(lci_o, rose_loopback_neigh); if (sk) { if (rose_process_rx_frame(sk, skb) == 0) kfree_skb(skb); continue; } if (frametype == ROSE_CALL_REQUEST) { if ((dev = rose_dev_get(dest)) != NULL) { if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) kfree_skb(skb); } else { kfree_skb(skb); } } else { kfree_skb(skb); } } }
{'added': [(76, '\t\tif (skb->len < ROSE_MIN_LEN) {'), (77, '\t\t\tkfree_skb(skb);'), (78, '\t\t\tcontinue;'), (79, '\t\t}'), (82, '\t\tif (frametype == ROSE_CALL_REQUEST &&'), (83, '\t\t (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||'), (84, '\t\t skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !='), (85, '\t\t ROSE_CALL_REQ_ADDR_LEN_VAL)) {'), (86, '\t\t\tkfree_skb(skb);'), (87, '\t\t\tcontinue;'), (88, '\t\t}'), (89, '\t\tdest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);')], 'deleted': [(78, '\t\tdest = (rose_address *)(skb->data + 4);')]}
12
1
93
492
https://github.com/torvalds/linux
CVE-2011-4914
['CWE-20']
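The record above (CVE-2011-4914, CWE-20) shows an input-validation fix in the kernel's ROSE loopback path: rose_loopback_timer() used to read the LCI, frame type, and destination address straight out of skb->data without checking skb->len, and the patch rejects frames shorter than ROSE_MIN_LEN as well as call requests whose address and facilities fields are not fully present. A minimal sketch of the validate-before-parse pattern follows; the frame-layout constants are hypothetical stand-ins, not the real ROSE_* macros.

```c
/* Illustrative sketch (hypothetical frame layout): every offset is checked
 * against the buffer length before it is read, which is the pattern the
 * CVE-2011-4914 patch adds to rose_loopback_timer(). */
#include <stddef.h>
#include <stdint.h>

enum {
	FRAME_MIN_LEN  = 3,   /* 2-byte LCI + 1-byte frame type */
	DEST_ADDR_OFF  = 4,   /* where the destination address starts */
	DEST_ADDR_LEN  = 5    /* assumed address size for this sketch */
};

/* Parse a call-request style frame; returns 0 on success, -1 when the
 * buffer is too short to contain the fields that would be read. */
static int parse_call_request(const uint8_t *data, size_t len,
			      unsigned int *lci, unsigned int *frametype,
			      const uint8_t **dest)
{
	if (len < FRAME_MIN_LEN)
		return -1;                          /* header incomplete */

	*lci = ((data[0] << 8) & 0xF00) | data[1];
	*frametype = data[2];

	if (len < DEST_ADDR_OFF + DEST_ADDR_LEN)
		return -1;                          /* address bytes missing */

	*dest = data + DEST_ADDR_OFF;
	return 0;
}
```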
gd_tiff.c
createFromTiffRgba
/* TIFF - Tagged Image File Format Encapsulation for GD Library gd_tiff.c Copyright (C) Pierre-A. Joye, M. Retallack --------------------------------------------------------------------------- ** ** Permission to use, copy, modify, and distribute this software and its ** documentation for any purpose and without fee is hereby granted, provided ** that the above copyright notice appear in all copies and that both that ** copyright notice and this permission notice appear in supporting ** documentation. This software is provided "as is" without express or ** implied warranty. ** --------------------------------------------------------------------------- Ctx code written by M. Retallack Todo: If we fail - cleanup Writer: Use gd error function, overflow check may not be necessary as we write our own data (check already done) Implement 2 color black/white saving using group4 fax compression Implement function to specify encoding to use when writing tiff data ---------------------------------------------------------------------------- */ /* $Id$ */ /** * File: TIFF IO * * Read and write TIFF images. * * There is only most basic support for the TIFF format available for now; * for instance, multiple pages are not yet supported. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "gd.h" #include "gd_errors.h" #include "gdfonts.h" #include <stdio.h> #include <stdlib.h> #include <limits.h> #include "gdhelpers.h" #ifdef HAVE_LIBTIFF #include "tiff.h" #include "tiffio.h" #define GD_SUCCESS 1 #define GD_FAILURE 0 #define TRUE 1 #define FALSE 0 /* I define those here until the new formats * are commited. We can then rely on the global * def */ #define GD_PALETTE 1 #define GD_TRUECOLOR 2 #define GD_GRAY 3 #define GD_INDEXED 4 #define GD_RGB 5 #define MIN(a,b) (a < b) ? a : b; #define MAX(a,b) (a > b) ? 
a : b; typedef struct tiff_handle { int size; int pos; gdIOCtx *ctx; int written; } tiff_handle; /* Functions for reading, writing and seeking in gdIOCtx This allows for non-file i/o operations with no explicit use of libtiff fileio wrapper functions Note: because libtiff requires random access, but gdIOCtx only supports streams, all writes are buffered into memory and written out on close, also all reads are done from a memory mapped version of the tiff (assuming one already exists) */ tiff_handle * new_tiff_handle(gdIOCtx *g) { tiff_handle * t; if (!g) { gd_error("Cannot create a new tiff handle, missing Ctx argument"); return NULL; } t = (tiff_handle *) gdMalloc(sizeof(tiff_handle)); if (!t) { gd_error("Failed to allocate a new tiff handle"); return NULL; } t->size = 0; t->pos = 0; t->ctx = g; t->written = 0; return t; } /* TIFFReadWriteProc tiff_readproc - Will use gdIOCtx procs to read required (previously written) TIFF file content */ static tsize_t tiff_readproc(thandle_t clientdata, tdata_t data, tsize_t size) { tiff_handle *th = (tiff_handle *)clientdata; gdIOCtx *ctx = th->ctx; size = (ctx->getBuf)(ctx, data, size); return size; } /* TIFFReadWriteProc tiff_writeproc - Will use gdIOCtx procs to write out TIFF data */ static tsize_t tiff_writeproc(thandle_t clientdata, tdata_t data, tsize_t size) { tiff_handle *th = (tiff_handle *)clientdata; gdIOCtx *ctx = th->ctx; size = (ctx->putBuf)(ctx, data, size); if(size + th->pos>th->size) { th->size = size + th->pos; th->pos += size; } return size; } /* TIFFSeekProc tiff_seekproc * used to move around the partially written TIFF */ static toff_t tiff_seekproc(thandle_t clientdata, toff_t offset, int from) { tiff_handle *th = (tiff_handle *)clientdata; gdIOCtx *ctx = th->ctx; int result; switch(from) { default: case SEEK_SET: /* just use offset */ break; case SEEK_END: /* invert offset, so that it is from start, not end as supplied */ offset = th->size + offset; break; case SEEK_CUR: /* add current position to translate it to 'from start', * not from durrent as supplied */ offset += th->pos; break; } /* now, move pos in both io context and buf */ if((result = (ctx->seek)(ctx, offset))) { th->pos = offset; } return result ? 
offset : (toff_t)-1; } /* TIFFCloseProc tiff_closeproc - used to finally close the TIFF file */ static int tiff_closeproc(thandle_t clientdata) { (void)clientdata; /*tiff_handle *th = (tiff_handle *)clientdata; gdIOCtx *ctx = th->ctx; (ctx->gd_free)(ctx);*/ return 0; } /* TIFFSizeProc tiff_sizeproc */ static toff_t tiff_sizeproc(thandle_t clientdata) { tiff_handle *th = (tiff_handle *)clientdata; return th->size; } /* TIFFMapFileProc tiff_mapproc() */ static int tiff_mapproc(thandle_t h, tdata_t *d, toff_t *o) { (void)h; (void)d; (void)o; return 0; } /* TIFFUnmapFileProc tiff_unmapproc */ static void tiff_unmapproc(thandle_t h, tdata_t d, toff_t o) { (void)h; (void)d; (void)o; } /* tiffWriter * ---------- * Write the gd image as a tiff file (called by gdImageTiffCtx) * Parameters are: * image: gd image structure; * out: the stream where to write * bitDepth: depth in bits of each pixel */ void tiffWriter(gdImagePtr image, gdIOCtx *out, int bitDepth) { int x, y; int i; int r, g, b, a; TIFF *tiff; int width, height; int color; char *scan; int samplesPerPixel = 3; int bitsPerSample; int transparentColorR = -1; int transparentColorG = -1; int transparentColorB = -1; uint16 extraSamples[1]; uint16 *colorMapRed = NULL; uint16 *colorMapGreen = NULL; uint16 *colorMapBlue = NULL; tiff_handle *th; th = new_tiff_handle(out); if (!th) { return; } extraSamples[0] = EXTRASAMPLE_ASSOCALPHA; /* read in the width/height of gd image */ width = gdImageSX(image); height = gdImageSY(image); /* reset clip region to whole image */ gdImageSetClip(image, 0, 0, width, height); /* handle old-style single-colour mapping to 100% transparency */ if(image->transparent != -1) { /* set our 100% transparent colour value */ transparentColorR = gdImageRed(image, image->transparent); transparentColorG = gdImageGreen(image, image->transparent); transparentColorB = gdImageBlue(image, image->transparent); } /* Open tiff file writing routines, but use special read/write/seek * functions so that tiff lib writes correct bits of tiff content to * correct areas of file opened and modifieable by the gdIOCtx functions */ tiff = TIFFClientOpen("", "w", th, tiff_readproc, tiff_writeproc, tiff_seekproc, tiff_closeproc, tiff_sizeproc, tiff_mapproc, tiff_unmapproc); TIFFSetField(tiff, TIFFTAG_IMAGEWIDTH, width); TIFFSetField(tiff, TIFFTAG_IMAGELENGTH, height); TIFFSetField(tiff, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE); TIFFSetField(tiff, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); TIFFSetField(tiff, TIFFTAG_PHOTOMETRIC, (bitDepth == 24) ? PHOTOMETRIC_RGB : PHOTOMETRIC_PALETTE); bitsPerSample = (bitDepth == 24 || bitDepth == 8) ? 
8 : 1; TIFFSetField(tiff, TIFFTAG_BITSPERSAMPLE, bitsPerSample); TIFFSetField(tiff, TIFFTAG_XRESOLUTION, (float)image->res_x); TIFFSetField(tiff, TIFFTAG_YRESOLUTION, (float)image->res_y); /* build the color map for 8 bit images */ if(bitDepth != 24) { colorMapRed = (uint16 *) gdMalloc(3 * (1 << bitsPerSample)); if (!colorMapRed) { gdFree(th); return; } colorMapGreen = (uint16 *) gdMalloc(3 * (1 << bitsPerSample)); if (!colorMapGreen) { gdFree(colorMapRed); gdFree(th); return; } colorMapBlue = (uint16 *) gdMalloc(3 * (1 << bitsPerSample)); if (!colorMapBlue) { gdFree(colorMapRed); gdFree(colorMapGreen); gdFree(th); return; } for(i = 0; i < image->colorsTotal; i++) { colorMapRed[i] = gdImageRed(image,i) + (gdImageRed(image,i) * 256); colorMapGreen[i] = gdImageGreen(image,i)+(gdImageGreen(image,i)*256); colorMapBlue[i] = gdImageBlue(image,i) + (gdImageBlue(image,i)*256); } TIFFSetField(tiff, TIFFTAG_COLORMAP, colorMapRed, colorMapGreen, colorMapBlue); samplesPerPixel = 1; } /* here, we check if the 'save alpha' flag is set on the source gd image */ if ((bitDepth == 24) && (image->saveAlphaFlag || image->transparent != -1)) { /* so, we need to store the alpha values too! * Also, tell TIFF what the extra sample means (associated alpha) */ samplesPerPixel = 4; TIFFSetField(tiff, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel); TIFFSetField(tiff, TIFFTAG_EXTRASAMPLES, 1, extraSamples); } else { TIFFSetField(tiff, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel); } TIFFSetField(tiff, TIFFTAG_ROWSPERSTRIP, 1); if(overflow2(width, samplesPerPixel)) { if (colorMapRed) gdFree(colorMapRed); if (colorMapGreen) gdFree(colorMapGreen); if (colorMapBlue) gdFree(colorMapBlue); gdFree(th); return; } if(!(scan = (char *)gdMalloc(width * samplesPerPixel))) { if (colorMapRed) gdFree(colorMapRed); if (colorMapGreen) gdFree(colorMapGreen); if (colorMapBlue) gdFree(colorMapBlue); gdFree(th); return; } /* loop through y-coords, and x-coords */ for(y = 0; y < height; y++) { for(x = 0; x < width; x++) { /* generate scan line for writing to tiff */ color = gdImageGetPixel(image, x, y); a = (127 - gdImageAlpha(image, color)) * 2; a = (a == 0xfe) ? 0xff : a & 0xff; b = gdImageBlue(image, color); g = gdImageGreen(image, color); r = gdImageRed(image, color); /* if this pixel has the same RGB as the transparent colour, * then set alpha fully transparent */ if (transparentColorR == r && transparentColorG == g && transparentColorB == b) { a = 0x00; } if(bitDepth != 24) { /* write out 1 or 8 bit value in 1 byte * (currently treats 1bit as 8bit) */ scan[(x * samplesPerPixel) + 0] = color; } else { /* write out 24 bit value in 3 (or 4 if transparent) bytes */ if(image->saveAlphaFlag || image->transparent != -1) { scan[(x * samplesPerPixel) + 3] = a; } scan[(x * samplesPerPixel) + 2] = b; scan[(x * samplesPerPixel) + 1] = g; scan[(x * samplesPerPixel) + 0] = r; } } /* Write the scan line to the tiff */ if(TIFFWriteEncodedStrip(tiff, y, scan, width * samplesPerPixel) == -1) { if (colorMapRed) gdFree(colorMapRed); if (colorMapGreen) gdFree(colorMapGreen); if (colorMapBlue) gdFree(colorMapBlue); gdFree(th); /* error handler here */ gd_error("Could not create TIFF\n"); return; } } /* now cloase and free up resources */ TIFFClose(tiff); gdFree(scan); gdFree(th); if(bitDepth != 24) { gdFree(colorMapRed); gdFree(colorMapGreen); gdFree(colorMapBlue); } } /* Function: gdImageTiffCtx Write the gd image as a tiff file. 
Parameters: image - gd image structure; out - the stream where to write */ BGD_DECLARE(void) gdImageTiffCtx(gdImagePtr image, gdIOCtx *out) { int clipx1P, clipy1P, clipx2P, clipy2P; int bitDepth = 24; /* First, switch off clipping, or we'll not get all the image! */ gdImageGetClip(image, &clipx1P, &clipy1P, &clipx2P, &clipy2P); /* use the appropriate routine depending on the bit depth of the image */ if(image->trueColor) { bitDepth = 24; } else if(image->colorsTotal == 2) { bitDepth = 1; } else { bitDepth = 8; } tiffWriter(image, out, bitDepth); /* reset clipping area to the gd image's original values */ gdImageSetClip(image, clipx1P, clipy1P, clipx2P, clipy2P); } /* Check if we are really in 8bit mode */ static int checkColorMap(n, r, g, b) int n; uint16 *r, *g, *b; { while (n-- > 0) if (*r++ >= 256 || *g++ >= 256 || *b++ >= 256) return (16); return (8); } /* Read and convert a TIFF colormap */ static int readTiffColorMap(gdImagePtr im, TIFF *tif, char is_bw, int photometric) { uint16 *redcmap, *greencmap, *bluecmap; uint16 bps; int i; if (is_bw) { if (photometric == PHOTOMETRIC_MINISWHITE) { gdImageColorAllocate(im, 255,255,255); gdImageColorAllocate(im, 0, 0, 0); } else { gdImageColorAllocate(im, 0, 0, 0); gdImageColorAllocate(im, 255,255,255); } } else { uint16 min_sample_val, max_sample_val; if (!TIFFGetField(tif, TIFFTAG_MINSAMPLEVALUE, &min_sample_val)) { min_sample_val = 0; } if (!TIFFGetField(tif, TIFFTAG_MAXSAMPLEVALUE, &max_sample_val)) { max_sample_val = 255; } if (photometric == PHOTOMETRIC_MINISBLACK || photometric == PHOTOMETRIC_MINISWHITE) { /* TODO: use TIFFTAG_MINSAMPLEVALUE and TIFFTAG_MAXSAMPLEVALUE */ /* Gray level palette */ for (i=min_sample_val; i <= max_sample_val; i++) { gdImageColorAllocate(im, i,i,i); } return GD_SUCCESS; } else if (!TIFFGetField(tif, TIFFTAG_COLORMAP, &redcmap, &greencmap, &bluecmap)) { gd_error("Cannot read the color map"); return GD_FAILURE; } TIFFGetFieldDefaulted(tif, TIFFTAG_BITSPERSAMPLE, &bps); #define CVT(x) (((x) * 255) / ((1L<<16)-1)) if (checkColorMap(1<<bps, redcmap, greencmap, bluecmap) == 16) { for (i = (1<<bps)-1; i > 0; i--) { redcmap[i] = CVT(redcmap[i]); greencmap[i] = CVT(greencmap[i]); bluecmap[i] = CVT(bluecmap[i]); } } for (i = 0; i < 256; i++) { gdImageColorAllocate(im, redcmap[i], greencmap[i], bluecmap[i]); } #undef CVT } return GD_SUCCESS; } static void readTiffBw (const unsigned char *src, gdImagePtr im, uint16 photometric, int startx, int starty, int width, int height, char has_alpha, int extra, int align) { int x = startx, y = starty; (void)has_alpha; (void)extra; (void)align; for (y = starty; y < starty + height; y++) { for (x = startx; x < startx + width; x++) { register unsigned char curr = *src++; register unsigned char mask; if (photometric == PHOTOMETRIC_MINISWHITE) { curr = ~curr; } for (mask = 0x80; mask != 0 && x < startx + width; mask >>= 1) { gdImageSetPixel(im, x, y, ((curr & mask) != 0)?0:1); } } } } static void readTiff8bit (const unsigned char *src, gdImagePtr im, uint16 photometric, int startx, int starty, int width, int height, char has_alpha, int extra, int align) { int red, green, blue, alpha; int x, y; (void)extra; (void)align; switch (photometric) { case PHOTOMETRIC_PALETTE: /* Palette has no alpha (see TIFF specs for more details */ for (y = starty; y < starty + height; y++) { for (x = startx; x < startx + width; x++) { gdImageSetPixel(im, x, y,*(src++)); } } break; case PHOTOMETRIC_RGB: if (has_alpha) { gdImageAlphaBlending(im, 0); gdImageSaveAlpha(im, 1); for (y = starty; y < starty + height; 
y++) { for (x = startx; x < startx + width; x++) { red = *src++; green = *src++; blue = *src++; alpha = *src++; red = MIN (red, alpha); blue = MIN (blue, alpha); green = MIN (green, alpha); if (alpha) { gdImageSetPixel(im, x, y, gdTrueColorAlpha(red * 255 / alpha, green * 255 / alpha, blue * 255 /alpha, gdAlphaMax - (alpha >> 1))); } else { gdImageSetPixel(im, x, y, gdTrueColorAlpha(red, green, blue, gdAlphaMax - (alpha >> 1))); } } } } else { for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { register unsigned char r = *src++; register unsigned char g = *src++; register unsigned char b = *src++; gdImageSetPixel(im, x, y, gdTrueColor(r, g, b)); } } } break; case PHOTOMETRIC_MINISWHITE: if (has_alpha) { /* We don't process the extra yet */ } else { for (y = starty; y < starty + height; y++) { for (x = startx; x < startx + width; x++) { gdImageSetPixel(im, x, y, ~(*src++)); } } } break; case PHOTOMETRIC_MINISBLACK: if (has_alpha) { /* We don't process the extra yet */ } else { for (y = starty; y < height; y++) { for (x = 0; x < width; x++) { gdImageSetPixel(im, x, y, *src++); } } } break; } } static int createFromTiffTiles(TIFF *tif, gdImagePtr im, uint16 bps, uint16 photometric, char has_alpha, char is_bw, int extra) { uint16 planar; int im_width, im_height; int tile_width, tile_height; int x, y, height, width; unsigned char *buffer; if (!TIFFGetField (tif, TIFFTAG_PLANARCONFIG, &planar)) { planar = PLANARCONFIG_CONTIG; } if (TIFFGetField (tif, TIFFTAG_IMAGEWIDTH, &im_width) == 0 || TIFFGetField (tif, TIFFTAG_IMAGELENGTH, &im_height) == 0 || TIFFGetField (tif, TIFFTAG_TILEWIDTH, &tile_width) == 0 || TIFFGetField (tif, TIFFTAG_TILELENGTH, &tile_height) == 0) { return FALSE; } buffer = (unsigned char *) gdMalloc (TIFFTileSize (tif)); if (!buffer) { return FALSE; } for (y = 0; y < im_height; y += tile_height) { for (x = 0; x < im_width; x += tile_width) { TIFFReadTile(tif, buffer, x, y, 0, 0); width = MIN(im_width - x, tile_width); height = MIN(im_height - y, tile_height); if (bps == 16) { } else if (bps == 8) { readTiff8bit(buffer, im, photometric, x, y, width, height, has_alpha, extra, 0); } else if (is_bw) { readTiffBw(buffer, im, photometric, x, y, width, height, has_alpha, extra, 0); } else { /* TODO: implement some default reader or detect this case earlier use force_rgb */ } } } gdFree(buffer); return TRUE; } static int createFromTiffLines(TIFF *tif, gdImagePtr im, uint16 bps, uint16 photometric, char has_alpha, char is_bw, int extra) { uint16 planar; uint32 im_height, im_width, y; unsigned char *buffer; if (!TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planar)) { planar = PLANARCONFIG_CONTIG; } if (!TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &im_height)) { gd_error("Can't fetch TIFF height\n"); return FALSE; } if (!TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &im_width)) { gd_error("Can't fetch TIFF width \n"); return FALSE; } buffer = (unsigned char *)gdMalloc(im_width * 4); if (!buffer) { return GD_FAILURE; } if (planar == PLANARCONFIG_CONTIG) { switch (bps) { case 16: /* TODO * or simply use force_rgba */ break; case 8: for (y = 0; y < im_height; y++ ) { if (!TIFFReadScanline (tif, buffer, y, 0)) { gd_error("Error while reading scanline %i", y); break; } /* reading one line at a time */ readTiff8bit(buffer, im, photometric, 0, y, im_width, 1, has_alpha, extra, 0); } break; default: if (is_bw) { for (y = 0; y < im_height; y++ ) { if (!TIFFReadScanline (tif, buffer, y, 0)) { gd_error("Error while reading scanline %i", y); break; } /* reading one line at a time */ readTiffBw(buffer, im, 
photometric, 0, y, im_width, 1, has_alpha, extra, 0); } } else { /* TODO: implement some default reader or detect this case earlier > force_rgb */ } break; } } else { /* TODO: implement a reader for separate panes. We detect this case earlier for now and use force_rgb */ } gdFree(buffer); return GD_SUCCESS; } static int createFromTiffRgba(TIFF * tif, gdImagePtr im) { int a; int x, y; int alphaBlendingFlag = 0; int color; int width = im->sx; int height = im->sy; uint32 *buffer; uint32 rgba; /* switch off colour merging on target gd image just while we write out * content - we want to preserve the alpha data until the user chooses * what to do with the image */ alphaBlendingFlag = im->alphaBlendingFlag; gdImageAlphaBlending(im, 0); buffer = (uint32 *) gdCalloc(sizeof(uint32), width * height); if (!buffer) { return GD_FAILURE; } TIFFReadRGBAImage(tif, width, height, buffer, 0); for(y = 0; y < height; y++) { for(x = 0; x < width; x++) { /* if it doesn't already exist, allocate a new colour, * else use existing one */ rgba = buffer[(y * width + x)]; a = (0xff - TIFFGetA(rgba)) / 2; color = gdTrueColorAlpha(TIFFGetR(rgba), TIFFGetG(rgba), TIFFGetB(rgba), a); /* set pixel colour to this colour */ gdImageSetPixel(im, x, height - y - 1, color); } } gdFree(buffer); /* now reset colour merge for alpha blending routines */ gdImageAlphaBlending(im, alphaBlendingFlag); return GD_SUCCESS; } /* Function: gdImageCreateFromTiffCtx Create a gdImage from a TIFF file input from an gdIOCtx. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTiffCtx(gdIOCtx *infile) { TIFF *tif; tiff_handle *th; uint16 bps, spp, photometric; uint16 orientation; int width, height; uint16 extra, *extra_types; uint16 planar; char has_alpha, is_bw, is_gray; char force_rgba = FALSE; char save_transparent; int image_type; int ret; float res_float; gdImagePtr im = NULL; th = new_tiff_handle(infile); if (!th) { return NULL; } tif = TIFFClientOpen("", "rb", th, tiff_readproc, tiff_writeproc, tiff_seekproc, tiff_closeproc, tiff_sizeproc, tiff_mapproc, tiff_unmapproc); if (!tif) { gd_error("Cannot open TIFF image"); gdFree(th); return NULL; } if (!TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &width)) { gd_error("TIFF error, Cannot read image width"); goto error; } if (!TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &height)) { gd_error("TIFF error, Cannot read image width"); goto error; } TIFFGetFieldDefaulted (tif, TIFFTAG_BITSPERSAMPLE, &bps); /* Unsupported bps, force to RGBA */ if (1/*bps > 8 && bps != 16*/) { force_rgba = TRUE; } TIFFGetFieldDefaulted (tif, TIFFTAG_SAMPLESPERPIXEL, &spp); if (!TIFFGetField (tif, TIFFTAG_EXTRASAMPLES, &extra, &extra_types)) { extra = 0; } if (!TIFFGetField (tif, TIFFTAG_PHOTOMETRIC, &photometric)) { uint16 compression; if (TIFFGetField(tif, TIFFTAG_COMPRESSION, &compression) && (compression == COMPRESSION_CCITTFAX3 || compression == COMPRESSION_CCITTFAX4 || compression == COMPRESSION_CCITTRLE || compression == COMPRESSION_CCITTRLEW)) { gd_error("Could not get photometric. " "Image is CCITT compressed, assuming min-is-white"); photometric = PHOTOMETRIC_MINISWHITE; } else { gd_error("Could not get photometric. " "Assuming min-is-black"); photometric = PHOTOMETRIC_MINISBLACK; } } save_transparent = FALSE; /* test if the extrasample represents an associated alpha channel... 
*/ if (extra > 0 && (extra_types[0] == EXTRASAMPLE_ASSOCALPHA)) { has_alpha = TRUE; save_transparent = FALSE; --extra; } else if (extra > 0 && (extra_types[0] == EXTRASAMPLE_UNASSALPHA)) { has_alpha = TRUE; save_transparent = TRUE; --extra; } else if (extra > 0 && (extra_types[0] == EXTRASAMPLE_UNSPECIFIED)) { /* assuming unassociated alpha if unspecified */ gd_error("alpha channel type not defined, assuming alpha is not premultiplied"); has_alpha = TRUE; save_transparent = TRUE; --extra; } else { has_alpha = FALSE; } if (photometric == PHOTOMETRIC_RGB && spp > 3 + extra) { has_alpha = TRUE; extra = spp - 4; } else if (photometric != PHOTOMETRIC_RGB && spp > 1 + extra) { has_alpha = TRUE; extra = spp - 2; } is_bw = FALSE; is_gray = FALSE; switch (photometric) { case PHOTOMETRIC_MINISBLACK: case PHOTOMETRIC_MINISWHITE: if (!has_alpha && bps == 1 && spp == 1) { image_type = GD_INDEXED; is_bw = TRUE; } else { image_type = GD_GRAY; } break; case PHOTOMETRIC_RGB: image_type = GD_RGB; break; case PHOTOMETRIC_PALETTE: image_type = GD_INDEXED; break; default: force_rgba = TRUE; break; } if (!TIFFGetField (tif, TIFFTAG_PLANARCONFIG, &planar)) { planar = PLANARCONFIG_CONTIG; } /* Force rgba if image plans are not contiguous */ if (force_rgba || planar != PLANARCONFIG_CONTIG) { image_type = GD_RGB; } if (!force_rgba && (image_type == GD_PALETTE || image_type == GD_INDEXED || image_type == GD_GRAY)) { im = gdImageCreate(width, height); if (!im) goto error; readTiffColorMap(im, tif, is_bw, photometric); } else { im = gdImageCreateTrueColor(width, height); if (!im) goto error; } #ifdef DEBUG printf("force rgba: %i\n", force_rgba); printf("has_alpha: %i\n", has_alpha); printf("save trans: %i\n", save_transparent); printf("is_bw: %i\n", is_bw); printf("is_gray: %i\n", is_gray); printf("type: %i\n", image_type); #else (void)is_gray; (void)save_transparent; #endif if (force_rgba) { ret = createFromTiffRgba(tif, im); } else if (TIFFIsTiled(tif)) { ret = createFromTiffTiles(tif, im, bps, photometric, has_alpha, is_bw, extra); } else { ret = createFromTiffLines(tif, im, bps, photometric, has_alpha, is_bw, extra); } if (!ret) { gdImageDestroy(im); im = NULL; goto error; } if (TIFFGetField(tif, TIFFTAG_XRESOLUTION, &res_float)) { im->res_x = (unsigned int)res_float; //truncate } if (TIFFGetField(tif, TIFFTAG_YRESOLUTION, &res_float)) { im->res_y = (unsigned int)res_float; //truncate } if (TIFFGetField(tif, TIFFTAG_ORIENTATION, &orientation)) { switch (orientation) { case ORIENTATION_TOPLEFT: case ORIENTATION_TOPRIGHT: case ORIENTATION_BOTRIGHT: case ORIENTATION_BOTLEFT: break; default: gd_error("Orientation %d not handled yet!", orientation); break; } } error: TIFFClose(tif); gdFree(th); return im; } /* Function: gdImageCreateFromTIFF */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTiff(FILE *inFile) { gdImagePtr im; gdIOCtx *in = gdNewFileCtx(inFile); if (in == NULL) return NULL; im = gdImageCreateFromTiffCtx(in); in->gd_free(in); return im; } /* Function: gdImageCreateFromTiffPtr */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTiffPtr(int size, void *data) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if (in == NULL) return NULL; im = gdImageCreateFromTiffCtx(in); in->gd_free(in); return im; } /* Function: gdImageTiff */ BGD_DECLARE(void) gdImageTiff(gdImagePtr im, FILE *outFile) { gdIOCtx *out = gdNewFileCtx(outFile); if (out == NULL) return; gdImageTiffCtx(im, out); /* what's an fg again? 
*/ out->gd_free(out); } /* Function: gdImageTiffPtr */ BGD_DECLARE(void *) gdImageTiffPtr(gdImagePtr im, int *size) { void *rv; gdIOCtx *out = gdNewDynamicCtx (2048, NULL); if (out == NULL) return NULL; gdImageTiffCtx(im, out); /* what's an fg again? */ rv = gdDPExtractData(out, size); out->gd_free(out); return rv; } #endif
/* TIFF - Tagged Image File Format Encapsulation for GD Library gd_tiff.c Copyright (C) Pierre-A. Joye, M. Retallack --------------------------------------------------------------------------- ** ** Permission to use, copy, modify, and distribute this software and its ** documentation for any purpose and without fee is hereby granted, provided ** that the above copyright notice appear in all copies and that both that ** copyright notice and this permission notice appear in supporting ** documentation. This software is provided "as is" without express or ** implied warranty. ** --------------------------------------------------------------------------- Ctx code written by M. Retallack Todo: If we fail - cleanup Writer: Use gd error function, overflow check may not be necessary as we write our own data (check already done) Implement 2 color black/white saving using group4 fax compression Implement function to specify encoding to use when writing tiff data ---------------------------------------------------------------------------- */ /* $Id$ */ /** * File: TIFF IO * * Read and write TIFF images. * * There is only most basic support for the TIFF format available for now; * for instance, multiple pages are not yet supported. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "gd.h" #include "gd_errors.h" #include "gdfonts.h" #include <stdio.h> #include <stdlib.h> #include <limits.h> #include "gdhelpers.h" #ifdef HAVE_LIBTIFF #include "tiff.h" #include "tiffio.h" #define GD_SUCCESS 1 #define GD_FAILURE 0 #define TRUE 1 #define FALSE 0 /* I define those here until the new formats * are commited. We can then rely on the global * def */ #define GD_PALETTE 1 #define GD_TRUECOLOR 2 #define GD_GRAY 3 #define GD_INDEXED 4 #define GD_RGB 5 #define MIN(a,b) (a < b) ? a : b; #define MAX(a,b) (a > b) ? 
a : b; typedef struct tiff_handle { int size; int pos; gdIOCtx *ctx; int written; } tiff_handle; /* Functions for reading, writing and seeking in gdIOCtx This allows for non-file i/o operations with no explicit use of libtiff fileio wrapper functions Note: because libtiff requires random access, but gdIOCtx only supports streams, all writes are buffered into memory and written out on close, also all reads are done from a memory mapped version of the tiff (assuming one already exists) */ tiff_handle * new_tiff_handle(gdIOCtx *g) { tiff_handle * t; if (!g) { gd_error("Cannot create a new tiff handle, missing Ctx argument"); return NULL; } t = (tiff_handle *) gdMalloc(sizeof(tiff_handle)); if (!t) { gd_error("Failed to allocate a new tiff handle"); return NULL; } t->size = 0; t->pos = 0; t->ctx = g; t->written = 0; return t; } /* TIFFReadWriteProc tiff_readproc - Will use gdIOCtx procs to read required (previously written) TIFF file content */ static tsize_t tiff_readproc(thandle_t clientdata, tdata_t data, tsize_t size) { tiff_handle *th = (tiff_handle *)clientdata; gdIOCtx *ctx = th->ctx; size = (ctx->getBuf)(ctx, data, size); return size; } /* TIFFReadWriteProc tiff_writeproc - Will use gdIOCtx procs to write out TIFF data */ static tsize_t tiff_writeproc(thandle_t clientdata, tdata_t data, tsize_t size) { tiff_handle *th = (tiff_handle *)clientdata; gdIOCtx *ctx = th->ctx; size = (ctx->putBuf)(ctx, data, size); if(size + th->pos>th->size) { th->size = size + th->pos; th->pos += size; } return size; } /* TIFFSeekProc tiff_seekproc * used to move around the partially written TIFF */ static toff_t tiff_seekproc(thandle_t clientdata, toff_t offset, int from) { tiff_handle *th = (tiff_handle *)clientdata; gdIOCtx *ctx = th->ctx; int result; switch(from) { default: case SEEK_SET: /* just use offset */ break; case SEEK_END: /* invert offset, so that it is from start, not end as supplied */ offset = th->size + offset; break; case SEEK_CUR: /* add current position to translate it to 'from start', * not from durrent as supplied */ offset += th->pos; break; } /* now, move pos in both io context and buf */ if((result = (ctx->seek)(ctx, offset))) { th->pos = offset; } return result ? 
offset : (toff_t)-1; } /* TIFFCloseProc tiff_closeproc - used to finally close the TIFF file */ static int tiff_closeproc(thandle_t clientdata) { (void)clientdata; /*tiff_handle *th = (tiff_handle *)clientdata; gdIOCtx *ctx = th->ctx; (ctx->gd_free)(ctx);*/ return 0; } /* TIFFSizeProc tiff_sizeproc */ static toff_t tiff_sizeproc(thandle_t clientdata) { tiff_handle *th = (tiff_handle *)clientdata; return th->size; } /* TIFFMapFileProc tiff_mapproc() */ static int tiff_mapproc(thandle_t h, tdata_t *d, toff_t *o) { (void)h; (void)d; (void)o; return 0; } /* TIFFUnmapFileProc tiff_unmapproc */ static void tiff_unmapproc(thandle_t h, tdata_t d, toff_t o) { (void)h; (void)d; (void)o; } /* tiffWriter * ---------- * Write the gd image as a tiff file (called by gdImageTiffCtx) * Parameters are: * image: gd image structure; * out: the stream where to write * bitDepth: depth in bits of each pixel */ void tiffWriter(gdImagePtr image, gdIOCtx *out, int bitDepth) { int x, y; int i; int r, g, b, a; TIFF *tiff; int width, height; int color; char *scan; int samplesPerPixel = 3; int bitsPerSample; int transparentColorR = -1; int transparentColorG = -1; int transparentColorB = -1; uint16 extraSamples[1]; uint16 *colorMapRed = NULL; uint16 *colorMapGreen = NULL; uint16 *colorMapBlue = NULL; tiff_handle *th; th = new_tiff_handle(out); if (!th) { return; } extraSamples[0] = EXTRASAMPLE_ASSOCALPHA; /* read in the width/height of gd image */ width = gdImageSX(image); height = gdImageSY(image); /* reset clip region to whole image */ gdImageSetClip(image, 0, 0, width, height); /* handle old-style single-colour mapping to 100% transparency */ if(image->transparent != -1) { /* set our 100% transparent colour value */ transparentColorR = gdImageRed(image, image->transparent); transparentColorG = gdImageGreen(image, image->transparent); transparentColorB = gdImageBlue(image, image->transparent); } /* Open tiff file writing routines, but use special read/write/seek * functions so that tiff lib writes correct bits of tiff content to * correct areas of file opened and modifieable by the gdIOCtx functions */ tiff = TIFFClientOpen("", "w", th, tiff_readproc, tiff_writeproc, tiff_seekproc, tiff_closeproc, tiff_sizeproc, tiff_mapproc, tiff_unmapproc); TIFFSetField(tiff, TIFFTAG_IMAGEWIDTH, width); TIFFSetField(tiff, TIFFTAG_IMAGELENGTH, height); TIFFSetField(tiff, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE); TIFFSetField(tiff, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); TIFFSetField(tiff, TIFFTAG_PHOTOMETRIC, (bitDepth == 24) ? PHOTOMETRIC_RGB : PHOTOMETRIC_PALETTE); bitsPerSample = (bitDepth == 24 || bitDepth == 8) ? 
8 : 1; TIFFSetField(tiff, TIFFTAG_BITSPERSAMPLE, bitsPerSample); TIFFSetField(tiff, TIFFTAG_XRESOLUTION, (float)image->res_x); TIFFSetField(tiff, TIFFTAG_YRESOLUTION, (float)image->res_y); /* build the color map for 8 bit images */ if(bitDepth != 24) { colorMapRed = (uint16 *) gdMalloc(3 * (1 << bitsPerSample)); if (!colorMapRed) { gdFree(th); return; } colorMapGreen = (uint16 *) gdMalloc(3 * (1 << bitsPerSample)); if (!colorMapGreen) { gdFree(colorMapRed); gdFree(th); return; } colorMapBlue = (uint16 *) gdMalloc(3 * (1 << bitsPerSample)); if (!colorMapBlue) { gdFree(colorMapRed); gdFree(colorMapGreen); gdFree(th); return; } for(i = 0; i < image->colorsTotal; i++) { colorMapRed[i] = gdImageRed(image,i) + (gdImageRed(image,i) * 256); colorMapGreen[i] = gdImageGreen(image,i)+(gdImageGreen(image,i)*256); colorMapBlue[i] = gdImageBlue(image,i) + (gdImageBlue(image,i)*256); } TIFFSetField(tiff, TIFFTAG_COLORMAP, colorMapRed, colorMapGreen, colorMapBlue); samplesPerPixel = 1; } /* here, we check if the 'save alpha' flag is set on the source gd image */ if ((bitDepth == 24) && (image->saveAlphaFlag || image->transparent != -1)) { /* so, we need to store the alpha values too! * Also, tell TIFF what the extra sample means (associated alpha) */ samplesPerPixel = 4; TIFFSetField(tiff, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel); TIFFSetField(tiff, TIFFTAG_EXTRASAMPLES, 1, extraSamples); } else { TIFFSetField(tiff, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel); } TIFFSetField(tiff, TIFFTAG_ROWSPERSTRIP, 1); if(overflow2(width, samplesPerPixel)) { if (colorMapRed) gdFree(colorMapRed); if (colorMapGreen) gdFree(colorMapGreen); if (colorMapBlue) gdFree(colorMapBlue); gdFree(th); return; } if(!(scan = (char *)gdMalloc(width * samplesPerPixel))) { if (colorMapRed) gdFree(colorMapRed); if (colorMapGreen) gdFree(colorMapGreen); if (colorMapBlue) gdFree(colorMapBlue); gdFree(th); return; } /* loop through y-coords, and x-coords */ for(y = 0; y < height; y++) { for(x = 0; x < width; x++) { /* generate scan line for writing to tiff */ color = gdImageGetPixel(image, x, y); a = (127 - gdImageAlpha(image, color)) * 2; a = (a == 0xfe) ? 0xff : a & 0xff; b = gdImageBlue(image, color); g = gdImageGreen(image, color); r = gdImageRed(image, color); /* if this pixel has the same RGB as the transparent colour, * then set alpha fully transparent */ if (transparentColorR == r && transparentColorG == g && transparentColorB == b) { a = 0x00; } if(bitDepth != 24) { /* write out 1 or 8 bit value in 1 byte * (currently treats 1bit as 8bit) */ scan[(x * samplesPerPixel) + 0] = color; } else { /* write out 24 bit value in 3 (or 4 if transparent) bytes */ if(image->saveAlphaFlag || image->transparent != -1) { scan[(x * samplesPerPixel) + 3] = a; } scan[(x * samplesPerPixel) + 2] = b; scan[(x * samplesPerPixel) + 1] = g; scan[(x * samplesPerPixel) + 0] = r; } } /* Write the scan line to the tiff */ if(TIFFWriteEncodedStrip(tiff, y, scan, width * samplesPerPixel) == -1) { if (colorMapRed) gdFree(colorMapRed); if (colorMapGreen) gdFree(colorMapGreen); if (colorMapBlue) gdFree(colorMapBlue); gdFree(th); /* error handler here */ gd_error("Could not create TIFF\n"); return; } } /* now cloase and free up resources */ TIFFClose(tiff); gdFree(scan); gdFree(th); if(bitDepth != 24) { gdFree(colorMapRed); gdFree(colorMapGreen); gdFree(colorMapBlue); } } /* Function: gdImageTiffCtx Write the gd image as a tiff file. 
Parameters: image - gd image structure; out - the stream where to write */ BGD_DECLARE(void) gdImageTiffCtx(gdImagePtr image, gdIOCtx *out) { int clipx1P, clipy1P, clipx2P, clipy2P; int bitDepth = 24; /* First, switch off clipping, or we'll not get all the image! */ gdImageGetClip(image, &clipx1P, &clipy1P, &clipx2P, &clipy2P); /* use the appropriate routine depending on the bit depth of the image */ if(image->trueColor) { bitDepth = 24; } else if(image->colorsTotal == 2) { bitDepth = 1; } else { bitDepth = 8; } tiffWriter(image, out, bitDepth); /* reset clipping area to the gd image's original values */ gdImageSetClip(image, clipx1P, clipy1P, clipx2P, clipy2P); } /* Check if we are really in 8bit mode */ static int checkColorMap(n, r, g, b) int n; uint16 *r, *g, *b; { while (n-- > 0) if (*r++ >= 256 || *g++ >= 256 || *b++ >= 256) return (16); return (8); } /* Read and convert a TIFF colormap */ static int readTiffColorMap(gdImagePtr im, TIFF *tif, char is_bw, int photometric) { uint16 *redcmap, *greencmap, *bluecmap; uint16 bps; int i; if (is_bw) { if (photometric == PHOTOMETRIC_MINISWHITE) { gdImageColorAllocate(im, 255,255,255); gdImageColorAllocate(im, 0, 0, 0); } else { gdImageColorAllocate(im, 0, 0, 0); gdImageColorAllocate(im, 255,255,255); } } else { uint16 min_sample_val, max_sample_val; if (!TIFFGetField(tif, TIFFTAG_MINSAMPLEVALUE, &min_sample_val)) { min_sample_val = 0; } if (!TIFFGetField(tif, TIFFTAG_MAXSAMPLEVALUE, &max_sample_val)) { max_sample_val = 255; } if (photometric == PHOTOMETRIC_MINISBLACK || photometric == PHOTOMETRIC_MINISWHITE) { /* TODO: use TIFFTAG_MINSAMPLEVALUE and TIFFTAG_MAXSAMPLEVALUE */ /* Gray level palette */ for (i=min_sample_val; i <= max_sample_val; i++) { gdImageColorAllocate(im, i,i,i); } return GD_SUCCESS; } else if (!TIFFGetField(tif, TIFFTAG_COLORMAP, &redcmap, &greencmap, &bluecmap)) { gd_error("Cannot read the color map"); return GD_FAILURE; } TIFFGetFieldDefaulted(tif, TIFFTAG_BITSPERSAMPLE, &bps); #define CVT(x) (((x) * 255) / ((1L<<16)-1)) if (checkColorMap(1<<bps, redcmap, greencmap, bluecmap) == 16) { for (i = (1<<bps)-1; i > 0; i--) { redcmap[i] = CVT(redcmap[i]); greencmap[i] = CVT(greencmap[i]); bluecmap[i] = CVT(bluecmap[i]); } } for (i = 0; i < 256; i++) { gdImageColorAllocate(im, redcmap[i], greencmap[i], bluecmap[i]); } #undef CVT } return GD_SUCCESS; } static void readTiffBw (const unsigned char *src, gdImagePtr im, uint16 photometric, int startx, int starty, int width, int height, char has_alpha, int extra, int align) { int x = startx, y = starty; (void)has_alpha; (void)extra; (void)align; for (y = starty; y < starty + height; y++) { for (x = startx; x < startx + width; x++) { register unsigned char curr = *src++; register unsigned char mask; if (photometric == PHOTOMETRIC_MINISWHITE) { curr = ~curr; } for (mask = 0x80; mask != 0 && x < startx + width; mask >>= 1) { gdImageSetPixel(im, x, y, ((curr & mask) != 0)?0:1); } } } } static void readTiff8bit (const unsigned char *src, gdImagePtr im, uint16 photometric, int startx, int starty, int width, int height, char has_alpha, int extra, int align) { int red, green, blue, alpha; int x, y; (void)extra; (void)align; switch (photometric) { case PHOTOMETRIC_PALETTE: /* Palette has no alpha (see TIFF specs for more details */ for (y = starty; y < starty + height; y++) { for (x = startx; x < startx + width; x++) { gdImageSetPixel(im, x, y,*(src++)); } } break; case PHOTOMETRIC_RGB: if (has_alpha) { gdImageAlphaBlending(im, 0); gdImageSaveAlpha(im, 1); for (y = starty; y < starty + height; 
y++) { for (x = startx; x < startx + width; x++) { red = *src++; green = *src++; blue = *src++; alpha = *src++; red = MIN (red, alpha); blue = MIN (blue, alpha); green = MIN (green, alpha); if (alpha) { gdImageSetPixel(im, x, y, gdTrueColorAlpha(red * 255 / alpha, green * 255 / alpha, blue * 255 /alpha, gdAlphaMax - (alpha >> 1))); } else { gdImageSetPixel(im, x, y, gdTrueColorAlpha(red, green, blue, gdAlphaMax - (alpha >> 1))); } } } } else { for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { register unsigned char r = *src++; register unsigned char g = *src++; register unsigned char b = *src++; gdImageSetPixel(im, x, y, gdTrueColor(r, g, b)); } } } break; case PHOTOMETRIC_MINISWHITE: if (has_alpha) { /* We don't process the extra yet */ } else { for (y = starty; y < starty + height; y++) { for (x = startx; x < startx + width; x++) { gdImageSetPixel(im, x, y, ~(*src++)); } } } break; case PHOTOMETRIC_MINISBLACK: if (has_alpha) { /* We don't process the extra yet */ } else { for (y = starty; y < height; y++) { for (x = 0; x < width; x++) { gdImageSetPixel(im, x, y, *src++); } } } break; } } static int createFromTiffTiles(TIFF *tif, gdImagePtr im, uint16 bps, uint16 photometric, char has_alpha, char is_bw, int extra) { uint16 planar; int im_width, im_height; int tile_width, tile_height; int x, y, height, width; unsigned char *buffer; if (!TIFFGetField (tif, TIFFTAG_PLANARCONFIG, &planar)) { planar = PLANARCONFIG_CONTIG; } if (TIFFGetField (tif, TIFFTAG_IMAGEWIDTH, &im_width) == 0 || TIFFGetField (tif, TIFFTAG_IMAGELENGTH, &im_height) == 0 || TIFFGetField (tif, TIFFTAG_TILEWIDTH, &tile_width) == 0 || TIFFGetField (tif, TIFFTAG_TILELENGTH, &tile_height) == 0) { return FALSE; } buffer = (unsigned char *) gdMalloc (TIFFTileSize (tif)); if (!buffer) { return FALSE; } for (y = 0; y < im_height; y += tile_height) { for (x = 0; x < im_width; x += tile_width) { TIFFReadTile(tif, buffer, x, y, 0, 0); width = MIN(im_width - x, tile_width); height = MIN(im_height - y, tile_height); if (bps == 16) { } else if (bps == 8) { readTiff8bit(buffer, im, photometric, x, y, width, height, has_alpha, extra, 0); } else if (is_bw) { readTiffBw(buffer, im, photometric, x, y, width, height, has_alpha, extra, 0); } else { /* TODO: implement some default reader or detect this case earlier use force_rgb */ } } } gdFree(buffer); return TRUE; } static int createFromTiffLines(TIFF *tif, gdImagePtr im, uint16 bps, uint16 photometric, char has_alpha, char is_bw, int extra) { uint16 planar; uint32 im_height, im_width, y; unsigned char *buffer; if (!TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planar)) { planar = PLANARCONFIG_CONTIG; } if (!TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &im_height)) { gd_error("Can't fetch TIFF height\n"); return FALSE; } if (!TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &im_width)) { gd_error("Can't fetch TIFF width \n"); return FALSE; } buffer = (unsigned char *)gdMalloc(im_width * 4); if (!buffer) { return GD_FAILURE; } if (planar == PLANARCONFIG_CONTIG) { switch (bps) { case 16: /* TODO * or simply use force_rgba */ break; case 8: for (y = 0; y < im_height; y++ ) { if (!TIFFReadScanline (tif, buffer, y, 0)) { gd_error("Error while reading scanline %i", y); break; } /* reading one line at a time */ readTiff8bit(buffer, im, photometric, 0, y, im_width, 1, has_alpha, extra, 0); } break; default: if (is_bw) { for (y = 0; y < im_height; y++ ) { if (!TIFFReadScanline (tif, buffer, y, 0)) { gd_error("Error while reading scanline %i", y); break; } /* reading one line at a time */ readTiffBw(buffer, im, 
photometric, 0, y, im_width, 1, has_alpha, extra, 0); } } else { /* TODO: implement some default reader or detect this case earlier > force_rgb */ } break; } } else { /* TODO: implement a reader for separate panes. We detect this case earlier for now and use force_rgb */ } gdFree(buffer); return GD_SUCCESS; } static int createFromTiffRgba(TIFF * tif, gdImagePtr im) { int a; int x, y; int alphaBlendingFlag = 0; int color; int width = im->sx; int height = im->sy; uint32 *buffer; uint32 rgba; int success; /* switch off colour merging on target gd image just while we write out * content - we want to preserve the alpha data until the user chooses * what to do with the image */ alphaBlendingFlag = im->alphaBlendingFlag; gdImageAlphaBlending(im, 0); buffer = (uint32 *) gdCalloc(sizeof(uint32), width * height); if (!buffer) { return GD_FAILURE; } success = TIFFReadRGBAImage(tif, width, height, buffer, 1); if (success) { for(y = 0; y < height; y++) { for(x = 0; x < width; x++) { /* if it doesn't already exist, allocate a new colour, * else use existing one */ rgba = buffer[(y * width + x)]; a = (0xff - TIFFGetA(rgba)) / 2; color = gdTrueColorAlpha(TIFFGetR(rgba), TIFFGetG(rgba), TIFFGetB(rgba), a); /* set pixel colour to this colour */ gdImageSetPixel(im, x, height - y - 1, color); } } } gdFree(buffer); /* now reset colour merge for alpha blending routines */ gdImageAlphaBlending(im, alphaBlendingFlag); return success; } /* Function: gdImageCreateFromTiffCtx Create a gdImage from a TIFF file input from an gdIOCtx. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTiffCtx(gdIOCtx *infile) { TIFF *tif; tiff_handle *th; uint16 bps, spp, photometric; uint16 orientation; int width, height; uint16 extra, *extra_types; uint16 planar; char has_alpha, is_bw, is_gray; char force_rgba = FALSE; char save_transparent; int image_type; int ret; float res_float; gdImagePtr im = NULL; th = new_tiff_handle(infile); if (!th) { return NULL; } tif = TIFFClientOpen("", "rb", th, tiff_readproc, tiff_writeproc, tiff_seekproc, tiff_closeproc, tiff_sizeproc, tiff_mapproc, tiff_unmapproc); if (!tif) { gd_error("Cannot open TIFF image"); gdFree(th); return NULL; } if (!TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &width)) { gd_error("TIFF error, Cannot read image width"); goto error; } if (!TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &height)) { gd_error("TIFF error, Cannot read image width"); goto error; } TIFFGetFieldDefaulted (tif, TIFFTAG_BITSPERSAMPLE, &bps); /* Unsupported bps, force to RGBA */ if (1/*bps > 8 && bps != 16*/) { force_rgba = TRUE; } TIFFGetFieldDefaulted (tif, TIFFTAG_SAMPLESPERPIXEL, &spp); if (!TIFFGetField (tif, TIFFTAG_EXTRASAMPLES, &extra, &extra_types)) { extra = 0; } if (!TIFFGetField (tif, TIFFTAG_PHOTOMETRIC, &photometric)) { uint16 compression; if (TIFFGetField(tif, TIFFTAG_COMPRESSION, &compression) && (compression == COMPRESSION_CCITTFAX3 || compression == COMPRESSION_CCITTFAX4 || compression == COMPRESSION_CCITTRLE || compression == COMPRESSION_CCITTRLEW)) { gd_error("Could not get photometric. " "Image is CCITT compressed, assuming min-is-white"); photometric = PHOTOMETRIC_MINISWHITE; } else { gd_error("Could not get photometric. " "Assuming min-is-black"); photometric = PHOTOMETRIC_MINISBLACK; } } save_transparent = FALSE; /* test if the extrasample represents an associated alpha channel... 
*/ if (extra > 0 && (extra_types[0] == EXTRASAMPLE_ASSOCALPHA)) { has_alpha = TRUE; save_transparent = FALSE; --extra; } else if (extra > 0 && (extra_types[0] == EXTRASAMPLE_UNASSALPHA)) { has_alpha = TRUE; save_transparent = TRUE; --extra; } else if (extra > 0 && (extra_types[0] == EXTRASAMPLE_UNSPECIFIED)) { /* assuming unassociated alpha if unspecified */ gd_error("alpha channel type not defined, assuming alpha is not premultiplied"); has_alpha = TRUE; save_transparent = TRUE; --extra; } else { has_alpha = FALSE; } if (photometric == PHOTOMETRIC_RGB && spp > 3 + extra) { has_alpha = TRUE; extra = spp - 4; } else if (photometric != PHOTOMETRIC_RGB && spp > 1 + extra) { has_alpha = TRUE; extra = spp - 2; } is_bw = FALSE; is_gray = FALSE; switch (photometric) { case PHOTOMETRIC_MINISBLACK: case PHOTOMETRIC_MINISWHITE: if (!has_alpha && bps == 1 && spp == 1) { image_type = GD_INDEXED; is_bw = TRUE; } else { image_type = GD_GRAY; } break; case PHOTOMETRIC_RGB: image_type = GD_RGB; break; case PHOTOMETRIC_PALETTE: image_type = GD_INDEXED; break; default: force_rgba = TRUE; break; } if (!TIFFGetField (tif, TIFFTAG_PLANARCONFIG, &planar)) { planar = PLANARCONFIG_CONTIG; } /* Force rgba if image plans are not contiguous */ if (force_rgba || planar != PLANARCONFIG_CONTIG) { image_type = GD_RGB; } if (!force_rgba && (image_type == GD_PALETTE || image_type == GD_INDEXED || image_type == GD_GRAY)) { im = gdImageCreate(width, height); if (!im) goto error; readTiffColorMap(im, tif, is_bw, photometric); } else { im = gdImageCreateTrueColor(width, height); if (!im) goto error; } #ifdef DEBUG printf("force rgba: %i\n", force_rgba); printf("has_alpha: %i\n", has_alpha); printf("save trans: %i\n", save_transparent); printf("is_bw: %i\n", is_bw); printf("is_gray: %i\n", is_gray); printf("type: %i\n", image_type); #else (void)is_gray; (void)save_transparent; #endif if (force_rgba) { ret = createFromTiffRgba(tif, im); } else if (TIFFIsTiled(tif)) { ret = createFromTiffTiles(tif, im, bps, photometric, has_alpha, is_bw, extra); } else { ret = createFromTiffLines(tif, im, bps, photometric, has_alpha, is_bw, extra); } if (!ret) { gdImageDestroy(im); im = NULL; goto error; } if (TIFFGetField(tif, TIFFTAG_XRESOLUTION, &res_float)) { im->res_x = (unsigned int)res_float; //truncate } if (TIFFGetField(tif, TIFFTAG_YRESOLUTION, &res_float)) { im->res_y = (unsigned int)res_float; //truncate } if (TIFFGetField(tif, TIFFTAG_ORIENTATION, &orientation)) { switch (orientation) { case ORIENTATION_TOPLEFT: case ORIENTATION_TOPRIGHT: case ORIENTATION_BOTRIGHT: case ORIENTATION_BOTLEFT: break; default: gd_error("Orientation %d not handled yet!", orientation); break; } } error: TIFFClose(tif); gdFree(th); return im; } /* Function: gdImageCreateFromTIFF */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTiff(FILE *inFile) { gdImagePtr im; gdIOCtx *in = gdNewFileCtx(inFile); if (in == NULL) return NULL; im = gdImageCreateFromTiffCtx(in); in->gd_free(in); return im; } /* Function: gdImageCreateFromTiffPtr */ BGD_DECLARE(gdImagePtr) gdImageCreateFromTiffPtr(int size, void *data) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if (in == NULL) return NULL; im = gdImageCreateFromTiffCtx(in); in->gd_free(in); return im; } /* Function: gdImageTiff */ BGD_DECLARE(void) gdImageTiff(gdImagePtr im, FILE *outFile) { gdIOCtx *out = gdNewFileCtx(outFile); if (out == NULL) return; gdImageTiffCtx(im, out); /* what's an fg again? 
*/ out->gd_free(out); } /* Function: gdImageTiffPtr */ BGD_DECLARE(void *) gdImageTiffPtr(gdImagePtr im, int *size) { void *rv; gdIOCtx *out = gdNewDynamicCtx (2048, NULL); if (out == NULL) return NULL; gdImageTiffCtx(im, out); /* what's an fg again? */ rv = gdDPExtractData(out, size); out->gd_free(out); return rv; } #endif
static int createFromTiffRgba(TIFF * tif, gdImagePtr im)
{
	int a;
	int x, y;
	int alphaBlendingFlag = 0;
	int color;
	int width = im->sx;
	int height = im->sy;
	uint32 *buffer;
	uint32 rgba;

	/* switch off colour merging on target gd image just while we write out
	 * content - we want to preserve the alpha data until the user chooses
	 * what to do with the image */
	alphaBlendingFlag = im->alphaBlendingFlag;
	gdImageAlphaBlending(im, 0);

	buffer = (uint32 *) gdCalloc(sizeof(uint32), width * height);
	if (!buffer) {
		return GD_FAILURE;
	}

	TIFFReadRGBAImage(tif, width, height, buffer, 0);

	for(y = 0; y < height; y++) {
		for(x = 0; x < width; x++) {
			/* if it doesn't already exist, allocate a new colour,
			 * else use existing one */
			rgba = buffer[(y * width + x)];
			a = (0xff - TIFFGetA(rgba)) / 2;
			color = gdTrueColorAlpha(TIFFGetR(rgba), TIFFGetG(rgba), TIFFGetB(rgba), a);

			/* set pixel colour to this colour */
			gdImageSetPixel(im, x, height - y - 1, color);
		}
	}

	gdFree(buffer);

	/* now reset colour merge for alpha blending routines */
	gdImageAlphaBlending(im, alphaBlendingFlag);
	return GD_SUCCESS;
}
static int createFromTiffRgba(TIFF * tif, gdImagePtr im)
{
	int a;
	int x, y;
	int alphaBlendingFlag = 0;
	int color;
	int width = im->sx;
	int height = im->sy;
	uint32 *buffer;
	uint32 rgba;
	int success;

	/* switch off colour merging on target gd image just while we write out
	 * content - we want to preserve the alpha data until the user chooses
	 * what to do with the image */
	alphaBlendingFlag = im->alphaBlendingFlag;
	gdImageAlphaBlending(im, 0);

	buffer = (uint32 *) gdCalloc(sizeof(uint32), width * height);
	if (!buffer) {
		return GD_FAILURE;
	}

	success = TIFFReadRGBAImage(tif, width, height, buffer, 1);

	if (success) {
		for(y = 0; y < height; y++) {
			for(x = 0; x < width; x++) {
				/* if it doesn't already exist, allocate a new colour,
				 * else use existing one */
				rgba = buffer[(y * width + x)];
				a = (0xff - TIFFGetA(rgba)) / 2;
				color = gdTrueColorAlpha(TIFFGetR(rgba), TIFFGetG(rgba), TIFFGetB(rgba), a);

				/* set pixel colour to this colour */
				gdImageSetPixel(im, x, height - y - 1, color);
			}
		}
	}

	gdFree(buffer);

	/* now reset colour merge for alpha blending routines */
	gdImageAlphaBlending(im, alphaBlendingFlag);
	return success;
}
{'added': [
    (762, '\tint success;'),
    (775, '\tsuccess = TIFFReadRGBAImage(tif, width, height, buffer, 1);'),
    (776, ''),
    (777, '\tif (success) {'),
    (778, '\t\tfor(y = 0; y < height; y++) {'),
    (779, '\t\t\tfor(x = 0; x < width; x++) {'),
    (780, "\t\t\t\t/* if it doesn't already exist, allocate a new colour,"),
    (781, '\t\t\t\t * else use existing one */'),
    (782, '\t\t\t\trgba = buffer[(y * width + x)];'),
    (783, '\t\t\t\ta = (0xff - TIFFGetA(rgba)) / 2;'),
    (784, '\t\t\t\tcolor = gdTrueColorAlpha(TIFFGetR(rgba), TIFFGetG(rgba), TIFFGetB(rgba), a);'),
    (785, ''),
    (786, '\t\t\t\t/* set pixel colour to this colour */'),
    (787, '\t\t\t\tgdImageSetPixel(im, x, height - y - 1, color);'),
    (788, '\t\t\t}'),
    (796, '\treturn success;')],
 'deleted': [
    (774, '\tTIFFReadRGBAImage(tif, width, height, buffer, 0);'),
    (775, ''),
    (776, '\tfor(y = 0; y < height; y++) {'),
    (777, '\t\tfor(x = 0; x < width; x++) {'),
    (778, "\t\t\t/* if it doesn't already exist, allocate a new colour,"),
    (779, '\t\t\t * else use existing one */'),
    (780, '\t\t\trgba = buffer[(y * width + x)];'),
    (781, '\t\t\ta = (0xff - TIFFGetA(rgba)) / 2;'),
    (782, '\t\t\tcolor = gdTrueColorAlpha(TIFFGetR(rgba), TIFFGetG(rgba), TIFFGetB(rgba), a);'),
    (783, ''),
    (784, '\t\t\t/* set pixel colour to this colour */'),
    (785, '\t\t\tgdImageSetPixel(im, x, height - y - 1, color);'),
    (793, '\treturn GD_SUCCESS;')]}
16
13
736
4,609
https://github.com/libgd/libgd
CVE-2016-6911
['CWE-125']
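Note on this record: the diff above is the whole fix — the patched createFromTiffRgba() calls TIFFReadRGBAImage() with stop_on_error set to 1 and only walks the RGBA raster when the read reports success, so a truncated or corrupt TIFF no longer drives the pixel loop over data that was never decoded (the CWE-125 out-of-bounds read recorded here). Below is a minimal standalone sketch of the same pattern against libtiff's public API; load_rgba() and its error handling are illustrative helpers, not part of libgd.

#include <stdlib.h>
#include <tiffio.h>

/* Minimal sketch, assuming libtiff's TIFFReadRGBAImage():
 * request stop-on-error decoding and hand the raster to the caller
 * only when the whole image decoded successfully. */
static uint32 *load_rgba(TIFF *tif, uint32 width, uint32 height)
{
	uint32 *buffer = (uint32 *) calloc((size_t) width * height, sizeof(uint32));
	if (!buffer) {
		return NULL;
	}

	/* last argument = 1: abort on decode errors instead of returning
	 * a partially written raster */
	if (!TIFFReadRGBAImage(tif, width, height, buffer, 1)) {
		free(buffer);
		return NULL;   /* caller must treat this as a failed read */
	}

	return buffer;
}

Returning the raster only on success keeps the caller's loops tied to data that was actually produced by the decoder, which is the essence of the hardening captured by this record.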
var_unserializer.c
object_common1
/* Generated by re2c 0.13.7.5 */ #line 1 "ext/standard/var_unserializer.re" /* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Sascha Schumann <sascha@schumann.cx> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include "php.h" #include "ext/standard/php_var.h" #include "php_incomplete_class.h" /* {{{ reference-handling for unserializer: var_* */ #define VAR_ENTRIES_MAX 1024 #define VAR_ENTRIES_DBG 0 typedef struct { zval *data[VAR_ENTRIES_MAX]; long used_slots; void *next; } var_entries; static inline void var_push(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash = (*var_hashx)->last; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push(%ld): %d\n", var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first) { (*var_hashx)->first = var_hash; } else { ((var_entries *) (*var_hashx)->last)->next = var_hash; } (*var_hashx)->last = var_hash; } var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_push_dtor(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash; if (!var_hashx || !*var_hashx) { return; } var_hash = (*var_hashx)->last_dtor; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push_dtor(%p, %ld): %d\n", *rval, var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first_dtor) { (*var_hashx)->first_dtor = var_hash; } else { ((var_entries *) (*var_hashx)->last_dtor)->next = var_hash; } (*var_hashx)->last_dtor = var_hash; } Z_ADDREF_PP(rval); var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_push_dtor_no_addref(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash; if (!var_hashx || !*var_hashx) { return; } var_hash = (*var_hashx)->last_dtor; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push_dtor_no_addref(%p, %ld): %d (%d)\n", *rval, var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval), Z_REFCOUNT_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first_dtor) { (*var_hashx)->first_dtor = var_hash; } else { ((var_entries *) (*var_hashx)->last_dtor)->next = var_hash; } (*var_hashx)->last_dtor = var_hash; } var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_replace(php_unserialize_data_t *var_hashx, zval *ozval, zval **nzval) { long i; var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_replace(%ld): %d\n", 
var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(nzval)); #endif while (var_hash) { for (i = 0; i < var_hash->used_slots; i++) { if (var_hash->data[i] == ozval) { var_hash->data[i] = *nzval; /* do not break here */ } } var_hash = var_hash->next; } } static int var_access(php_unserialize_data_t *var_hashx, long id, zval ***store) { var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_access(%ld): %ld\n", var_hash?var_hash->used_slots:-1L, id); #endif while (id >= VAR_ENTRIES_MAX && var_hash && var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = var_hash->next; id -= VAR_ENTRIES_MAX; } if (!var_hash) return !SUCCESS; if (id < 0 || id >= var_hash->used_slots) return !SUCCESS; *store = &var_hash->data[id]; return SUCCESS; } PHPAPI void var_destroy(php_unserialize_data_t *var_hashx) { void *next; long i; var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_destroy(%ld)\n", var_hash?var_hash->used_slots:-1L); #endif while (var_hash) { next = var_hash->next; efree(var_hash); var_hash = next; } var_hash = (*var_hashx)->first_dtor; while (var_hash) { for (i = 0; i < var_hash->used_slots; i++) { #if VAR_ENTRIES_DBG fprintf(stderr, "var_destroy dtor(%p, %ld)\n", var_hash->data[i], Z_REFCOUNT_P(var_hash->data[i])); #endif zval_ptr_dtor(&var_hash->data[i]); } next = var_hash->next; efree(var_hash); var_hash = next; } } /* }}} */ static char *unserialize_str(const unsigned char **p, size_t *len, size_t maxlen) { size_t i, j; char *str = safe_emalloc(*len, 1, 1); unsigned char *end = *(unsigned char **)p+maxlen; if (end < *p) { efree(str); return NULL; } for (i = 0; i < *len; i++) { if (*p >= end) { efree(str); return NULL; } if (**p != '\\') { str[i] = (char)**p; } else { unsigned char ch = 0; for (j = 0; j < 2; j++) { (*p)++; if (**p >= '0' && **p <= '9') { ch = (ch << 4) + (**p -'0'); } else if (**p >= 'a' && **p <= 'f') { ch = (ch << 4) + (**p -'a'+10); } else if (**p >= 'A' && **p <= 'F') { ch = (ch << 4) + (**p -'A'+10); } else { efree(str); return NULL; } } str[i] = (char)ch; } (*p)++; } str[i] = 0; *len = i; return str; } #define YYFILL(n) do { } while (0) #define YYCTYPE unsigned char #define YYCURSOR cursor #define YYLIMIT limit #define YYMARKER marker #line 249 "ext/standard/var_unserializer.re" static inline long parse_iv2(const unsigned char *p, const unsigned char **q) { char cursor; long result = 0; int neg = 0; switch (*p) { case '-': neg++; /* fall-through */ case '+': p++; } while (1) { cursor = (char)*p; if (cursor >= '0' && cursor <= '9') { result = result * 10 + (size_t)(cursor - (unsigned char)'0'); } else { break; } p++; } if (q) *q = p; if (neg) return -result; return result; } static inline long parse_iv(const unsigned char *p) { return parse_iv2(p, NULL); } /* no need to check for length - re2c already did */ static inline size_t parse_uiv(const unsigned char *p) { unsigned char cursor; size_t result = 0; if (*p == '+') { p++; } while (1) { cursor = *p; if (cursor >= '0' && cursor <= '9') { result = result * 10 + (size_t)(cursor - (unsigned char)'0'); } else { break; } p++; } return result; } #define UNSERIALIZE_PARAMETER zval **rval, const unsigned char **p, const unsigned char *max, php_unserialize_data_t *var_hash TSRMLS_DC #define UNSERIALIZE_PASSTHRU rval, p, max, var_hash TSRMLS_CC static inline int process_nested_data(UNSERIALIZE_PARAMETER, HashTable *ht, long elements, int objprops) { while (elements-- > 0) { zval *key, *data, **old_data; ALLOC_INIT_ZVAL(key); if (!php_var_unserialize(&key, p, max, NULL 
TSRMLS_CC)) { var_push_dtor_no_addref(var_hash, &key); return 0; } if (Z_TYPE_P(key) != IS_LONG && Z_TYPE_P(key) != IS_STRING) { var_push_dtor_no_addref(var_hash, &key); return 0; } ALLOC_INIT_ZVAL(data); if (!php_var_unserialize(&data, p, max, var_hash TSRMLS_CC)) { var_push_dtor_no_addref(var_hash, &key); var_push_dtor_no_addref(var_hash, &data); return 0; } if (!objprops) { switch (Z_TYPE_P(key)) { case IS_LONG: if (zend_hash_index_find(ht, Z_LVAL_P(key), (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_index_update(ht, Z_LVAL_P(key), &data, sizeof(data), NULL); break; case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_symtable_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof(data), NULL); break; } } else { /* object properties should include no integers */ convert_to_string(key); if (zend_hash_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof data, NULL); } var_push_dtor(var_hash, &data); var_push_dtor_no_addref(var_hash, &key); if (elements && *(*p-1) != ';' && *(*p-1) != '}') { (*p)--; return 0; } } return 1; } static inline int finish_nested_data(UNSERIALIZE_PARAMETER) { if (*((*p)++) == '}') return 1; #if SOMETHING_NEW_MIGHT_LEAD_TO_CRASH_ENABLE_IF_YOU_ARE_BRAVE zval_ptr_dtor(rval); #endif return 0; } static inline int object_custom(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long datalen; datalen = parse_iv2((*p) + 2, p); (*p) += 2; if (datalen < 0 || (max - (*p)) <= datalen) { zend_error(E_WARNING, "Insufficient data for unserializing - %ld required, %ld present", datalen, (long)(max - (*p))); return 0; } if (ce->unserialize == NULL) { zend_error(E_WARNING, "Class %s has no unserializer", ce->name); object_init_ex(*rval, ce); } else if (ce->unserialize(rval, ce, (const unsigned char*)*p, datalen, (zend_unserialize_data *)var_hash TSRMLS_CC) != SUCCESS) { return 0; } (*p) += datalen; return finish_nested_data(UNSERIALIZE_PASSTHRU); } static inline long object_common1(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long elements; elements = parse_iv2((*p) + 2, p); (*p) += 2; if (ce->serialize == NULL) { object_init_ex(*rval, ce); } else { /* If this class implements Serializable, it should not land here but in object_custom(). The passed string obviously doesn't descend from the regular serializer. */ zend_error(E_WARNING, "Erroneous data format for unserializing '%s'", ce->name); return 0; } return elements; } #ifdef PHP_WIN32 # pragma optimize("", off) #endif static inline int object_common2(UNSERIALIZE_PARAMETER, long elements) { zval *retval_ptr = NULL; zval fname; if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_OBJPROP_PP(rval), elements, 1)) { /* We've got partially constructed object on our hands here. Wipe it. 
*/ if(Z_TYPE_PP(rval) == IS_OBJECT) { zend_hash_clean(Z_OBJPROP_PP(rval)); zend_object_store_ctor_failed(*rval TSRMLS_CC); } ZVAL_NULL(*rval); return 0; } if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (Z_OBJCE_PP(rval) != PHP_IC_ENTRY && zend_hash_exists(&Z_OBJCE_PP(rval)->function_table, "__wakeup", sizeof("__wakeup"))) { INIT_PZVAL(&fname); ZVAL_STRINGL(&fname, "__wakeup", sizeof("__wakeup") - 1, 0); BG(serialize_lock)++; call_user_function_ex(CG(function_table), rval, &fname, &retval_ptr, 0, 0, 1, NULL TSRMLS_CC); BG(serialize_lock)--; } if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #ifdef PHP_WIN32 # pragma optimize("", on) #endif PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 496 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 861 "ext/standard/var_unserializer.re" { return 0; } #line 558 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; goto yy3; yy5: yych = *++YYCURSOR; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; #line 855 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? 
*/ } #line 607 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; yy17: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych == '+') goto yy19; yy18: YYCURSOR = YYMARKER; goto yy3; yy19: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } goto yy18; yy20: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych <= '/') goto yy18; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 708 "ext/standard/var_unserializer.re" { size_t len, len2, len3, maxlen; long elements; char *class_name; zend_class_entry *ce; zend_class_entry **pce; int incomplete_class = 0; int custom_object = 0; zval *user_func; zval *retval_ptr; zval **args[1]; zval *arg_func_name; if (!var_hash) return 0; if (*start == 'C') { custom_object = 1; } INIT_PZVAL(*rval); len2 = len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len || len == 0) { *p = start + 2; return 0; } class_name = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR+1) != ':') { *p = YYCURSOR+1; return 0; } len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\"); if (len3 != len) { *p = YYCURSOR + len3 - len; return 0; } class_name = estrndup(class_name, len); do { /* Try to find class directly */ BG(serialize_lock)++; if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } ce = *pce; break; } BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { incomplete_class = 1; ce = PHP_IC_ENTRY; break; } /* Call unserialize callback */ MAKE_STD_ZVAL(user_func); ZVAL_STRING(user_func, PG(unserialize_callback_func), 1); args[0] = &arg_func_name; MAKE_STD_ZVAL(arg_func_name); ZVAL_STRING(arg_func_name, class_name, 1); BG(serialize_lock)++; if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } BG(serialize_lock)--; if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } /* The callback function may have defined the class */ if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { ce = *pce; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; } 
zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } while (1); *p = YYCURSOR; if (custom_object) { int ret; ret = object_custom(UNSERIALIZE_PASSTHRU, ce); if (ret && incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return ret; } elements = object_common1(UNSERIALIZE_PASSTHRU, ce); if (incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 785 "ext/standard/var_unserializer.c" yy25: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy26; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; goto yy18; } yy26: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy27: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 699 "ext/standard/var_unserializer.re" { if (!var_hash) return 0; INIT_PZVAL(*rval); return object_common2(UNSERIALIZE_PASSTHRU, object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR)); } #line 819 "ext/standard/var_unserializer.c" yy32: yych = *++YYCURSOR; if (yych == '+') goto yy33; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; goto yy18; yy33: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy34: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '{') goto yy18; ++YYCURSOR; #line 678 "ext/standard/var_unserializer.re" { long elements = parse_iv(start + 2); /* use iv() not uiv() in order to check data range */ *p = YYCURSOR; if (!var_hash) return 0; if (elements < 0) { return 0; } INIT_PZVAL(*rval); array_init_size(*rval, elements); if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #line 861 "ext/standard/var_unserializer.c" yy39: yych = *++YYCURSOR; if (yych == '+') goto yy40; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; goto yy18; yy40: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy41: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 643 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) { return 0; } if (*(YYCURSOR) != '"') { efree(str); *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { efree(str); *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 0); return 1; } #line 917 "ext/standard/var_unserializer.c" yy46: yych = *++YYCURSOR; if (yych == '+') goto yy47; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; goto yy18; yy47: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy48: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 610 
"ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } str = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 1); return 1; } #line 971 "ext/standard/var_unserializer.c" yy53: yych = *++YYCURSOR; if (yych <= '/') { if (yych <= ',') { if (yych == '+') goto yy57; goto yy18; } else { if (yych <= '-') goto yy55; if (yych <= '.') goto yy60; goto yy18; } } else { if (yych <= 'I') { if (yych <= '9') goto yy58; if (yych <= 'H') goto yy18; goto yy56; } else { if (yych != 'N') goto yy18; } } yych = *++YYCURSOR; if (yych == 'A') goto yy76; goto yy18; yy55: yych = *++YYCURSOR; if (yych <= '/') { if (yych == '.') goto yy60; goto yy18; } else { if (yych <= '9') goto yy58; if (yych != 'I') goto yy18; } yy56: yych = *++YYCURSOR; if (yych == 'N') goto yy72; goto yy18; yy57: yych = *++YYCURSOR; if (yych == '.') goto yy60; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy58: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ':') { if (yych <= '.') { if (yych <= '-') goto yy18; goto yy70; } else { if (yych <= '/') goto yy18; if (yych <= '9') goto yy58; goto yy18; } } else { if (yych <= 'E') { if (yych <= ';') goto yy63; if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy60: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy61: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy61; if (yych <= ':') goto yy18; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy63: ++YYCURSOR; #line 600 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 use_double: #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL)); return 1; } #line 1069 "ext/standard/var_unserializer.c" yy65: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy66; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; } yy66: yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy69; goto yy18; } else { if (yych <= '-') goto yy69; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; } yy67: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; if (yych == ';') goto yy63; goto yy18; yy69: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; yy70: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy70; if (yych <= ':') goto yy18; goto yy63; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy72: yych = *++YYCURSOR; if (yych != 'F') goto yy18; yy73: yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 585 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); if (!strncmp(start + 2, "NAN", 3)) { ZVAL_DOUBLE(*rval, php_get_nan()); } else if (!strncmp(start + 2, "INF", 3)) { ZVAL_DOUBLE(*rval, php_get_inf()); } else if (!strncmp(start + 2, "-INF", 4)) { ZVAL_DOUBLE(*rval, -php_get_inf()); } return 
1; } #line 1143 "ext/standard/var_unserializer.c" yy76: yych = *++YYCURSOR; if (yych == 'N') goto yy73; goto yy18; yy77: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy78; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; goto yy18; } yy78: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy79: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; if (yych != ';') goto yy18; ++YYCURSOR; #line 558 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 int digits = YYCURSOR - start - 3; if (start[2] == '-' || start[2] == '+') { digits--; } /* Use double for large long values that were serialized on a 64-bit system */ if (digits >= MAX_LENGTH_OF_LONG - 1) { if (digits == MAX_LENGTH_OF_LONG - 1) { int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1); if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) { goto use_double; } } else { goto use_double; } } #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_LONG(*rval, parse_iv(start + 2)); return 1; } #line 1197 "ext/standard/var_unserializer.c" yy83: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= '2') goto yy18; yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 551 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_BOOL(*rval, parse_iv(start + 2)); return 1; } #line 1212 "ext/standard/var_unserializer.c" yy87: ++YYCURSOR; #line 544 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_NULL(*rval); return 1; } #line 1222 "ext/standard/var_unserializer.c" yy89: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy90; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; goto yy18; } yy90: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy91: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; if (yych != ';') goto yy18; ++YYCURSOR; #line 521 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval == *rval_ref) return 0; if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_UNSET_ISREF_PP(rval); return 1; } #line 1268 "ext/standard/var_unserializer.c" yy95: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy96; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; goto yy18; } yy96: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy97: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; if (yych != ';') goto yy18; ++YYCURSOR; #line 500 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_SET_ISREF_PP(rval); return 1; } #line 1312 "ext/standard/var_unserializer.c" } #line 863 "ext/standard/var_unserializer.re" return 0; }
/* Generated by re2c 0.13.7.5 */ #line 1 "ext/standard/var_unserializer.re" /* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Sascha Schumann <sascha@schumann.cx> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include "php.h" #include "ext/standard/php_var.h" #include "php_incomplete_class.h" /* {{{ reference-handling for unserializer: var_* */ #define VAR_ENTRIES_MAX 1024 #define VAR_ENTRIES_DBG 0 typedef struct { zval *data[VAR_ENTRIES_MAX]; long used_slots; void *next; } var_entries; static inline void var_push(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash = (*var_hashx)->last; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push(%ld): %d\n", var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first) { (*var_hashx)->first = var_hash; } else { ((var_entries *) (*var_hashx)->last)->next = var_hash; } (*var_hashx)->last = var_hash; } var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_push_dtor(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash; if (!var_hashx || !*var_hashx) { return; } var_hash = (*var_hashx)->last_dtor; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push_dtor(%p, %ld): %d\n", *rval, var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first_dtor) { (*var_hashx)->first_dtor = var_hash; } else { ((var_entries *) (*var_hashx)->last_dtor)->next = var_hash; } (*var_hashx)->last_dtor = var_hash; } Z_ADDREF_PP(rval); var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_push_dtor_no_addref(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash; if (!var_hashx || !*var_hashx) { return; } var_hash = (*var_hashx)->last_dtor; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push_dtor_no_addref(%p, %ld): %d (%d)\n", *rval, var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval), Z_REFCOUNT_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first_dtor) { (*var_hashx)->first_dtor = var_hash; } else { ((var_entries *) (*var_hashx)->last_dtor)->next = var_hash; } (*var_hashx)->last_dtor = var_hash; } var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_replace(php_unserialize_data_t *var_hashx, zval *ozval, zval **nzval) { long i; var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_replace(%ld): %d\n", 
var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(nzval)); #endif while (var_hash) { for (i = 0; i < var_hash->used_slots; i++) { if (var_hash->data[i] == ozval) { var_hash->data[i] = *nzval; /* do not break here */ } } var_hash = var_hash->next; } } static int var_access(php_unserialize_data_t *var_hashx, long id, zval ***store) { var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_access(%ld): %ld\n", var_hash?var_hash->used_slots:-1L, id); #endif while (id >= VAR_ENTRIES_MAX && var_hash && var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = var_hash->next; id -= VAR_ENTRIES_MAX; } if (!var_hash) return !SUCCESS; if (id < 0 || id >= var_hash->used_slots) return !SUCCESS; *store = &var_hash->data[id]; return SUCCESS; } PHPAPI void var_destroy(php_unserialize_data_t *var_hashx) { void *next; long i; var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_destroy(%ld)\n", var_hash?var_hash->used_slots:-1L); #endif while (var_hash) { next = var_hash->next; efree(var_hash); var_hash = next; } var_hash = (*var_hashx)->first_dtor; while (var_hash) { for (i = 0; i < var_hash->used_slots; i++) { #if VAR_ENTRIES_DBG fprintf(stderr, "var_destroy dtor(%p, %ld)\n", var_hash->data[i], Z_REFCOUNT_P(var_hash->data[i])); #endif zval_ptr_dtor(&var_hash->data[i]); } next = var_hash->next; efree(var_hash); var_hash = next; } } /* }}} */ static char *unserialize_str(const unsigned char **p, size_t *len, size_t maxlen) { size_t i, j; char *str = safe_emalloc(*len, 1, 1); unsigned char *end = *(unsigned char **)p+maxlen; if (end < *p) { efree(str); return NULL; } for (i = 0; i < *len; i++) { if (*p >= end) { efree(str); return NULL; } if (**p != '\\') { str[i] = (char)**p; } else { unsigned char ch = 0; for (j = 0; j < 2; j++) { (*p)++; if (**p >= '0' && **p <= '9') { ch = (ch << 4) + (**p -'0'); } else if (**p >= 'a' && **p <= 'f') { ch = (ch << 4) + (**p -'a'+10); } else if (**p >= 'A' && **p <= 'F') { ch = (ch << 4) + (**p -'A'+10); } else { efree(str); return NULL; } } str[i] = (char)ch; } (*p)++; } str[i] = 0; *len = i; return str; } #define YYFILL(n) do { } while (0) #define YYCTYPE unsigned char #define YYCURSOR cursor #define YYLIMIT limit #define YYMARKER marker #line 249 "ext/standard/var_unserializer.re" static inline long parse_iv2(const unsigned char *p, const unsigned char **q) { char cursor; long result = 0; int neg = 0; switch (*p) { case '-': neg++; /* fall-through */ case '+': p++; } while (1) { cursor = (char)*p; if (cursor >= '0' && cursor <= '9') { result = result * 10 + (size_t)(cursor - (unsigned char)'0'); } else { break; } p++; } if (q) *q = p; if (neg) return -result; return result; } static inline long parse_iv(const unsigned char *p) { return parse_iv2(p, NULL); } /* no need to check for length - re2c already did */ static inline size_t parse_uiv(const unsigned char *p) { unsigned char cursor; size_t result = 0; if (*p == '+') { p++; } while (1) { cursor = *p; if (cursor >= '0' && cursor <= '9') { result = result * 10 + (size_t)(cursor - (unsigned char)'0'); } else { break; } p++; } return result; } #define UNSERIALIZE_PARAMETER zval **rval, const unsigned char **p, const unsigned char *max, php_unserialize_data_t *var_hash TSRMLS_DC #define UNSERIALIZE_PASSTHRU rval, p, max, var_hash TSRMLS_CC static inline int process_nested_data(UNSERIALIZE_PARAMETER, HashTable *ht, long elements, int objprops) { while (elements-- > 0) { zval *key, *data, **old_data; ALLOC_INIT_ZVAL(key); if (!php_var_unserialize(&key, p, max, NULL 
TSRMLS_CC)) { var_push_dtor_no_addref(var_hash, &key); return 0; } if (Z_TYPE_P(key) != IS_LONG && Z_TYPE_P(key) != IS_STRING) { var_push_dtor_no_addref(var_hash, &key); return 0; } ALLOC_INIT_ZVAL(data); if (!php_var_unserialize(&data, p, max, var_hash TSRMLS_CC)) { var_push_dtor_no_addref(var_hash, &key); var_push_dtor_no_addref(var_hash, &data); return 0; } if (!objprops) { switch (Z_TYPE_P(key)) { case IS_LONG: if (zend_hash_index_find(ht, Z_LVAL_P(key), (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_index_update(ht, Z_LVAL_P(key), &data, sizeof(data), NULL); break; case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_symtable_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof(data), NULL); break; } } else { /* object properties should include no integers */ convert_to_string(key); if (zend_hash_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof data, NULL); } var_push_dtor(var_hash, &data); var_push_dtor_no_addref(var_hash, &key); if (elements && *(*p-1) != ';' && *(*p-1) != '}') { (*p)--; return 0; } } return 1; } static inline int finish_nested_data(UNSERIALIZE_PARAMETER) { if (*((*p)++) == '}') return 1; #if SOMETHING_NEW_MIGHT_LEAD_TO_CRASH_ENABLE_IF_YOU_ARE_BRAVE zval_ptr_dtor(rval); #endif return 0; } static inline int object_custom(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long datalen; datalen = parse_iv2((*p) + 2, p); (*p) += 2; if (datalen < 0 || (max - (*p)) <= datalen) { zend_error(E_WARNING, "Insufficient data for unserializing - %ld required, %ld present", datalen, (long)(max - (*p))); return 0; } if (ce->unserialize == NULL) { zend_error(E_WARNING, "Class %s has no unserializer", ce->name); object_init_ex(*rval, ce); } else if (ce->unserialize(rval, ce, (const unsigned char*)*p, datalen, (zend_unserialize_data *)var_hash TSRMLS_CC) != SUCCESS) { return 0; } (*p) += datalen; return finish_nested_data(UNSERIALIZE_PASSTHRU); } static inline long object_common1(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long elements; if( *p >= max - 2) { zend_error(E_WARNING, "Bad unserialize data"); return -1; } elements = parse_iv2((*p) + 2, p); (*p) += 2; if (ce->serialize == NULL) { object_init_ex(*rval, ce); } else { /* If this class implements Serializable, it should not land here but in object_custom(). The passed string obviously doesn't descend from the regular serializer. */ zend_error(E_WARNING, "Erroneous data format for unserializing '%s'", ce->name); return -1; } return elements; } #ifdef PHP_WIN32 # pragma optimize("", off) #endif static inline int object_common2(UNSERIALIZE_PARAMETER, long elements) { zval *retval_ptr = NULL; zval fname; if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_OBJPROP_PP(rval), elements, 1)) { /* We've got partially constructed object on our hands here. Wipe it. 
*/ if(Z_TYPE_PP(rval) == IS_OBJECT) { zend_hash_clean(Z_OBJPROP_PP(rval)); zend_object_store_ctor_failed(*rval TSRMLS_CC); } ZVAL_NULL(*rval); return 0; } if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (Z_OBJCE_PP(rval) != PHP_IC_ENTRY && zend_hash_exists(&Z_OBJCE_PP(rval)->function_table, "__wakeup", sizeof("__wakeup"))) { INIT_PZVAL(&fname); ZVAL_STRINGL(&fname, "__wakeup", sizeof("__wakeup") - 1, 0); BG(serialize_lock)++; call_user_function_ex(CG(function_table), rval, &fname, &retval_ptr, 0, 0, 1, NULL TSRMLS_CC); BG(serialize_lock)--; } if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #ifdef PHP_WIN32 # pragma optimize("", on) #endif PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 501 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 875 "ext/standard/var_unserializer.re" { return 0; } #line 563 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; goto yy3; yy5: yych = *++YYCURSOR; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; #line 869 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? 
*/ } #line 612 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; yy17: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych == '+') goto yy19; yy18: YYCURSOR = YYMARKER; goto yy3; yy19: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } goto yy18; yy20: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych <= '/') goto yy18; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 717 "ext/standard/var_unserializer.re" { size_t len, len2, len3, maxlen; long elements; char *class_name; zend_class_entry *ce; zend_class_entry **pce; int incomplete_class = 0; int custom_object = 0; zval *user_func; zval *retval_ptr; zval **args[1]; zval *arg_func_name; if (!var_hash) return 0; if (*start == 'C') { custom_object = 1; } INIT_PZVAL(*rval); len2 = len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len || len == 0) { *p = start + 2; return 0; } class_name = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR+1) != ':') { *p = YYCURSOR+1; return 0; } len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\"); if (len3 != len) { *p = YYCURSOR + len3 - len; return 0; } class_name = estrndup(class_name, len); do { /* Try to find class directly */ BG(serialize_lock)++; if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } ce = *pce; break; } BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { incomplete_class = 1; ce = PHP_IC_ENTRY; break; } /* Call unserialize callback */ MAKE_STD_ZVAL(user_func); ZVAL_STRING(user_func, PG(unserialize_callback_func), 1); args[0] = &arg_func_name; MAKE_STD_ZVAL(arg_func_name); ZVAL_STRING(arg_func_name, class_name, 1); BG(serialize_lock)++; if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } BG(serialize_lock)--; if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } /* The callback function may have defined the class */ if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { ce = *pce; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; } 
zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } while (1); *p = YYCURSOR; if (custom_object) { int ret; ret = object_custom(UNSERIALIZE_PASSTHRU, ce); if (ret && incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return ret; } elements = object_common1(UNSERIALIZE_PASSTHRU, ce); if (elements < 0) { efree(class_name); return 0; } if (incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 795 "ext/standard/var_unserializer.c" yy25: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy26; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; goto yy18; } yy26: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy27: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 704 "ext/standard/var_unserializer.re" { long elements; if (!var_hash) return 0; INIT_PZVAL(*rval); elements = object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR); if (elements < 0) { return 0; } return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 833 "ext/standard/var_unserializer.c" yy32: yych = *++YYCURSOR; if (yych == '+') goto yy33; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; goto yy18; yy33: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy34: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '{') goto yy18; ++YYCURSOR; #line 683 "ext/standard/var_unserializer.re" { long elements = parse_iv(start + 2); /* use iv() not uiv() in order to check data range */ *p = YYCURSOR; if (!var_hash) return 0; if (elements < 0) { return 0; } INIT_PZVAL(*rval); array_init_size(*rval, elements); if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #line 875 "ext/standard/var_unserializer.c" yy39: yych = *++YYCURSOR; if (yych == '+') goto yy40; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; goto yy18; yy40: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy41: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 648 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) { return 0; } if (*(YYCURSOR) != '"') { efree(str); *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { efree(str); *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 0); return 1; } #line 931 "ext/standard/var_unserializer.c" yy46: yych = *++YYCURSOR; if (yych == '+') goto yy47; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; goto yy18; yy47: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy48: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; if 
(yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 615 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } str = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 1); return 1; } #line 985 "ext/standard/var_unserializer.c" yy53: yych = *++YYCURSOR; if (yych <= '/') { if (yych <= ',') { if (yych == '+') goto yy57; goto yy18; } else { if (yych <= '-') goto yy55; if (yych <= '.') goto yy60; goto yy18; } } else { if (yych <= 'I') { if (yych <= '9') goto yy58; if (yych <= 'H') goto yy18; goto yy56; } else { if (yych != 'N') goto yy18; } } yych = *++YYCURSOR; if (yych == 'A') goto yy76; goto yy18; yy55: yych = *++YYCURSOR; if (yych <= '/') { if (yych == '.') goto yy60; goto yy18; } else { if (yych <= '9') goto yy58; if (yych != 'I') goto yy18; } yy56: yych = *++YYCURSOR; if (yych == 'N') goto yy72; goto yy18; yy57: yych = *++YYCURSOR; if (yych == '.') goto yy60; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy58: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ':') { if (yych <= '.') { if (yych <= '-') goto yy18; goto yy70; } else { if (yych <= '/') goto yy18; if (yych <= '9') goto yy58; goto yy18; } } else { if (yych <= 'E') { if (yych <= ';') goto yy63; if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy60: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy61: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy61; if (yych <= ':') goto yy18; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy63: ++YYCURSOR; #line 605 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 use_double: #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL)); return 1; } #line 1083 "ext/standard/var_unserializer.c" yy65: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy66; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; } yy66: yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy69; goto yy18; } else { if (yych <= '-') goto yy69; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; } yy67: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; if (yych == ';') goto yy63; goto yy18; yy69: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; yy70: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy70; if (yych <= ':') goto yy18; goto yy63; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy72: yych = *++YYCURSOR; if (yych != 'F') goto yy18; yy73: yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 590 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); if (!strncmp(start + 2, "NAN", 3)) { ZVAL_DOUBLE(*rval, php_get_nan()); } else if (!strncmp(start + 2, "INF", 3)) { ZVAL_DOUBLE(*rval, 
php_get_inf()); } else if (!strncmp(start + 2, "-INF", 4)) { ZVAL_DOUBLE(*rval, -php_get_inf()); } return 1; } #line 1157 "ext/standard/var_unserializer.c" yy76: yych = *++YYCURSOR; if (yych == 'N') goto yy73; goto yy18; yy77: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy78; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; goto yy18; } yy78: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy79: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; if (yych != ';') goto yy18; ++YYCURSOR; #line 563 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 int digits = YYCURSOR - start - 3; if (start[2] == '-' || start[2] == '+') { digits--; } /* Use double for large long values that were serialized on a 64-bit system */ if (digits >= MAX_LENGTH_OF_LONG - 1) { if (digits == MAX_LENGTH_OF_LONG - 1) { int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1); if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) { goto use_double; } } else { goto use_double; } } #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_LONG(*rval, parse_iv(start + 2)); return 1; } #line 1211 "ext/standard/var_unserializer.c" yy83: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= '2') goto yy18; yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 556 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_BOOL(*rval, parse_iv(start + 2)); return 1; } #line 1226 "ext/standard/var_unserializer.c" yy87: ++YYCURSOR; #line 549 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_NULL(*rval); return 1; } #line 1236 "ext/standard/var_unserializer.c" yy89: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy90; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; goto yy18; } yy90: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy91: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; if (yych != ';') goto yy18; ++YYCURSOR; #line 526 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval == *rval_ref) return 0; if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_UNSET_ISREF_PP(rval); return 1; } #line 1282 "ext/standard/var_unserializer.c" yy95: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy96; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; goto yy18; } yy96: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy97: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; if (yych != ';') goto yy18; ++YYCURSOR; #line 505 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_SET_ISREF_PP(rval); return 1; } #line 1326 "ext/standard/var_unserializer.c" } #line 877 "ext/standard/var_unserializer.re" return 0; }
static inline long object_common1(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long elements; elements = parse_iv2((*p) + 2, p); (*p) += 2; if (ce->serialize == NULL) { object_init_ex(*rval, ce); } else { /* If this class implements Serializable, it should not land here but in object_custom(). The passed string obviously doesn't descend from the regular serializer. */ zend_error(E_WARNING, "Erroneous data format for unserializing '%s'", ce->name); return 0; } return elements; }
static inline long object_common1(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long elements; if( *p >= max - 2) { zend_error(E_WARNING, "Bad unserialize data"); return -1; } elements = parse_iv2((*p) + 2, p); (*p) += 2; if (ce->serialize == NULL) { object_init_ex(*rval, ce); } else { /* If this class implements Serializable, it should not land here but in object_custom(). The passed string obviously doesn't descend from the regular serializer. */ zend_error(E_WARNING, "Erroneous data format for unserializing '%s'", ce->name); return -1; } return elements; }
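The func_before/func_after pair above shows the core of the CVE-2016-10161 patch: object_common1() now refuses to read the two-byte prefix when the cursor is already within two bytes of the end of the buffer, and it reports failure as -1 instead of 0 so callers can tell "parse error" apart from "zero elements". Below is a minimal, self-contained sketch of that guard-before-parse pattern; all names (parse_len, read_element_count) are hypothetical stand-ins, not the PHP source.

/* sketch.c -- guard the cursor before skipping a fixed-size prefix,
 * and use -1 as an error sentinel distinct from a legitimate count of 0. */
#include <stdio.h>

static long parse_len(const unsigned char *p, const unsigned char **q)
{
    long v = 0;
    while (*p >= '0' && *p <= '9') { v = v * 10 + (*p - '0'); p++; }
    if (q) *q = p;                 /* report where parsing stopped */
    return v;
}

static long read_element_count(const unsigned char **p, const unsigned char *max)
{
    if (*p >= max - 2)             /* need room for the 2-byte prefix we skip */
        return -1;                 /* error, distinguishable from 0 elements */
    long elements = parse_len(*p + 2, p);
    *p += 2;                       /* step over the delimiter after the digits */
    return elements;
}

int main(void)
{
    const unsigned char buf[] = "O:3:{}";
    const unsigned char *p = buf, *max = buf + sizeof(buf) - 1;
    printf("%ld\n", read_element_count(&p, max));   /* prints 3 */
    p = max;                                        /* simulate truncated input */
    printf("%ld\n", read_element_count(&p, max));   /* prints -1 */
    return 0;
}

Without the guard, a truncated payload such as "O:" lets the length parser walk past the end of the serialized buffer, which is the out-of-bounds read (CWE-125) the record documents.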
{'added': [(408, '\tif( *p >= max - 2) {'), (409, '\t\tzend_error(E_WARNING, "Bad unserialize data");'), (410, '\t\treturn -1;'), (411, '\t}'), (412, ''), (423, '\t\treturn -1;'), (500, '#line 501 "ext/standard/var_unserializer.c"'), (560, '#line 875 "ext/standard/var_unserializer.re"'), (562, '#line 563 "ext/standard/var_unserializer.c"'), (605, '#line 869 "ext/standard/var_unserializer.re"'), (611, '#line 612 "ext/standard/var_unserializer.c"'), (642, '#line 717 "ext/standard/var_unserializer.re"'), (782, '\tif (elements < 0) {'), (783, '\t efree(class_name);'), (784, '\t return 0;'), (785, '\t}'), (786, ''), (794, '#line 795 "ext/standard/var_unserializer.c"'), (819, '#line 704 "ext/standard/var_unserializer.re"'), (821, '\tlong elements;'), (826, '\telements = object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR);'), (827, '\tif (elements < 0) {'), (828, '\t\treturn 0;'), (829, '\t}'), (830, '\treturn object_common2(UNSERIALIZE_PASSTHRU, elements);'), (832, '#line 833 "ext/standard/var_unserializer.c"'), (853, '#line 683 "ext/standard/var_unserializer.re"'), (874, '#line 875 "ext/standard/var_unserializer.c"'), (895, '#line 648 "ext/standard/var_unserializer.re"'), (930, '#line 931 "ext/standard/var_unserializer.c"'), (951, '#line 615 "ext/standard/var_unserializer.re"'), (984, '#line 985 "ext/standard/var_unserializer.c"'), (1072, '#line 605 "ext/standard/var_unserializer.re"'), (1082, '#line 1083 "ext/standard/var_unserializer.c"'), (1141, '#line 590 "ext/standard/var_unserializer.re"'), (1156, '#line 1157 "ext/standard/var_unserializer.c"'), (1183, '#line 563 "ext/standard/var_unserializer.re"'), (1210, '#line 1211 "ext/standard/var_unserializer.c"'), (1218, '#line 556 "ext/standard/var_unserializer.re"'), (1225, '#line 1226 "ext/standard/var_unserializer.c"'), (1228, '#line 549 "ext/standard/var_unserializer.re"'), (1235, '#line 1236 "ext/standard/var_unserializer.c"'), (1258, '#line 526 "ext/standard/var_unserializer.re"'), (1281, '#line 1282 "ext/standard/var_unserializer.c"'), (1304, '#line 505 "ext/standard/var_unserializer.re"'), (1325, '#line 1326 "ext/standard/var_unserializer.c"'), (1327, '#line 877 "ext/standard/var_unserializer.re"')], 'deleted': [(418, '\t\treturn 0;'), (495, '#line 496 "ext/standard/var_unserializer.c"'), (555, '#line 861 "ext/standard/var_unserializer.re"'), (557, '#line 558 "ext/standard/var_unserializer.c"'), (600, '#line 855 "ext/standard/var_unserializer.re"'), (606, '#line 607 "ext/standard/var_unserializer.c"'), (637, '#line 708 "ext/standard/var_unserializer.re"'), (784, '#line 785 "ext/standard/var_unserializer.c"'), (809, '#line 699 "ext/standard/var_unserializer.re"'), (815, '\treturn object_common2(UNSERIALIZE_PASSTHRU,'), (816, '\t\t\tobject_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR));'), (818, '#line 819 "ext/standard/var_unserializer.c"'), (839, '#line 678 "ext/standard/var_unserializer.re"'), (860, '#line 861 "ext/standard/var_unserializer.c"'), (881, '#line 643 "ext/standard/var_unserializer.re"'), (916, '#line 917 "ext/standard/var_unserializer.c"'), (937, '#line 610 "ext/standard/var_unserializer.re"'), (970, '#line 971 "ext/standard/var_unserializer.c"'), (1058, '#line 600 "ext/standard/var_unserializer.re"'), (1068, '#line 1069 "ext/standard/var_unserializer.c"'), (1127, '#line 585 "ext/standard/var_unserializer.re"'), (1142, '#line 1143 "ext/standard/var_unserializer.c"'), (1169, '#line 558 "ext/standard/var_unserializer.re"'), (1196, '#line 1197 "ext/standard/var_unserializer.c"'), (1204, '#line 551 
"ext/standard/var_unserializer.re"'), (1211, '#line 1212 "ext/standard/var_unserializer.c"'), (1214, '#line 544 "ext/standard/var_unserializer.re"'), (1221, '#line 1222 "ext/standard/var_unserializer.c"'), (1244, '#line 521 "ext/standard/var_unserializer.re"'), (1267, '#line 1268 "ext/standard/var_unserializer.c"'), (1290, '#line 500 "ext/standard/var_unserializer.re"'), (1311, '#line 1312 "ext/standard/var_unserializer.c"'), (1313, '#line 863 "ext/standard/var_unserializer.re"')]}
47
33
1,073
6,905
https://github.com/php/php-src
CVE-2016-10161
['CWE-125']
var_unserializer.c
php_var_unserialize
/* Generated by re2c 0.13.7.5 */ #line 1 "ext/standard/var_unserializer.re" /* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Sascha Schumann <sascha@schumann.cx> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include "php.h" #include "ext/standard/php_var.h" #include "php_incomplete_class.h" /* {{{ reference-handling for unserializer: var_* */ #define VAR_ENTRIES_MAX 1024 #define VAR_ENTRIES_DBG 0 typedef struct { zval *data[VAR_ENTRIES_MAX]; long used_slots; void *next; } var_entries; static inline void var_push(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash = (*var_hashx)->last; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push(%ld): %d\n", var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first) { (*var_hashx)->first = var_hash; } else { ((var_entries *) (*var_hashx)->last)->next = var_hash; } (*var_hashx)->last = var_hash; } var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_push_dtor(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash; if (!var_hashx || !*var_hashx) { return; } var_hash = (*var_hashx)->last_dtor; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push_dtor(%p, %ld): %d\n", *rval, var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first_dtor) { (*var_hashx)->first_dtor = var_hash; } else { ((var_entries *) (*var_hashx)->last_dtor)->next = var_hash; } (*var_hashx)->last_dtor = var_hash; } Z_ADDREF_PP(rval); var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_push_dtor_no_addref(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash; if (!var_hashx || !*var_hashx) { return; } var_hash = (*var_hashx)->last_dtor; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push_dtor_no_addref(%p, %ld): %d (%d)\n", *rval, var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval), Z_REFCOUNT_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first_dtor) { (*var_hashx)->first_dtor = var_hash; } else { ((var_entries *) (*var_hashx)->last_dtor)->next = var_hash; } (*var_hashx)->last_dtor = var_hash; } var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_replace(php_unserialize_data_t *var_hashx, zval *ozval, zval **nzval) { long i; var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_replace(%ld): %d\n", 
var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(nzval)); #endif while (var_hash) { for (i = 0; i < var_hash->used_slots; i++) { if (var_hash->data[i] == ozval) { var_hash->data[i] = *nzval; /* do not break here */ } } var_hash = var_hash->next; } } static int var_access(php_unserialize_data_t *var_hashx, long id, zval ***store) { var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_access(%ld): %ld\n", var_hash?var_hash->used_slots:-1L, id); #endif while (id >= VAR_ENTRIES_MAX && var_hash && var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = var_hash->next; id -= VAR_ENTRIES_MAX; } if (!var_hash) return !SUCCESS; if (id < 0 || id >= var_hash->used_slots) return !SUCCESS; *store = &var_hash->data[id]; return SUCCESS; } PHPAPI void var_destroy(php_unserialize_data_t *var_hashx) { void *next; long i; var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_destroy(%ld)\n", var_hash?var_hash->used_slots:-1L); #endif while (var_hash) { next = var_hash->next; efree(var_hash); var_hash = next; } var_hash = (*var_hashx)->first_dtor; while (var_hash) { for (i = 0; i < var_hash->used_slots; i++) { #if VAR_ENTRIES_DBG fprintf(stderr, "var_destroy dtor(%p, %ld)\n", var_hash->data[i], Z_REFCOUNT_P(var_hash->data[i])); #endif zval_ptr_dtor(&var_hash->data[i]); } next = var_hash->next; efree(var_hash); var_hash = next; } } /* }}} */ static char *unserialize_str(const unsigned char **p, size_t *len, size_t maxlen) { size_t i, j; char *str = safe_emalloc(*len, 1, 1); unsigned char *end = *(unsigned char **)p+maxlen; if (end < *p) { efree(str); return NULL; } for (i = 0; i < *len; i++) { if (*p >= end) { efree(str); return NULL; } if (**p != '\\') { str[i] = (char)**p; } else { unsigned char ch = 0; for (j = 0; j < 2; j++) { (*p)++; if (**p >= '0' && **p <= '9') { ch = (ch << 4) + (**p -'0'); } else if (**p >= 'a' && **p <= 'f') { ch = (ch << 4) + (**p -'a'+10); } else if (**p >= 'A' && **p <= 'F') { ch = (ch << 4) + (**p -'A'+10); } else { efree(str); return NULL; } } str[i] = (char)ch; } (*p)++; } str[i] = 0; *len = i; return str; } #define YYFILL(n) do { } while (0) #define YYCTYPE unsigned char #define YYCURSOR cursor #define YYLIMIT limit #define YYMARKER marker #line 249 "ext/standard/var_unserializer.re" static inline long parse_iv2(const unsigned char *p, const unsigned char **q) { char cursor; long result = 0; int neg = 0; switch (*p) { case '-': neg++; /* fall-through */ case '+': p++; } while (1) { cursor = (char)*p; if (cursor >= '0' && cursor <= '9') { result = result * 10 + (size_t)(cursor - (unsigned char)'0'); } else { break; } p++; } if (q) *q = p; if (neg) return -result; return result; } static inline long parse_iv(const unsigned char *p) { return parse_iv2(p, NULL); } /* no need to check for length - re2c already did */ static inline size_t parse_uiv(const unsigned char *p) { unsigned char cursor; size_t result = 0; if (*p == '+') { p++; } while (1) { cursor = *p; if (cursor >= '0' && cursor <= '9') { result = result * 10 + (size_t)(cursor - (unsigned char)'0'); } else { break; } p++; } return result; } #define UNSERIALIZE_PARAMETER zval **rval, const unsigned char **p, const unsigned char *max, php_unserialize_data_t *var_hash TSRMLS_DC #define UNSERIALIZE_PASSTHRU rval, p, max, var_hash TSRMLS_CC static inline int process_nested_data(UNSERIALIZE_PARAMETER, HashTable *ht, long elements, int objprops) { while (elements-- > 0) { zval *key, *data, **old_data; ALLOC_INIT_ZVAL(key); if (!php_var_unserialize(&key, p, max, NULL 
TSRMLS_CC)) { var_push_dtor_no_addref(var_hash, &key); return 0; } if (Z_TYPE_P(key) != IS_LONG && Z_TYPE_P(key) != IS_STRING) { var_push_dtor_no_addref(var_hash, &key); return 0; } ALLOC_INIT_ZVAL(data); if (!php_var_unserialize(&data, p, max, var_hash TSRMLS_CC)) { var_push_dtor_no_addref(var_hash, &key); var_push_dtor_no_addref(var_hash, &data); return 0; } if (!objprops) { switch (Z_TYPE_P(key)) { case IS_LONG: if (zend_hash_index_find(ht, Z_LVAL_P(key), (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_index_update(ht, Z_LVAL_P(key), &data, sizeof(data), NULL); break; case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_symtable_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof(data), NULL); break; } } else { /* object properties should include no integers */ convert_to_string(key); if (zend_hash_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof data, NULL); } var_push_dtor(var_hash, &data); var_push_dtor_no_addref(var_hash, &key); if (elements && *(*p-1) != ';' && *(*p-1) != '}') { (*p)--; return 0; } } return 1; } static inline int finish_nested_data(UNSERIALIZE_PARAMETER) { if (*((*p)++) == '}') return 1; #if SOMETHING_NEW_MIGHT_LEAD_TO_CRASH_ENABLE_IF_YOU_ARE_BRAVE zval_ptr_dtor(rval); #endif return 0; } static inline int object_custom(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long datalen; datalen = parse_iv2((*p) + 2, p); (*p) += 2; if (datalen < 0 || (max - (*p)) <= datalen) { zend_error(E_WARNING, "Insufficient data for unserializing - %ld required, %ld present", datalen, (long)(max - (*p))); return 0; } if (ce->unserialize == NULL) { zend_error(E_WARNING, "Class %s has no unserializer", ce->name); object_init_ex(*rval, ce); } else if (ce->unserialize(rval, ce, (const unsigned char*)*p, datalen, (zend_unserialize_data *)var_hash TSRMLS_CC) != SUCCESS) { return 0; } (*p) += datalen; return finish_nested_data(UNSERIALIZE_PASSTHRU); } static inline long object_common1(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long elements; elements = parse_iv2((*p) + 2, p); (*p) += 2; if (ce->serialize == NULL) { object_init_ex(*rval, ce); } else { /* If this class implements Serializable, it should not land here but in object_custom(). The passed string obviously doesn't descend from the regular serializer. */ zend_error(E_WARNING, "Erroneous data format for unserializing '%s'", ce->name); return 0; } return elements; } #ifdef PHP_WIN32 # pragma optimize("", off) #endif static inline int object_common2(UNSERIALIZE_PARAMETER, long elements) { zval *retval_ptr = NULL; zval fname; if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_OBJPROP_PP(rval), elements, 1)) { /* We've got partially constructed object on our hands here. Wipe it. 
*/ if(Z_TYPE_PP(rval) == IS_OBJECT) { zend_hash_clean(Z_OBJPROP_PP(rval)); zend_object_store_ctor_failed(*rval TSRMLS_CC); } ZVAL_NULL(*rval); return 0; } if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (Z_OBJCE_PP(rval) != PHP_IC_ENTRY && zend_hash_exists(&Z_OBJCE_PP(rval)->function_table, "__wakeup", sizeof("__wakeup"))) { INIT_PZVAL(&fname); ZVAL_STRINGL(&fname, "__wakeup", sizeof("__wakeup") - 1, 0); BG(serialize_lock)++; call_user_function_ex(CG(function_table), rval, &fname, &retval_ptr, 0, 0, 1, NULL TSRMLS_CC); BG(serialize_lock)--; } if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #ifdef PHP_WIN32 # pragma optimize("", on) #endif PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 496 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 861 "ext/standard/var_unserializer.re" { return 0; } #line 558 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; goto yy3; yy5: yych = *++YYCURSOR; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; #line 855 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? 
*/ } #line 607 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; yy17: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych == '+') goto yy19; yy18: YYCURSOR = YYMARKER; goto yy3; yy19: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } goto yy18; yy20: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych <= '/') goto yy18; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 708 "ext/standard/var_unserializer.re" { size_t len, len2, len3, maxlen; long elements; char *class_name; zend_class_entry *ce; zend_class_entry **pce; int incomplete_class = 0; int custom_object = 0; zval *user_func; zval *retval_ptr; zval **args[1]; zval *arg_func_name; if (!var_hash) return 0; if (*start == 'C') { custom_object = 1; } INIT_PZVAL(*rval); len2 = len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len || len == 0) { *p = start + 2; return 0; } class_name = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR+1) != ':') { *p = YYCURSOR+1; return 0; } len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\"); if (len3 != len) { *p = YYCURSOR + len3 - len; return 0; } class_name = estrndup(class_name, len); do { /* Try to find class directly */ BG(serialize_lock)++; if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } ce = *pce; break; } BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { incomplete_class = 1; ce = PHP_IC_ENTRY; break; } /* Call unserialize callback */ MAKE_STD_ZVAL(user_func); ZVAL_STRING(user_func, PG(unserialize_callback_func), 1); args[0] = &arg_func_name; MAKE_STD_ZVAL(arg_func_name); ZVAL_STRING(arg_func_name, class_name, 1); BG(serialize_lock)++; if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } BG(serialize_lock)--; if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } /* The callback function may have defined the class */ if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { ce = *pce; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; } 
zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } while (1); *p = YYCURSOR; if (custom_object) { int ret; ret = object_custom(UNSERIALIZE_PASSTHRU, ce); if (ret && incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return ret; } elements = object_common1(UNSERIALIZE_PASSTHRU, ce); if (incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 785 "ext/standard/var_unserializer.c" yy25: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy26; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; goto yy18; } yy26: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy27: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 699 "ext/standard/var_unserializer.re" { if (!var_hash) return 0; INIT_PZVAL(*rval); return object_common2(UNSERIALIZE_PASSTHRU, object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR)); } #line 819 "ext/standard/var_unserializer.c" yy32: yych = *++YYCURSOR; if (yych == '+') goto yy33; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; goto yy18; yy33: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy34: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '{') goto yy18; ++YYCURSOR; #line 678 "ext/standard/var_unserializer.re" { long elements = parse_iv(start + 2); /* use iv() not uiv() in order to check data range */ *p = YYCURSOR; if (!var_hash) return 0; if (elements < 0) { return 0; } INIT_PZVAL(*rval); array_init_size(*rval, elements); if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #line 861 "ext/standard/var_unserializer.c" yy39: yych = *++YYCURSOR; if (yych == '+') goto yy40; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; goto yy18; yy40: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy41: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 643 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) { return 0; } if (*(YYCURSOR) != '"') { efree(str); *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { efree(str); *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 0); return 1; } #line 917 "ext/standard/var_unserializer.c" yy46: yych = *++YYCURSOR; if (yych == '+') goto yy47; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; goto yy18; yy47: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy48: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 610 
"ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } str = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 1); return 1; } #line 971 "ext/standard/var_unserializer.c" yy53: yych = *++YYCURSOR; if (yych <= '/') { if (yych <= ',') { if (yych == '+') goto yy57; goto yy18; } else { if (yych <= '-') goto yy55; if (yych <= '.') goto yy60; goto yy18; } } else { if (yych <= 'I') { if (yych <= '9') goto yy58; if (yych <= 'H') goto yy18; goto yy56; } else { if (yych != 'N') goto yy18; } } yych = *++YYCURSOR; if (yych == 'A') goto yy76; goto yy18; yy55: yych = *++YYCURSOR; if (yych <= '/') { if (yych == '.') goto yy60; goto yy18; } else { if (yych <= '9') goto yy58; if (yych != 'I') goto yy18; } yy56: yych = *++YYCURSOR; if (yych == 'N') goto yy72; goto yy18; yy57: yych = *++YYCURSOR; if (yych == '.') goto yy60; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy58: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ':') { if (yych <= '.') { if (yych <= '-') goto yy18; goto yy70; } else { if (yych <= '/') goto yy18; if (yych <= '9') goto yy58; goto yy18; } } else { if (yych <= 'E') { if (yych <= ';') goto yy63; if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy60: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy61: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy61; if (yych <= ':') goto yy18; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy63: ++YYCURSOR; #line 600 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 use_double: #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL)); return 1; } #line 1069 "ext/standard/var_unserializer.c" yy65: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy66; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; } yy66: yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy69; goto yy18; } else { if (yych <= '-') goto yy69; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; } yy67: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; if (yych == ';') goto yy63; goto yy18; yy69: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; yy70: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy70; if (yych <= ':') goto yy18; goto yy63; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy72: yych = *++YYCURSOR; if (yych != 'F') goto yy18; yy73: yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 585 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); if (!strncmp(start + 2, "NAN", 3)) { ZVAL_DOUBLE(*rval, php_get_nan()); } else if (!strncmp(start + 2, "INF", 3)) { ZVAL_DOUBLE(*rval, php_get_inf()); } else if (!strncmp(start + 2, "-INF", 4)) { ZVAL_DOUBLE(*rval, -php_get_inf()); } return 
1; } #line 1143 "ext/standard/var_unserializer.c" yy76: yych = *++YYCURSOR; if (yych == 'N') goto yy73; goto yy18; yy77: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy78; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; goto yy18; } yy78: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy79: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; if (yych != ';') goto yy18; ++YYCURSOR; #line 558 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 int digits = YYCURSOR - start - 3; if (start[2] == '-' || start[2] == '+') { digits--; } /* Use double for large long values that were serialized on a 64-bit system */ if (digits >= MAX_LENGTH_OF_LONG - 1) { if (digits == MAX_LENGTH_OF_LONG - 1) { int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1); if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) { goto use_double; } } else { goto use_double; } } #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_LONG(*rval, parse_iv(start + 2)); return 1; } #line 1197 "ext/standard/var_unserializer.c" yy83: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= '2') goto yy18; yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 551 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_BOOL(*rval, parse_iv(start + 2)); return 1; } #line 1212 "ext/standard/var_unserializer.c" yy87: ++YYCURSOR; #line 544 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_NULL(*rval); return 1; } #line 1222 "ext/standard/var_unserializer.c" yy89: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy90; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; goto yy18; } yy90: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy91: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; if (yych != ';') goto yy18; ++YYCURSOR; #line 521 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval == *rval_ref) return 0; if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_UNSET_ISREF_PP(rval); return 1; } #line 1268 "ext/standard/var_unserializer.c" yy95: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy96; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; goto yy18; } yy96: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy97: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; if (yych != ';') goto yy18; ++YYCURSOR; #line 500 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_SET_ISREF_PP(rval); return 1; } #line 1312 "ext/standard/var_unserializer.c" } #line 863 "ext/standard/var_unserializer.re" return 0; }
/* Generated by re2c 0.13.7.5 */ #line 1 "ext/standard/var_unserializer.re" /* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Sascha Schumann <sascha@schumann.cx> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include "php.h" #include "ext/standard/php_var.h" #include "php_incomplete_class.h" /* {{{ reference-handling for unserializer: var_* */ #define VAR_ENTRIES_MAX 1024 #define VAR_ENTRIES_DBG 0 typedef struct { zval *data[VAR_ENTRIES_MAX]; long used_slots; void *next; } var_entries; static inline void var_push(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash = (*var_hashx)->last; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push(%ld): %d\n", var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first) { (*var_hashx)->first = var_hash; } else { ((var_entries *) (*var_hashx)->last)->next = var_hash; } (*var_hashx)->last = var_hash; } var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_push_dtor(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash; if (!var_hashx || !*var_hashx) { return; } var_hash = (*var_hashx)->last_dtor; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push_dtor(%p, %ld): %d\n", *rval, var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first_dtor) { (*var_hashx)->first_dtor = var_hash; } else { ((var_entries *) (*var_hashx)->last_dtor)->next = var_hash; } (*var_hashx)->last_dtor = var_hash; } Z_ADDREF_PP(rval); var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_push_dtor_no_addref(php_unserialize_data_t *var_hashx, zval **rval) { var_entries *var_hash; if (!var_hashx || !*var_hashx) { return; } var_hash = (*var_hashx)->last_dtor; #if VAR_ENTRIES_DBG fprintf(stderr, "var_push_dtor_no_addref(%p, %ld): %d (%d)\n", *rval, var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval), Z_REFCOUNT_PP(rval)); #endif if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = emalloc(sizeof(var_entries)); var_hash->used_slots = 0; var_hash->next = 0; if (!(*var_hashx)->first_dtor) { (*var_hashx)->first_dtor = var_hash; } else { ((var_entries *) (*var_hashx)->last_dtor)->next = var_hash; } (*var_hashx)->last_dtor = var_hash; } var_hash->data[var_hash->used_slots++] = *rval; } PHPAPI void var_replace(php_unserialize_data_t *var_hashx, zval *ozval, zval **nzval) { long i; var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_replace(%ld): %d\n", 
var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(nzval)); #endif while (var_hash) { for (i = 0; i < var_hash->used_slots; i++) { if (var_hash->data[i] == ozval) { var_hash->data[i] = *nzval; /* do not break here */ } } var_hash = var_hash->next; } } static int var_access(php_unserialize_data_t *var_hashx, long id, zval ***store) { var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_access(%ld): %ld\n", var_hash?var_hash->used_slots:-1L, id); #endif while (id >= VAR_ENTRIES_MAX && var_hash && var_hash->used_slots == VAR_ENTRIES_MAX) { var_hash = var_hash->next; id -= VAR_ENTRIES_MAX; } if (!var_hash) return !SUCCESS; if (id < 0 || id >= var_hash->used_slots) return !SUCCESS; *store = &var_hash->data[id]; return SUCCESS; } PHPAPI void var_destroy(php_unserialize_data_t *var_hashx) { void *next; long i; var_entries *var_hash = (*var_hashx)->first; #if VAR_ENTRIES_DBG fprintf(stderr, "var_destroy(%ld)\n", var_hash?var_hash->used_slots:-1L); #endif while (var_hash) { next = var_hash->next; efree(var_hash); var_hash = next; } var_hash = (*var_hashx)->first_dtor; while (var_hash) { for (i = 0; i < var_hash->used_slots; i++) { #if VAR_ENTRIES_DBG fprintf(stderr, "var_destroy dtor(%p, %ld)\n", var_hash->data[i], Z_REFCOUNT_P(var_hash->data[i])); #endif zval_ptr_dtor(&var_hash->data[i]); } next = var_hash->next; efree(var_hash); var_hash = next; } } /* }}} */ static char *unserialize_str(const unsigned char **p, size_t *len, size_t maxlen) { size_t i, j; char *str = safe_emalloc(*len, 1, 1); unsigned char *end = *(unsigned char **)p+maxlen; if (end < *p) { efree(str); return NULL; } for (i = 0; i < *len; i++) { if (*p >= end) { efree(str); return NULL; } if (**p != '\\') { str[i] = (char)**p; } else { unsigned char ch = 0; for (j = 0; j < 2; j++) { (*p)++; if (**p >= '0' && **p <= '9') { ch = (ch << 4) + (**p -'0'); } else if (**p >= 'a' && **p <= 'f') { ch = (ch << 4) + (**p -'a'+10); } else if (**p >= 'A' && **p <= 'F') { ch = (ch << 4) + (**p -'A'+10); } else { efree(str); return NULL; } } str[i] = (char)ch; } (*p)++; } str[i] = 0; *len = i; return str; } #define YYFILL(n) do { } while (0) #define YYCTYPE unsigned char #define YYCURSOR cursor #define YYLIMIT limit #define YYMARKER marker #line 249 "ext/standard/var_unserializer.re" static inline long parse_iv2(const unsigned char *p, const unsigned char **q) { char cursor; long result = 0; int neg = 0; switch (*p) { case '-': neg++; /* fall-through */ case '+': p++; } while (1) { cursor = (char)*p; if (cursor >= '0' && cursor <= '9') { result = result * 10 + (size_t)(cursor - (unsigned char)'0'); } else { break; } p++; } if (q) *q = p; if (neg) return -result; return result; } static inline long parse_iv(const unsigned char *p) { return parse_iv2(p, NULL); } /* no need to check for length - re2c already did */ static inline size_t parse_uiv(const unsigned char *p) { unsigned char cursor; size_t result = 0; if (*p == '+') { p++; } while (1) { cursor = *p; if (cursor >= '0' && cursor <= '9') { result = result * 10 + (size_t)(cursor - (unsigned char)'0'); } else { break; } p++; } return result; } #define UNSERIALIZE_PARAMETER zval **rval, const unsigned char **p, const unsigned char *max, php_unserialize_data_t *var_hash TSRMLS_DC #define UNSERIALIZE_PASSTHRU rval, p, max, var_hash TSRMLS_CC static inline int process_nested_data(UNSERIALIZE_PARAMETER, HashTable *ht, long elements, int objprops) { while (elements-- > 0) { zval *key, *data, **old_data; ALLOC_INIT_ZVAL(key); if (!php_var_unserialize(&key, p, max, NULL 
TSRMLS_CC)) { var_push_dtor_no_addref(var_hash, &key); return 0; } if (Z_TYPE_P(key) != IS_LONG && Z_TYPE_P(key) != IS_STRING) { var_push_dtor_no_addref(var_hash, &key); return 0; } ALLOC_INIT_ZVAL(data); if (!php_var_unserialize(&data, p, max, var_hash TSRMLS_CC)) { var_push_dtor_no_addref(var_hash, &key); var_push_dtor_no_addref(var_hash, &data); return 0; } if (!objprops) { switch (Z_TYPE_P(key)) { case IS_LONG: if (zend_hash_index_find(ht, Z_LVAL_P(key), (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_index_update(ht, Z_LVAL_P(key), &data, sizeof(data), NULL); break; case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_symtable_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof(data), NULL); break; } } else { /* object properties should include no integers */ convert_to_string(key); if (zend_hash_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof data, NULL); } var_push_dtor(var_hash, &data); var_push_dtor_no_addref(var_hash, &key); if (elements && *(*p-1) != ';' && *(*p-1) != '}') { (*p)--; return 0; } } return 1; } static inline int finish_nested_data(UNSERIALIZE_PARAMETER) { if (*((*p)++) == '}') return 1; #if SOMETHING_NEW_MIGHT_LEAD_TO_CRASH_ENABLE_IF_YOU_ARE_BRAVE zval_ptr_dtor(rval); #endif return 0; } static inline int object_custom(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long datalen; datalen = parse_iv2((*p) + 2, p); (*p) += 2; if (datalen < 0 || (max - (*p)) <= datalen) { zend_error(E_WARNING, "Insufficient data for unserializing - %ld required, %ld present", datalen, (long)(max - (*p))); return 0; } if (ce->unserialize == NULL) { zend_error(E_WARNING, "Class %s has no unserializer", ce->name); object_init_ex(*rval, ce); } else if (ce->unserialize(rval, ce, (const unsigned char*)*p, datalen, (zend_unserialize_data *)var_hash TSRMLS_CC) != SUCCESS) { return 0; } (*p) += datalen; return finish_nested_data(UNSERIALIZE_PASSTHRU); } static inline long object_common1(UNSERIALIZE_PARAMETER, zend_class_entry *ce) { long elements; if( *p >= max - 2) { zend_error(E_WARNING, "Bad unserialize data"); return -1; } elements = parse_iv2((*p) + 2, p); (*p) += 2; if (ce->serialize == NULL) { object_init_ex(*rval, ce); } else { /* If this class implements Serializable, it should not land here but in object_custom(). The passed string obviously doesn't descend from the regular serializer. */ zend_error(E_WARNING, "Erroneous data format for unserializing '%s'", ce->name); return -1; } return elements; } #ifdef PHP_WIN32 # pragma optimize("", off) #endif static inline int object_common2(UNSERIALIZE_PARAMETER, long elements) { zval *retval_ptr = NULL; zval fname; if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_OBJPROP_PP(rval), elements, 1)) { /* We've got partially constructed object on our hands here. Wipe it. 
*/ if(Z_TYPE_PP(rval) == IS_OBJECT) { zend_hash_clean(Z_OBJPROP_PP(rval)); zend_object_store_ctor_failed(*rval TSRMLS_CC); } ZVAL_NULL(*rval); return 0; } if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (Z_OBJCE_PP(rval) != PHP_IC_ENTRY && zend_hash_exists(&Z_OBJCE_PP(rval)->function_table, "__wakeup", sizeof("__wakeup"))) { INIT_PZVAL(&fname); ZVAL_STRINGL(&fname, "__wakeup", sizeof("__wakeup") - 1, 0); BG(serialize_lock)++; call_user_function_ex(CG(function_table), rval, &fname, &retval_ptr, 0, 0, 1, NULL TSRMLS_CC); BG(serialize_lock)--; } if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #ifdef PHP_WIN32 # pragma optimize("", on) #endif PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 501 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 875 "ext/standard/var_unserializer.re" { return 0; } #line 563 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; goto yy3; yy5: yych = *++YYCURSOR; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; #line 869 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? 
*/ } #line 612 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; yy17: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych == '+') goto yy19; yy18: YYCURSOR = YYMARKER; goto yy3; yy19: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } goto yy18; yy20: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych <= '/') goto yy18; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 717 "ext/standard/var_unserializer.re" { size_t len, len2, len3, maxlen; long elements; char *class_name; zend_class_entry *ce; zend_class_entry **pce; int incomplete_class = 0; int custom_object = 0; zval *user_func; zval *retval_ptr; zval **args[1]; zval *arg_func_name; if (!var_hash) return 0; if (*start == 'C') { custom_object = 1; } INIT_PZVAL(*rval); len2 = len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len || len == 0) { *p = start + 2; return 0; } class_name = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR+1) != ':') { *p = YYCURSOR+1; return 0; } len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\"); if (len3 != len) { *p = YYCURSOR + len3 - len; return 0; } class_name = estrndup(class_name, len); do { /* Try to find class directly */ BG(serialize_lock)++; if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } ce = *pce; break; } BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { incomplete_class = 1; ce = PHP_IC_ENTRY; break; } /* Call unserialize callback */ MAKE_STD_ZVAL(user_func); ZVAL_STRING(user_func, PG(unserialize_callback_func), 1); args[0] = &arg_func_name; MAKE_STD_ZVAL(arg_func_name); ZVAL_STRING(arg_func_name, class_name, 1); BG(serialize_lock)++; if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } BG(serialize_lock)--; if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } /* The callback function may have defined the class */ if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { ce = *pce; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; } 
zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } while (1); *p = YYCURSOR; if (custom_object) { int ret; ret = object_custom(UNSERIALIZE_PASSTHRU, ce); if (ret && incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return ret; } elements = object_common1(UNSERIALIZE_PASSTHRU, ce); if (elements < 0) { efree(class_name); return 0; } if (incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 795 "ext/standard/var_unserializer.c" yy25: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy26; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; goto yy18; } yy26: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy27: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 704 "ext/standard/var_unserializer.re" { long elements; if (!var_hash) return 0; INIT_PZVAL(*rval); elements = object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR); if (elements < 0) { return 0; } return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 833 "ext/standard/var_unserializer.c" yy32: yych = *++YYCURSOR; if (yych == '+') goto yy33; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; goto yy18; yy33: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy34: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '{') goto yy18; ++YYCURSOR; #line 683 "ext/standard/var_unserializer.re" { long elements = parse_iv(start + 2); /* use iv() not uiv() in order to check data range */ *p = YYCURSOR; if (!var_hash) return 0; if (elements < 0) { return 0; } INIT_PZVAL(*rval); array_init_size(*rval, elements); if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #line 875 "ext/standard/var_unserializer.c" yy39: yych = *++YYCURSOR; if (yych == '+') goto yy40; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; goto yy18; yy40: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy41: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 648 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) { return 0; } if (*(YYCURSOR) != '"') { efree(str); *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { efree(str); *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 0); return 1; } #line 931 "ext/standard/var_unserializer.c" yy46: yych = *++YYCURSOR; if (yych == '+') goto yy47; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; goto yy18; yy47: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy48: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; if 
(yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 615 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } str = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 1); return 1; } #line 985 "ext/standard/var_unserializer.c" yy53: yych = *++YYCURSOR; if (yych <= '/') { if (yych <= ',') { if (yych == '+') goto yy57; goto yy18; } else { if (yych <= '-') goto yy55; if (yych <= '.') goto yy60; goto yy18; } } else { if (yych <= 'I') { if (yych <= '9') goto yy58; if (yych <= 'H') goto yy18; goto yy56; } else { if (yych != 'N') goto yy18; } } yych = *++YYCURSOR; if (yych == 'A') goto yy76; goto yy18; yy55: yych = *++YYCURSOR; if (yych <= '/') { if (yych == '.') goto yy60; goto yy18; } else { if (yych <= '9') goto yy58; if (yych != 'I') goto yy18; } yy56: yych = *++YYCURSOR; if (yych == 'N') goto yy72; goto yy18; yy57: yych = *++YYCURSOR; if (yych == '.') goto yy60; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy58: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ':') { if (yych <= '.') { if (yych <= '-') goto yy18; goto yy70; } else { if (yych <= '/') goto yy18; if (yych <= '9') goto yy58; goto yy18; } } else { if (yych <= 'E') { if (yych <= ';') goto yy63; if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy60: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy61: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy61; if (yych <= ':') goto yy18; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy63: ++YYCURSOR; #line 605 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 use_double: #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL)); return 1; } #line 1083 "ext/standard/var_unserializer.c" yy65: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy66; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; } yy66: yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy69; goto yy18; } else { if (yych <= '-') goto yy69; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; } yy67: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; if (yych == ';') goto yy63; goto yy18; yy69: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; yy70: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy70; if (yych <= ':') goto yy18; goto yy63; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy72: yych = *++YYCURSOR; if (yych != 'F') goto yy18; yy73: yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 590 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); if (!strncmp(start + 2, "NAN", 3)) { ZVAL_DOUBLE(*rval, php_get_nan()); } else if (!strncmp(start + 2, "INF", 3)) { ZVAL_DOUBLE(*rval, 
php_get_inf()); } else if (!strncmp(start + 2, "-INF", 4)) { ZVAL_DOUBLE(*rval, -php_get_inf()); } return 1; } #line 1157 "ext/standard/var_unserializer.c" yy76: yych = *++YYCURSOR; if (yych == 'N') goto yy73; goto yy18; yy77: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy78; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; goto yy18; } yy78: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy79: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; if (yych != ';') goto yy18; ++YYCURSOR; #line 563 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 int digits = YYCURSOR - start - 3; if (start[2] == '-' || start[2] == '+') { digits--; } /* Use double for large long values that were serialized on a 64-bit system */ if (digits >= MAX_LENGTH_OF_LONG - 1) { if (digits == MAX_LENGTH_OF_LONG - 1) { int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1); if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) { goto use_double; } } else { goto use_double; } } #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_LONG(*rval, parse_iv(start + 2)); return 1; } #line 1211 "ext/standard/var_unserializer.c" yy83: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= '2') goto yy18; yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 556 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_BOOL(*rval, parse_iv(start + 2)); return 1; } #line 1226 "ext/standard/var_unserializer.c" yy87: ++YYCURSOR; #line 549 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_NULL(*rval); return 1; } #line 1236 "ext/standard/var_unserializer.c" yy89: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy90; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; goto yy18; } yy90: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy91: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; if (yych != ';') goto yy18; ++YYCURSOR; #line 526 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval == *rval_ref) return 0; if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_UNSET_ISREF_PP(rval); return 1; } #line 1282 "ext/standard/var_unserializer.c" yy95: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy96; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; goto yy18; } yy96: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy97: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; if (yych != ';') goto yy18; ++YYCURSOR; #line 505 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_SET_ISREF_PP(rval); return 1; } #line 1326 "ext/standard/var_unserializer.c" } #line 877 "ext/standard/var_unserializer.re" return 0; }
PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 496 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 861 "ext/standard/var_unserializer.re" { return 0; } #line 558 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; goto yy3; yy5: yych = *++YYCURSOR; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; #line 855 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? 
*/ } #line 607 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; yy17: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych == '+') goto yy19; yy18: YYCURSOR = YYMARKER; goto yy3; yy19: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } goto yy18; yy20: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych <= '/') goto yy18; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 708 "ext/standard/var_unserializer.re" { size_t len, len2, len3, maxlen; long elements; char *class_name; zend_class_entry *ce; zend_class_entry **pce; int incomplete_class = 0; int custom_object = 0; zval *user_func; zval *retval_ptr; zval **args[1]; zval *arg_func_name; if (!var_hash) return 0; if (*start == 'C') { custom_object = 1; } INIT_PZVAL(*rval); len2 = len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len || len == 0) { *p = start + 2; return 0; } class_name = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR+1) != ':') { *p = YYCURSOR+1; return 0; } len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\"); if (len3 != len) { *p = YYCURSOR + len3 - len; return 0; } class_name = estrndup(class_name, len); do { /* Try to find class directly */ BG(serialize_lock)++; if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } ce = *pce; break; } BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { incomplete_class = 1; ce = PHP_IC_ENTRY; break; } /* Call unserialize callback */ MAKE_STD_ZVAL(user_func); ZVAL_STRING(user_func, PG(unserialize_callback_func), 1); args[0] = &arg_func_name; MAKE_STD_ZVAL(arg_func_name); ZVAL_STRING(arg_func_name, class_name, 1); BG(serialize_lock)++; if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } BG(serialize_lock)--; if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } /* The callback function may have defined the class */ if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { ce = *pce; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; } 
zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } while (1); *p = YYCURSOR; if (custom_object) { int ret; ret = object_custom(UNSERIALIZE_PASSTHRU, ce); if (ret && incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return ret; } elements = object_common1(UNSERIALIZE_PASSTHRU, ce); if (incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 785 "ext/standard/var_unserializer.c" yy25: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy26; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; goto yy18; } yy26: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy27: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 699 "ext/standard/var_unserializer.re" { if (!var_hash) return 0; INIT_PZVAL(*rval); return object_common2(UNSERIALIZE_PASSTHRU, object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR)); } #line 819 "ext/standard/var_unserializer.c" yy32: yych = *++YYCURSOR; if (yych == '+') goto yy33; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; goto yy18; yy33: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy34: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '{') goto yy18; ++YYCURSOR; #line 678 "ext/standard/var_unserializer.re" { long elements = parse_iv(start + 2); /* use iv() not uiv() in order to check data range */ *p = YYCURSOR; if (!var_hash) return 0; if (elements < 0) { return 0; } INIT_PZVAL(*rval); array_init_size(*rval, elements); if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #line 861 "ext/standard/var_unserializer.c" yy39: yych = *++YYCURSOR; if (yych == '+') goto yy40; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; goto yy18; yy40: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy41: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 643 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) { return 0; } if (*(YYCURSOR) != '"') { efree(str); *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { efree(str); *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 0); return 1; } #line 917 "ext/standard/var_unserializer.c" yy46: yych = *++YYCURSOR; if (yych == '+') goto yy47; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; goto yy18; yy47: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy48: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 610 
"ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } str = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 1); return 1; } #line 971 "ext/standard/var_unserializer.c" yy53: yych = *++YYCURSOR; if (yych <= '/') { if (yych <= ',') { if (yych == '+') goto yy57; goto yy18; } else { if (yych <= '-') goto yy55; if (yych <= '.') goto yy60; goto yy18; } } else { if (yych <= 'I') { if (yych <= '9') goto yy58; if (yych <= 'H') goto yy18; goto yy56; } else { if (yych != 'N') goto yy18; } } yych = *++YYCURSOR; if (yych == 'A') goto yy76; goto yy18; yy55: yych = *++YYCURSOR; if (yych <= '/') { if (yych == '.') goto yy60; goto yy18; } else { if (yych <= '9') goto yy58; if (yych != 'I') goto yy18; } yy56: yych = *++YYCURSOR; if (yych == 'N') goto yy72; goto yy18; yy57: yych = *++YYCURSOR; if (yych == '.') goto yy60; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy58: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ':') { if (yych <= '.') { if (yych <= '-') goto yy18; goto yy70; } else { if (yych <= '/') goto yy18; if (yych <= '9') goto yy58; goto yy18; } } else { if (yych <= 'E') { if (yych <= ';') goto yy63; if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy60: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy61: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy61; if (yych <= ':') goto yy18; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy63: ++YYCURSOR; #line 600 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 use_double: #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL)); return 1; } #line 1069 "ext/standard/var_unserializer.c" yy65: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy66; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; } yy66: yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy69; goto yy18; } else { if (yych <= '-') goto yy69; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; } yy67: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; if (yych == ';') goto yy63; goto yy18; yy69: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; yy70: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy70; if (yych <= ':') goto yy18; goto yy63; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy72: yych = *++YYCURSOR; if (yych != 'F') goto yy18; yy73: yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 585 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); if (!strncmp(start + 2, "NAN", 3)) { ZVAL_DOUBLE(*rval, php_get_nan()); } else if (!strncmp(start + 2, "INF", 3)) { ZVAL_DOUBLE(*rval, php_get_inf()); } else if (!strncmp(start + 2, "-INF", 4)) { ZVAL_DOUBLE(*rval, -php_get_inf()); } return 
1; } #line 1143 "ext/standard/var_unserializer.c" yy76: yych = *++YYCURSOR; if (yych == 'N') goto yy73; goto yy18; yy77: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy78; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; goto yy18; } yy78: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy79: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; if (yych != ';') goto yy18; ++YYCURSOR; #line 558 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 int digits = YYCURSOR - start - 3; if (start[2] == '-' || start[2] == '+') { digits--; } /* Use double for large long values that were serialized on a 64-bit system */ if (digits >= MAX_LENGTH_OF_LONG - 1) { if (digits == MAX_LENGTH_OF_LONG - 1) { int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1); if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) { goto use_double; } } else { goto use_double; } } #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_LONG(*rval, parse_iv(start + 2)); return 1; } #line 1197 "ext/standard/var_unserializer.c" yy83: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= '2') goto yy18; yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 551 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_BOOL(*rval, parse_iv(start + 2)); return 1; } #line 1212 "ext/standard/var_unserializer.c" yy87: ++YYCURSOR; #line 544 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_NULL(*rval); return 1; } #line 1222 "ext/standard/var_unserializer.c" yy89: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy90; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; goto yy18; } yy90: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy91: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; if (yych != ';') goto yy18; ++YYCURSOR; #line 521 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval == *rval_ref) return 0; if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_UNSET_ISREF_PP(rval); return 1; } #line 1268 "ext/standard/var_unserializer.c" yy95: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy96; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; goto yy18; } yy96: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy97: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; if (yych != ';') goto yy18; ++YYCURSOR; #line 500 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_SET_ISREF_PP(rval); return 1; } #line 1312 "ext/standard/var_unserializer.c" } #line 863 "ext/standard/var_unserializer.re" return 0; }
PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER) { const unsigned char *cursor, *limit, *marker, *start; zval **rval_ref; limit = max; cursor = *p; if (YYCURSOR >= YYLIMIT) { return 0; } if (var_hash && cursor[0] != 'R') { var_push(var_hash, rval); } start = cursor; #line 501 "ext/standard/var_unserializer.c" { YYCTYPE yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7); yych = *YYCURSOR; switch (yych) { case 'C': case 'O': goto yy13; case 'N': goto yy5; case 'R': goto yy2; case 'S': goto yy10; case 'a': goto yy11; case 'b': goto yy6; case 'd': goto yy8; case 'i': goto yy7; case 'o': goto yy12; case 'r': goto yy4; case 's': goto yy9; case '}': goto yy14; default: goto yy16; } yy2: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy95; yy3: #line 875 "ext/standard/var_unserializer.re" { return 0; } #line 563 "ext/standard/var_unserializer.c" yy4: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy89; goto yy3; yy5: yych = *++YYCURSOR; if (yych == ';') goto yy87; goto yy3; yy6: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy83; goto yy3; yy7: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy77; goto yy3; yy8: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy53; goto yy3; yy9: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy46; goto yy3; yy10: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy39; goto yy3; yy11: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy32; goto yy3; yy12: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy25; goto yy3; yy13: yych = *(YYMARKER = ++YYCURSOR); if (yych == ':') goto yy17; goto yy3; yy14: ++YYCURSOR; #line 869 "ext/standard/var_unserializer.re" { /* this is the case where we have less data than planned */ php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data"); return 0; /* not sure if it should be 0 or 1 here? 
*/ } #line 612 "ext/standard/var_unserializer.c" yy16: yych = *++YYCURSOR; goto yy3; yy17: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych == '+') goto yy19; yy18: YYCURSOR = YYMARKER; goto yy3; yy19: yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } goto yy18; yy20: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yybm[0+yych] & 128) { goto yy20; } if (yych <= '/') goto yy18; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 717 "ext/standard/var_unserializer.re" { size_t len, len2, len3, maxlen; long elements; char *class_name; zend_class_entry *ce; zend_class_entry **pce; int incomplete_class = 0; int custom_object = 0; zval *user_func; zval *retval_ptr; zval **args[1]; zval *arg_func_name; if (!var_hash) return 0; if (*start == 'C') { custom_object = 1; } INIT_PZVAL(*rval); len2 = len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len || len == 0) { *p = start + 2; return 0; } class_name = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR+1) != ':') { *p = YYCURSOR+1; return 0; } len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\"); if (len3 != len) { *p = YYCURSOR + len3 - len; return 0; } class_name = estrndup(class_name, len); do { /* Try to find class directly */ BG(serialize_lock)++; if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } ce = *pce; break; } BG(serialize_lock)--; if (EG(exception)) { efree(class_name); return 0; } /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { incomplete_class = 1; ce = PHP_IC_ENTRY; break; } /* Call unserialize callback */ MAKE_STD_ZVAL(user_func); ZVAL_STRING(user_func, PG(unserialize_callback_func), 1); args[0] = &arg_func_name; MAKE_STD_ZVAL(arg_func_name); ZVAL_STRING(arg_func_name, class_name, 1); BG(serialize_lock)++; if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) { BG(serialize_lock)--; if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } BG(serialize_lock)--; if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { efree(class_name); zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); return 0; } /* The callback function may have defined the class */ if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) { ce = *pce; } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val); incomplete_class = 1; ce = PHP_IC_ENTRY; } 
zval_ptr_dtor(&user_func); zval_ptr_dtor(&arg_func_name); break; } while (1); *p = YYCURSOR; if (custom_object) { int ret; ret = object_custom(UNSERIALIZE_PASSTHRU, ce); if (ret && incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return ret; } elements = object_common1(UNSERIALIZE_PASSTHRU, ce); if (elements < 0) { efree(class_name); return 0; } if (incomplete_class) { php_store_class_name(*rval, class_name, len2); } efree(class_name); return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 795 "ext/standard/var_unserializer.c" yy25: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy26; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; goto yy18; } yy26: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy27: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy27; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 704 "ext/standard/var_unserializer.re" { long elements; if (!var_hash) return 0; INIT_PZVAL(*rval); elements = object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR); if (elements < 0) { return 0; } return object_common2(UNSERIALIZE_PASSTHRU, elements); } #line 833 "ext/standard/var_unserializer.c" yy32: yych = *++YYCURSOR; if (yych == '+') goto yy33; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; goto yy18; yy33: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy34: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy34; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '{') goto yy18; ++YYCURSOR; #line 683 "ext/standard/var_unserializer.re" { long elements = parse_iv(start + 2); /* use iv() not uiv() in order to check data range */ *p = YYCURSOR; if (!var_hash) return 0; if (elements < 0) { return 0; } INIT_PZVAL(*rval); array_init_size(*rval, elements); if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } #line 875 "ext/standard/var_unserializer.c" yy39: yych = *++YYCURSOR; if (yych == '+') goto yy40; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; goto yy18; yy40: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy41: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy41; if (yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 648 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) { return 0; } if (*(YYCURSOR) != '"') { efree(str); *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { efree(str); *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 0); return 1; } #line 931 "ext/standard/var_unserializer.c" yy46: yych = *++YYCURSOR; if (yych == '+') goto yy47; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; goto yy18; yy47: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy48: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy48; if 
(yych >= ';') goto yy18; yych = *++YYCURSOR; if (yych != '"') goto yy18; ++YYCURSOR; #line 615 "ext/standard/var_unserializer.re" { size_t len, maxlen; char *str; len = parse_uiv(start + 2); maxlen = max - YYCURSOR; if (maxlen < len) { *p = start + 2; return 0; } str = (char*)YYCURSOR; YYCURSOR += len; if (*(YYCURSOR) != '"') { *p = YYCURSOR; return 0; } if (*(YYCURSOR + 1) != ';') { *p = YYCURSOR + 1; return 0; } YYCURSOR += 2; *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_STRINGL(*rval, str, len, 1); return 1; } #line 985 "ext/standard/var_unserializer.c" yy53: yych = *++YYCURSOR; if (yych <= '/') { if (yych <= ',') { if (yych == '+') goto yy57; goto yy18; } else { if (yych <= '-') goto yy55; if (yych <= '.') goto yy60; goto yy18; } } else { if (yych <= 'I') { if (yych <= '9') goto yy58; if (yych <= 'H') goto yy18; goto yy56; } else { if (yych != 'N') goto yy18; } } yych = *++YYCURSOR; if (yych == 'A') goto yy76; goto yy18; yy55: yych = *++YYCURSOR; if (yych <= '/') { if (yych == '.') goto yy60; goto yy18; } else { if (yych <= '9') goto yy58; if (yych != 'I') goto yy18; } yy56: yych = *++YYCURSOR; if (yych == 'N') goto yy72; goto yy18; yy57: yych = *++YYCURSOR; if (yych == '.') goto yy60; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy58: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ':') { if (yych <= '.') { if (yych <= '-') goto yy18; goto yy70; } else { if (yych <= '/') goto yy18; if (yych <= '9') goto yy58; goto yy18; } } else { if (yych <= 'E') { if (yych <= ';') goto yy63; if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy60: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy61: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy61; if (yych <= ':') goto yy18; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy63: ++YYCURSOR; #line 605 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 use_double: #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL)); return 1; } #line 1083 "ext/standard/var_unserializer.c" yy65: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy66; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; } yy66: yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy69; goto yy18; } else { if (yych <= '-') goto yy69; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; } yy67: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; if (yych == ';') goto yy63; goto yy18; yy69: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy67; goto yy18; yy70: ++YYCURSOR; if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4); yych = *YYCURSOR; if (yych <= ';') { if (yych <= '/') goto yy18; if (yych <= '9') goto yy70; if (yych <= ':') goto yy18; goto yy63; } else { if (yych <= 'E') { if (yych <= 'D') goto yy18; goto yy65; } else { if (yych == 'e') goto yy65; goto yy18; } } yy72: yych = *++YYCURSOR; if (yych != 'F') goto yy18; yy73: yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 590 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); if (!strncmp(start + 2, "NAN", 3)) { ZVAL_DOUBLE(*rval, php_get_nan()); } else if (!strncmp(start + 2, "INF", 3)) { ZVAL_DOUBLE(*rval, 
php_get_inf()); } else if (!strncmp(start + 2, "-INF", 4)) { ZVAL_DOUBLE(*rval, -php_get_inf()); } return 1; } #line 1157 "ext/standard/var_unserializer.c" yy76: yych = *++YYCURSOR; if (yych == 'N') goto yy73; goto yy18; yy77: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy78; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; goto yy18; } yy78: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy79: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy79; if (yych != ';') goto yy18; ++YYCURSOR; #line 563 "ext/standard/var_unserializer.re" { #if SIZEOF_LONG == 4 int digits = YYCURSOR - start - 3; if (start[2] == '-' || start[2] == '+') { digits--; } /* Use double for large long values that were serialized on a 64-bit system */ if (digits >= MAX_LENGTH_OF_LONG - 1) { if (digits == MAX_LENGTH_OF_LONG - 1) { int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1); if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) { goto use_double; } } else { goto use_double; } } #endif *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_LONG(*rval, parse_iv(start + 2)); return 1; } #line 1211 "ext/standard/var_unserializer.c" yy83: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= '2') goto yy18; yych = *++YYCURSOR; if (yych != ';') goto yy18; ++YYCURSOR; #line 556 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_BOOL(*rval, parse_iv(start + 2)); return 1; } #line 1226 "ext/standard/var_unserializer.c" yy87: ++YYCURSOR; #line 549 "ext/standard/var_unserializer.re" { *p = YYCURSOR; INIT_PZVAL(*rval); ZVAL_NULL(*rval); return 1; } #line 1236 "ext/standard/var_unserializer.c" yy89: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy90; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; goto yy18; } yy90: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy91: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy91; if (yych != ';') goto yy18; ++YYCURSOR; #line 526 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval == *rval_ref) return 0; if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_UNSET_ISREF_PP(rval); return 1; } #line 1282 "ext/standard/var_unserializer.c" yy95: yych = *++YYCURSOR; if (yych <= ',') { if (yych != '+') goto yy18; } else { if (yych <= '-') goto yy96; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; goto yy18; } yy96: yych = *++YYCURSOR; if (yych <= '/') goto yy18; if (yych >= ':') goto yy18; yy97: ++YYCURSOR; if (YYLIMIT <= YYCURSOR) YYFILL(1); yych = *YYCURSOR; if (yych <= '/') goto yy18; if (yych <= '9') goto yy97; if (yych != ';') goto yy18; ++YYCURSOR; #line 505 "ext/standard/var_unserializer.re" { long id; *p = YYCURSOR; if (!var_hash) return 0; id = parse_iv(start + 2) - 1; if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) { return 0; } if (*rval != NULL) { var_push_dtor_no_addref(var_hash, rval); } *rval = *rval_ref; Z_ADDREF_PP(rval); Z_SET_ISREF_PP(rval); return 1; } #line 1326 "ext/standard/var_unserializer.c" } #line 877 "ext/standard/var_unserializer.re" return 0; }
{'added': [(408, '\tif( *p >= max - 2) {'), (409, '\t\tzend_error(E_WARNING, "Bad unserialize data");'), (410, '\t\treturn -1;'), (411, '\t}'), (412, ''), (423, '\t\treturn -1;'), (500, '#line 501 "ext/standard/var_unserializer.c"'), (560, '#line 875 "ext/standard/var_unserializer.re"'), (562, '#line 563 "ext/standard/var_unserializer.c"'), (605, '#line 869 "ext/standard/var_unserializer.re"'), (611, '#line 612 "ext/standard/var_unserializer.c"'), (642, '#line 717 "ext/standard/var_unserializer.re"'), (782, '\tif (elements < 0) {'), (783, '\t efree(class_name);'), (784, '\t return 0;'), (785, '\t}'), (786, ''), (794, '#line 795 "ext/standard/var_unserializer.c"'), (819, '#line 704 "ext/standard/var_unserializer.re"'), (821, '\tlong elements;'), (826, '\telements = object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR);'), (827, '\tif (elements < 0) {'), (828, '\t\treturn 0;'), (829, '\t}'), (830, '\treturn object_common2(UNSERIALIZE_PASSTHRU, elements);'), (832, '#line 833 "ext/standard/var_unserializer.c"'), (853, '#line 683 "ext/standard/var_unserializer.re"'), (874, '#line 875 "ext/standard/var_unserializer.c"'), (895, '#line 648 "ext/standard/var_unserializer.re"'), (930, '#line 931 "ext/standard/var_unserializer.c"'), (951, '#line 615 "ext/standard/var_unserializer.re"'), (984, '#line 985 "ext/standard/var_unserializer.c"'), (1072, '#line 605 "ext/standard/var_unserializer.re"'), (1082, '#line 1083 "ext/standard/var_unserializer.c"'), (1141, '#line 590 "ext/standard/var_unserializer.re"'), (1156, '#line 1157 "ext/standard/var_unserializer.c"'), (1183, '#line 563 "ext/standard/var_unserializer.re"'), (1210, '#line 1211 "ext/standard/var_unserializer.c"'), (1218, '#line 556 "ext/standard/var_unserializer.re"'), (1225, '#line 1226 "ext/standard/var_unserializer.c"'), (1228, '#line 549 "ext/standard/var_unserializer.re"'), (1235, '#line 1236 "ext/standard/var_unserializer.c"'), (1258, '#line 526 "ext/standard/var_unserializer.re"'), (1281, '#line 1282 "ext/standard/var_unserializer.c"'), (1304, '#line 505 "ext/standard/var_unserializer.re"'), (1325, '#line 1326 "ext/standard/var_unserializer.c"'), (1327, '#line 877 "ext/standard/var_unserializer.re"')], 'deleted': [(418, '\t\treturn 0;'), (495, '#line 496 "ext/standard/var_unserializer.c"'), (555, '#line 861 "ext/standard/var_unserializer.re"'), (557, '#line 558 "ext/standard/var_unserializer.c"'), (600, '#line 855 "ext/standard/var_unserializer.re"'), (606, '#line 607 "ext/standard/var_unserializer.c"'), (637, '#line 708 "ext/standard/var_unserializer.re"'), (784, '#line 785 "ext/standard/var_unserializer.c"'), (809, '#line 699 "ext/standard/var_unserializer.re"'), (815, '\treturn object_common2(UNSERIALIZE_PASSTHRU,'), (816, '\t\t\tobject_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR));'), (818, '#line 819 "ext/standard/var_unserializer.c"'), (839, '#line 678 "ext/standard/var_unserializer.re"'), (860, '#line 861 "ext/standard/var_unserializer.c"'), (881, '#line 643 "ext/standard/var_unserializer.re"'), (916, '#line 917 "ext/standard/var_unserializer.c"'), (937, '#line 610 "ext/standard/var_unserializer.re"'), (970, '#line 971 "ext/standard/var_unserializer.c"'), (1058, '#line 600 "ext/standard/var_unserializer.re"'), (1068, '#line 1069 "ext/standard/var_unserializer.c"'), (1127, '#line 585 "ext/standard/var_unserializer.re"'), (1142, '#line 1143 "ext/standard/var_unserializer.c"'), (1169, '#line 558 "ext/standard/var_unserializer.re"'), (1196, '#line 1197 "ext/standard/var_unserializer.c"'), (1204, '#line 551 
"ext/standard/var_unserializer.re"'), (1211, '#line 1212 "ext/standard/var_unserializer.c"'), (1214, '#line 544 "ext/standard/var_unserializer.re"'), (1221, '#line 1222 "ext/standard/var_unserializer.c"'), (1244, '#line 521 "ext/standard/var_unserializer.re"'), (1267, '#line 1268 "ext/standard/var_unserializer.c"'), (1290, '#line 500 "ext/standard/var_unserializer.re"'), (1311, '#line 1312 "ext/standard/var_unserializer.c"'), (1313, '#line 863 "ext/standard/var_unserializer.re"')]}
num_lines_added: 47
num_lines_deleted: 33
num_lines_in_file: 1,073
num_tokens_in_file: 6,905
https://github.com/php/php-src
CVE-2016-10161
['CWE-125']
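The diff recorded above hardens php_var_unserialize by making object_common1 report failure as -1 and by checking that element count in every caller before it reaches object_common2. The short C program below is not taken from the PHP sources; parse_count and unserialize_object_props are hypothetical names used only to sketch that guard pattern (reject a negative or malformed count before any properties are consumed).

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Hypothetical stand-in for the count parser: read a "<count>:" prefix
 * from the payload and return LONG_MIN when the prefix is malformed. */
static long parse_count(const char *s) {
    char *end = NULL;
    long v = strtol(s, &end, 10);
    if (end == s || *end != ':') {
        return LONG_MIN;   /* not a valid "<count>:" prefix */
    }
    return v;
}

/* Guard pattern mirrored from the patch: bail out on a negative count
 * instead of forwarding it to the code that walks the properties. */
static int unserialize_object_props(const char *payload) {
    long elements = parse_count(payload);
    if (elements < 0) {    /* the check the fix adds at each call site */
        fprintf(stderr, "bad element count\n");
        return 0;
    }
    printf("would read %ld properties\n", elements);
    return 1;
}

int main(void) {
    unserialize_object_props("3:{...}");   /* accepted */
    unserialize_object_props("-1:{...}");  /* rejected by the new check */
    return 0;
}

The same idea appears twice in the patch: the producer signals failure with a negative value, and every caller tests for it before allocating or iterating.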
file_name: elf.c
method_name: store_versioninfo_gnu_verdef
/* radare - LGPL - Copyright 2008-2017 - nibble, pancake, alvaro_fe */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <r_types.h> #include <r_util.h> #include "elf.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define bprintf if(bin->verbose)eprintf #define READ8(x, i) r_read_ble8(x + i); i += 1; #define READ16(x, i) r_read_ble16(x + i, bin->endian); i += 2; #define READ32(x, i) r_read_ble32(x + i, bin->endian); i += 4; #define READ64(x, i) r_read_ble64(x + i, bin->endian); i += 8; #define GROWTH_FACTOR (1.5) static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static int handle_e_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static int init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { bprintf ("Warning: read (magic)\n"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine{EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_486=6, " " EM_860=7, EM_MIPS=8, EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11," " EM_UNKNOWN12=12, EM_UNKNOWN13=13, EM_UNKNOWN14=14, " " EM_PA_RISC=15, EM_PARISC=EM_PA_RISC, EM_nCUBE=16, EM_VPP500=17," " EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, " " EM_S390=22, EM_UNKNOWN22=EM_S390, EM_UNKNOWN23=23, EM_UNKNOWN24=24," " EM_UNKNOWN25=25, EM_UNKNOWN26=26, EM_UNKNOWN27=27, EM_UNKNOWN28=28," " EM_UNKNOWN29=29, EM_UNKNOWN30=30, EM_UNKNOWN31=31, EM_UNKNOWN32=32," " EM_UNKNOWN33=33, EM_UNKNOWN34=34, EM_UNKNOWN35=35, EM_V800=36," " EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40, EM_ALPHA=41, EM_SH=42," " EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46, EM_H8_300H=47," " EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51, EM_COLDFIRE=52," " EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_AMD64=62," " EM_X86_64=EM_AMD64, EM_PDSP=63, EM_UNKNOWN64=64, EM_UNKNOWN65=65," " EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69, EM_68HC11=70," " EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75, " " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80," " EM_HUANY=81, EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86," " EM_V850=87, EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91," " EM_OPENRISC=92, EM_ARC_A5=93, EM_XTENSA=94, EM_NUM=95};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exqqqxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exxxxxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #endif 
bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (Elf_(Ehdr))); if (len < 1) { bprintf ("Warning: read (ehdr)\n"); return false; } memcpy (&bin->ehdr.e_ident, ehdr, 16); i = 16; bin->ehdr.e_type = READ16 (ehdr, i) bin->ehdr.e_machine = READ16 (ehdr, i) bin->ehdr.e_version = READ32 (ehdr, i) #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i) bin->ehdr.e_phoff = READ64 (ehdr, i) bin->ehdr.e_shoff = READ64 (ehdr, i) #else bin->ehdr.e_entry = READ32 (ehdr, i) bin->ehdr.e_phoff = READ32 (ehdr, i) bin->ehdr.e_shoff = READ32 (ehdr, i) #endif bin->ehdr.e_flags = READ32 (ehdr, i) bin->ehdr.e_ehsize = READ16 (ehdr, i) bin->ehdr.e_phentsize = READ16 (ehdr, i) bin->ehdr.e_phnum = READ16 (ehdr, i) bin->ehdr.e_shentsize = READ16 (ehdr, i) bin->ehdr.e_shnum = READ16 (ehdr, i) bin->ehdr.e_shstrndx = READ16 (ehdr, i) return handle_e_ident (bin); // Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int i, j, len; if (!bin->ehdr.e_phnum) { return false; } if (bin->phdr) { return true; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } if (!(bin->phdr = calloc (phdr_size, 1))) { perror ("malloc (phdr)"); return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)), phdr, sizeof (Elf_(Phdr))); if (len < 1) { bprintf ("Warning: read (phdr)\n"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j) #if R_BIN_ELF64 bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_offset = READ64 (phdr, j) bin->phdr[i].p_vaddr = READ64 (phdr, j) bin->phdr[i].p_paddr = READ64 (phdr, j) bin->phdr[i].p_filesz = READ64 (phdr, j) bin->phdr[i].p_memsz = READ64 (phdr, j) bin->phdr[i].p_align = READ64 (phdr, j) #else bin->phdr[i].p_offset = READ32 (phdr, j) bin->phdr[i].p_vaddr = READ32 (phdr, j) bin->phdr[i].p_paddr = READ32 (phdr, j) bin->phdr[i].p_filesz = READ32 (phdr, j) bin->phdr[i].p_memsz = READ32 (phdr, j) bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_align = READ32 (phdr, j) #endif } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > 
pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) #endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static int init_strtab(ELFOBJ *bin) { if (bin->strtab || !bin->shdr) { return false; } if (bin->ehdr.e_shstrndx != SHN_UNDEF && (bin->ehdr.e_shstrndx >= bin->ehdr.e_shnum || (bin->ehdr.e_shstrndx >= SHN_LORESERVE && bin->ehdr.e_shstrndx < SHN_HIRESERVE))) return false; /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[bin->ehdr.e_shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[bin->ehdr.e_shstrndx].sh_size) { return false; } bin->shstrtab_section = 
bin->strtab_section = &bin->shdr[bin->ehdr.e_shstrndx]; bin->shstrtab_size = bin->strtab_section->sh_size; if (bin->shstrtab_size > bin->size) { return false; } if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) { perror ("malloc"); bin->shstrtab = NULL; return false; } if (bin->shstrtab_section->sh_offset > bin->size) { R_FREE (bin->shstrtab); return false; } if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) { R_FREE (bin->shstrtab); return false; } if (r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size + 1) < 1) { bprintf ("Warning: read (shstrtab) at 0x%"PFMT64x"\n", (ut64) bin->shstrtab_section->sh_offset); R_FREE (bin->shstrtab); return false; } bin->shstrtab[bin->shstrtab_section->sh_size] = '\0'; sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0); sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0); return true; } static int init_dynamic_section(struct Elf_(r_bin_elf_obj_t) *bin) { Elf_(Dyn) *dyn = NULL; Elf_(Dyn) d = {0}; Elf_(Addr) strtabaddr = 0; ut64 offset = 0; char *strtab = NULL; size_t relentry = 0, strsize = 0; int entries; int i, j, len, r; ut8 sdyn[sizeof (Elf_(Dyn))] = {0}; ut32 dyn_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return false; } for (i = 0; i < bin->ehdr.e_phnum ; i++) { if (bin->phdr[i].p_type == PT_DYNAMIC) { dyn_size = bin->phdr[i].p_filesz; break; } } if (i == bin->ehdr.e_phnum) { return false; } if (bin->phdr[i].p_filesz > bin->size) { return false; } if (bin->phdr[i].p_offset > bin->size) { return false; } if (bin->phdr[i].p_offset + sizeof(Elf_(Dyn)) > bin->size) { return false; } for (entries = 0; entries < (dyn_size / sizeof (Elf_(Dyn))); entries++) { j = 0; len = r_buf_read_at (bin->b, bin->phdr[i].p_offset + entries * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { goto beach; } #if R_BIN_ELF64 d.d_tag = READ64 (sdyn, j) #else d.d_tag = READ32 (sdyn, j) #endif if (d.d_tag == DT_NULL) { break; } } if (entries < 1) { return false; } dyn = (Elf_(Dyn)*)calloc (entries, sizeof (Elf_(Dyn))); if (!dyn) { return false; } if (!UT32_MUL (&dyn_size, entries, sizeof (Elf_(Dyn)))) { goto beach; } if (!dyn_size) { goto beach; } offset = Elf_(r_bin_elf_v2p) (bin, bin->phdr[i].p_vaddr); if (offset > bin->size || offset + dyn_size > bin->size) { goto beach; } for (i = 0; i < entries; i++) { j = 0; r_buf_read_at (bin->b, offset + i * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { bprintf("Warning: read (dyn)\n"); } #if R_BIN_ELF64 dyn[i].d_tag = READ64 (sdyn, j) dyn[i].d_un.d_ptr = READ64 (sdyn, j) #else dyn[i].d_tag = READ32 (sdyn, j) dyn[i].d_un.d_ptr = READ32 (sdyn, j) #endif switch (dyn[i].d_tag) { case DT_STRTAB: strtabaddr = Elf_(r_bin_elf_v2p) (bin, dyn[i].d_un.d_ptr); break; case DT_STRSZ: strsize = dyn[i].d_un.d_val; break; case DT_PLTREL: bin->is_rela = dyn[i].d_un.d_val; break; case DT_RELAENT: relentry = dyn[i].d_un.d_val; break; default: if ((dyn[i].d_tag >= DT_VERSYM) && (dyn[i].d_tag <= DT_VERNEEDNUM)) { bin->version_info[DT_VERSIONTAGIDX (dyn[i].d_tag)] = dyn[i].d_un.d_val; } break; } } if (!bin->is_rela) { bin->is_rela = sizeof (Elf_(Rela)) == relentry? 
DT_RELA : DT_REL; } if (!strtabaddr || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size) { if (!strtabaddr) { bprintf ("Warning: section.shstrtab not found or invalid\n"); } goto beach; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { goto beach; } if (strtabaddr + strsize > bin->size) { free (strtab); goto beach; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); goto beach; } bin->dyn_buf = dyn; bin->dyn_entries = entries; bin->strtab = strtab; bin->strtab_size = strsize; r = Elf_(r_bin_elf_has_relro)(bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); return true; beach: free (dyn); return false; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { int i; if (!bin->g_sections) { return NULL; } for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH-1)) { return &bin->g_sections[i]; } } return NULL; } static char *get_ver_flags(ut32 flags) { static char buff[32]; buff[0] = 0; if (!flags) { return "none"; } if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb *store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { int i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Elf_(Shdr) *link_shdr = NULL; Sdb *sdb = sdb_new0(); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); for (i = 0; i < num_entries; i += 4) { int j; int check_def; char key[32] = {0}; Sdb *sdb_entry = sdb_new0 (); snprintf (key, sizeof (key), "entry%d", i / 4); sdb_ns_set (sdb, key, sdb_entry); sdb_num_set (sdb_entry, "idx", i, 0); for (j = 0; (j < 4) && (i + j) < num_entries; ++j) { int k; char *tmp_val = NULL; snprintf (key, sizeof (key), "value%d", j); switch (data[i + j]) { case 0: sdb_set 
(sdb_entry, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb_entry, key, "1 (*global*)", 0); break; default: tmp_val = sdb_fmt (0, "%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { bprintf ("Warning: Cannot read Verneed for Versym\n"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k) vn.vn_cnt = READ16 (svn, k) vn.vn_file = READ32 (svn, k) vn.vn_aux = READ32 (svn, k) vn.vn_next = READ32 (svn, k) a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) { bprintf ("Warning: Cannot read Vernaux for Versym\n"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k) vna.vna_flags = READ16 (svna, k) vna.vna_other = READ16 (svna, k) vna.vna_name = READ32 (svna, k) vna.vna_next = READ32 (svna, k) a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } sdb_set (sdb_entry, key, sdb_fmt (0, "%s(%s)", tmp_val, bin->strtab + vna.vna_name), 0); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { bprintf ("Warning: Cannot read Verdef for Versym\n"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k) vd.vd_flags = READ16 (svd, k) vd.vd_ndx = READ16 (svd, k) vd.vd_cnt = READ16 (svd, k) vd.vd_hash = READ32 (svd, k) vd.vd_aux = READ32 (svd, k) vd.vd_next = READ32 (svd, k) offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { bprintf ("Warning: Cannot read Verdaux for Versym\n"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k) vda.vda_next = READ32 (svda, k) if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; sdb_set (sdb_entry, key, sdb_fmt (0,"%s(%s%-*s)", tmp_val, name, (int)(12 - strlen (name)),")") , 0); } } } } } beach: free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if ((int)shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = 
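/* SHT_GNU_verdef layout: a chain of Elf_(Verdef) records, each followed by
 * vd_cnt Elf_(Verdaux) name records; vd_aux, vda_next and vd_next are byte
 * offsets used to walk the chain, so every hop below is re-checked against
 * the end of the section buffer. */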
&bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1 || vstart + vdaux < vstart) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if ((int)shdr->sh_size < 1) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { 
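/* SHT_GNU_verneed (.gnu.version_r): each Elf_(Verneed) names a dependency
 * file and carries vn_cnt Elf_(Vernaux) entries (one per required version),
 * linked via vn_aux/vna_next and chained to the next record by vn_next;
 * sh_info holds the number of Verneed records and bounds the outer loop. */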
section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; int i; if (!bin || !bin->shdr) { return NULL; } if (!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb 
= NULL; char key[32] = {0}; int size = bin->shdr[i].sh_size; if (size - (i*sizeof(Elf_(Shdr)) > bin->size)) { size = bin->size - (i*sizeof(Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } for (i = 0; i < bin->ehdr.e_shnum; ++i) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { bprintf("Warning: Cannot allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static int elf_init(ELFOBJ *bin) { bin->phdr = NULL; bin->shdr = NULL; bin->strtab = NULL; bin->shstrtab = NULL; bin->strtab_size = 0; bin->strtab_section = NULL; bin->dyn_buf = NULL; bin->dynstr = NULL; ZERO_FILL (bin->version_info); bin->g_sections = NULL; bin->g_symbols = NULL; bin->g_imports = NULL; /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin)) { bprintf ("Warning: Cannot initialize program headers\n"); } if (!init_shdr (bin)) { bprintf ("Warning: Cannot initialize section headers\n"); } if (!init_strtab (bin)) { bprintf ("Warning: Cannot initialize strings table\n"); } if (!init_dynstr (bin)) { bprintf ("Warning: Cannot initialize dynamic strings\n"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_get_static)(bin)) bprintf ("Warning: Cannot initialize dynamic section\n"); bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? 
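/* The section lookup helpers return either the file offset or the rva of
 * the named section; UT64_MAX is the shared "not found" sentinel (callers
 * compare against -1, which is the same value for ut64). */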
section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } #define REL (is_rela ? (void*)rela : (void*)rel) #define REL_BUF is_rela ? (ut8*)(&rela[k]) : (ut8*)(&rel[k]) #define REL_OFFSET is_rela ? rela[k].r_offset : rel[k].r_offset #define REL_TYPE is_rela ? rela[k].r_info : rel[k].r_info static ut64 get_import_addr(ELFOBJ *bin, int sym) { Elf_(Rel) *rel = NULL; Elf_(Rela) *rela = NULL; ut8 rl[sizeof (Elf_(Rel))] = {0}; ut8 rla[sizeof (Elf_(Rela))] = {0}; RBinElfSection *rel_sec = NULL; Elf_(Addr) plt_sym_addr = -1; ut64 got_addr, got_offset; ut64 plt_addr; int j, k, tsize, len, nrel; bool is_rela = false; const char *rel_sect[] = { ".rel.plt", ".rela.plt", ".rel.dyn", ".rela.dyn", NULL }; const char *rela_sect[] = { ".rela.plt", ".rel.plt", ".rela.dyn", ".rel.dyn", NULL }; if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return -1; } if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) == -1 && (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) == -1) { return -1; } if ((got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got")) == -1 && (got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got.plt")) == -1) { return -1; } if (bin->is_rela == DT_REL) { j = 0; while (!rel_sec && rel_sect[j]) { rel_sec = get_section_by_name (bin, rel_sect[j++]); } tsize = sizeof (Elf_(Rel)); } else if (bin->is_rela == DT_RELA) { j = 0; while (!rel_sec && rela_sect[j]) { rel_sec = get_section_by_name (bin, rela_sect[j++]); } is_rela = true; tsize = sizeof (Elf_(Rela)); } if (!rel_sec) { return -1; } if (rel_sec->size < 1) { return -1; } nrel = (ut32)((int)rel_sec->size / (int)tsize); if (nrel < 1) { return -1; } if (is_rela) { rela = calloc (nrel, tsize); if (!rela) { return -1; } } else { rel = calloc (nrel, tsize); if (!rel) { return -1; } } for (j = k = 0; j < rel_sec->size && k < nrel; j += tsize, k++) { int l = 0; if (rel_sec->offset + j > bin->size) { goto out; } if (rel_sec->offset + j + tsize > bin->size) { goto out; } len = r_buf_read_at ( bin->b, rel_sec->offset + j, is_rela ? rla : rl, is_rela ? 
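/* get_import_addr walks the matched .rel(a).plt / .rel(a).dyn section entry
 * by entry, decoding each relocation into rel[]/rela[] (the REL_* macros
 * above select the right array), and resolves the PLT stub address for the
 * requested symbol index with per-architecture handling further down. */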
sizeof (Elf_ (Rela)) : sizeof (Elf_ (Rel))); if (len < 1) { goto out; } #if R_BIN_ELF64 if (is_rela) { rela[k].r_offset = READ64 (rla, l) rela[k].r_info = READ64 (rla, l) rela[k].r_addend = READ64 (rla, l) } else { rel[k].r_offset = READ64 (rl, l) rel[k].r_info = READ64 (rl, l) } #else if (is_rela) { rela[k].r_offset = READ32 (rla, l) rela[k].r_info = READ32 (rla, l) rela[k].r_addend = READ32 (rla, l) } else { rel[k].r_offset = READ32 (rl, l) rel[k].r_info = READ32 (rl, l) } #endif int reloc_type = ELF_R_TYPE (REL_TYPE); int reloc_sym = ELF_R_SYM (REL_TYPE); if (reloc_sym == sym) { int of = REL_OFFSET; of = of - got_addr + got_offset; switch (bin->ehdr.e_machine) { case EM_PPC: case EM_PPC64: { RBinElfSection *s = get_section_by_name (bin, ".plt"); if (s) { ut8 buf[4]; ut64 base; len = r_buf_read_at (bin->b, s->offset, buf, sizeof (buf)); if (len < 4) { goto out; } base = r_read_be32 (buf); base -= (nrel * 16); base += (k * 16); plt_addr = base; free (REL); return plt_addr; } } break; case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return -1; } if (reloc_type == R_386_PC16) { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } else { bprintf ("Unknown sparc reloc type %d\n", reloc_type); } /* SPARC */ break; case EM_ARM: case EM_AARCH64: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return UT32_MAX; } switch (reloc_type) { case R_386_8: { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } break; case 1026: // arm64 aarch64 plt_sym_addr = plt_addr + k * 16 + 32; goto done; default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); break; } break; case EM_386: case EM_X86_64: switch (reloc_type) { case 1: // unknown relocs found in voidlinux for x86-64 // break; case R_386_GLOB_DAT: case R_386_JMP_SLOT: { ut8 buf[8]; if (of + sizeof(Elf_(Addr)) < bin->size) { // ONLY FOR X86 if (of > bin->size || of + sizeof (Elf_(Addr)) > bin->size) { goto out; } len = r_buf_read_at (bin->b, of, buf, sizeof (Elf_(Addr))); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? r_read_le32 (buf) : r_read_le64 (buf); if (!plt_sym_addr) { //XXX HACK ALERT!!!! full relro?? try to fix it //will there always be .plt.got, what would happen if is .got.plt? RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro)(bin) < R_ELF_PART_RELRO || !s) { goto done; } plt_addr = s->offset; of = of + got_addr - got_offset; while (plt_addr + 2 + 4 < s->offset + s->size) { /*we try to locate the plt entry that correspond with the relocation since got does not point back to .plt. In this case it has the following form ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 to remove jmp opcode and get the imm reading 4 and if RIP (plt_addr + 6) + imm == rel->offset return plt_addr, that will be our sym addr perhaps this hack doesn't work on 32 bits */ len = r_buf_read_at (bin->b, plt_addr + 2, buf, 4); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? 
r_read_le32 (buf) : r_read_le64 (buf); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == of) { plt_sym_addr = plt_addr; goto done; } else if (plt_sym_addr == of) { plt_sym_addr = plt_addr; goto done; } plt_addr += 8; } } else { plt_sym_addr -= 6; } goto done; } break; } default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); free (REL); return of; break; } break; case 8: // MIPS32 BIG ENDIAN relocs { RBinElfSection *s = get_section_by_name (bin, ".rela.plt"); if (s) { ut8 buf[1024]; const ut8 *base; plt_addr = s->rva + s->size; len = r_buf_read_at (bin->b, s->offset + s->size, buf, sizeof (buf)); if (len != sizeof (buf)) { // oops } base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8*)"\x3c\x0f\x00", 3, 4); if (base) { plt_addr += (int)(size_t)(base - buf); } else { plt_addr += 108 + 8; // HARDCODED HACK } plt_addr += k * 16; free (REL); return plt_addr; } } break; default: bprintf ("Unsupported relocs type %d for arch %d\n", reloc_type, bin->ehdr.e_machine); break; } } } done: free (REL); return plt_sym_addr; out: free (REL); return -1; } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { int i; bool haveBindNow = false; bool haveGnuRelro = false; if (bin && bin->dyn_buf) { for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_BIND_NOW: haveBindNow = true; break; case DT_FLAGS: for (i++; i < bin->dyn_entries ; i++) { ut32 dTag = bin->dyn_buf[i].d_tag; if (!dTag) { break; } switch (dTag) { case DT_FLAGS_1: if (bin->dyn_buf[i].d_un.d_val & DF_1_NOW) { haveBindNow = true; break; } } } break; } } } if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && bin->ehdr.e_type == ET_REL) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX ? 0 : base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } return base == UT64_MAX ? 
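/* get_boffset mirrors get_baddr but works on p_offset instead of p_vaddr:
 * the lowest PT_LOAD file offset, truncated to the ELF page size. */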
0 : base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (init_offset)\n"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry+11, buf, sizeof (buf)) == -1) { bprintf ("Warning: read (get_fini)\n"); return 0; } if (*buf == 0x68) { // push // x86/32 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { ut64 entry; if (!bin) { return 0LL; } entry = bin->ehdr.e_entry; if (!entry) { entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init"); if (entry != UT64_MAX) { return entry; } if (entry == UT64_MAX) { return 0; } } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol; int i; if (!(symbol = Elf_(r_bin_elf_get_symbols) (bin))) { return UT64_MAX; } for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { ut64 paddr = symbol[i].offset; return Elf_(r_bin_elf_p2v) (bin, paddr); } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return 0; } if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (main)\n"); return 0; } // ARM64 if (buf[0x18+3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM ut64 text = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); ut64 text_end = text + bin->size; // ARM-Thumb-Linux if (entry & 1 && !memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00", 6)) { ut32 * ptr = (ut32*)(buf+40-1); if (*ptr &1) { return Elf_(r_bin_elf_v2p) (bin, *ptr -1); } } if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { // endian stuff here ut32 *addr = (ut32*)(buf+0x34); /* 0x00012000 00b0a0e3 mov fp, 0 0x00012004 00e0a0e3 mov lr, 0 */ if (*addr > text && *addr < (text_end)) { return Elf_(r_bin_elf_v2p) (bin, *addr); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; unsigned i; for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 
0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // ARM if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { ut64 addr = r_read_le32 (&buf[48]); return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { bprintf ("Warning: read (maddr) 2\n"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 (&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); } #endif /* linux64 pie main -- probably buggy in some cases */ if (buf[29] == 0x48 && buf[30] == 0x8d) { // lea rdi, qword [rip-0x21c4] ut8 *p = buf + 32; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + 29 + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain>>16 == ventry>>16) { return (ut64)vmain; } } /* find sym.main if possible */ { ut64 m = getmainsymbol (bin); if (m != UT64_MAX) return m; } return UT64_MAX; } int Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { int i; if (!bin->shdr) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { char *str = NULL; ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_memsz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1) { return NULL; } str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { bprintf ("Warning: read (main)\n"); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } int Elf_(r_bin_elf_get_static)(ELFOBJ *bin) { int i; if (!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: 
return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_HEXAGON: return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_SH: return strdup ("sh"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup("ia64"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); 
case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { ut32 e_type; if (!bin) { return NULL; } e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int 
Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) { /* Hack for ARCompact */ if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; } /* Hack for Ps2 */ if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) { // Playstation2 Hack return 64; } } // TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...) switch (mipsType) { case EF_MIPS_ARCH_1: case EF_MIPS_ARCH_2: case EF_MIPS_ARCH_3: case EF_MIPS_ARCH_4: case EF_MIPS_ARCH_5: case EF_MIPS_ARCH_32: return 32; case EF_MIPS_ARCH_64: return 64; case EF_MIPS_ARCH_32R2: return 32; case EF_MIPS_ARCH_64R2: return 64; break; } return 32; } /* Hack for Thumb */ if (bin->ehdr.e_machine == EM_ARM) { if (bin->ehdr.e_type != ET_EXEC) { struct r_bin_elf_symbol_t *symbol; if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) { int i = 0; for (i = 0; !symbol[i].last; i++) { ut64 paddr = symbol[i].offset; if (paddr & 1) { return 16; } } } } { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry & 1) { return 16; } } } switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: return 32; case ELFCLASS64: return 64; case ELFCLASSNONE: default: return 32; // defaults } } static inline int noodle(ELFOBJ *bin, const char *s) { const ut8 *p = bin->b->buf; if (bin->b->length > 64) { p += bin->b->length - 64; } else { return 0; } return r_mem_mem (p, 64, (const ut8 *)s, strlen (s)) != NULL; } static inline int needle(ELFOBJ *bin, const char *s) { if (bin->shstrtab) { ut32 len = bin->shstrtab_size; if (len > 4096) { len = 4096; // avoid slow loading .. can be buggy? } return r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)) != NULL; } return 0; } // TODO: must return const char * all those strings must be const char os[LINUX] or so char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } /* Hack to identify OS */ if (needle (bin, "openbsd")) return strdup ("openbsd"); if (needle (bin, "netbsd")) return strdup ("netbsd"); if (needle (bin, "freebsd")) return strdup ("freebsd"); if (noodle (bin, "BEOS:APP_VERSION")) return strdup ("beos"); if (needle (bin, "GNU")) return strdup ("linux"); return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { int i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int regdelta = (bits == 64)? 0x84: 0x40; // x64 vs x32 int regsize = 160; // for x86-64 ut8 *buf = malloc (regsize); if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + regdelta, buf, regsize) != regsize) { free (buf); bprintf ("Cannot read register state from CORE file\n"); return NULL; } if (len) { *len = regsize; } return buf; } } bprintf ("Cannot find NOTE section\n"); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab? 
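   get_rpath below reads DT_RPATH/DT_RUNPATH entries whose d_un.d_val is an
   index into the dynamic string table loaded by init_dynamic_section
   (DT_STRTAB), which is why it bails out while bin->strtab is still NULL.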
*/ char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { char *ret = NULL; int j; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab) { return NULL; } for (j = 0; j< bin->dyn_entries; j++) { if (bin->dyn_buf[j].d_tag == DT_RPATH || bin->dyn_buf[j].d_tag == DT_RUNPATH) { if (!(ret = calloc (1, ELF_STRING_LENGTH))) { perror ("malloc (rpath)"); return NULL; } if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[ELF_STRING_LENGTH - 1] = '\0'; break; } } return ret; } static size_t get_relocs_num(ELFOBJ *bin) { size_t i, size, ret = 0; /* we need to be careful here, in malformed files the section size might * not be a multiple of a Rel/Rela size; round up so we allocate enough * space. */ #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize)+(entrysize)-1)/(entrysize)) if (!bin->g_sections) { return 0; } size = bin->is_rela == DT_REL ? sizeof (Elf_(Rel)) : sizeof (Elf_(Rela)); for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."))) { if (!bin->is_rela) { size = sizeof (Elf_(Rela)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } else if (!strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."))){ if (!bin->is_rela) { size = sizeof (Elf_(Rel)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } } return ret; #undef NUMENTRIES_ROUNDUP } static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel; #if R_BIN_ELF64 rel.r_offset = READ64 (buf + offset, j) rel.r_info = READ64 (buf + offset, j) #else rel.r_offset = READ32 (buf + offset, j) rel.r_info = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } } RBinElfReloc* Elf_(r_bin_elf_get_relocs)(ELFOBJ *bin) { int res, rel, rela, i, j; size_t reloc_num = 0; RBinElfReloc *ret = NULL; if (!bin || !bin->g_sections) { return NULL; } reloc_num = get_relocs_num (bin); if (!reloc_num) { return NULL; } bin->reloc_num = reloc_num; ret = (RBinElfReloc*)calloc ((size_t)reloc_num + 1, sizeof(RBinElfReloc)); if (!ret) { return NULL; } #if DEAD_CODE ut64 section_text_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".text"); if (section_text_offset == -1) { section_text_offset = 0; } #endif for (i = 0, rel = 0; !bin->g_sections[i].last && rel < reloc_num ; i++) { bool is_rela = 0 == strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela.")); bool is_rel = 0 == strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel.")); if (!is_rela && !is_rel) { continue; } for (j = 0; j < bin->g_sections[i].size; j += res) { if (bin->g_sections[i].size > bin->size) { break; } if (bin->g_sections[i].offset > bin->size) { break; } if (rel >= reloc_num) { bprintf 
("Internal error: ELF relocation buffer too small," "please file a bug report."); break; } if (!bin->is_rela) { rela = is_rela? DT_RELA : DT_REL; } else { rela = bin->is_rela; } res = read_reloc (bin, &ret[rel], rela, bin->g_sections[i].offset + j); if (j + res > bin->g_sections[i].size) { bprintf ("Warning: malformed file, relocation entry #%u is partially beyond the end of section %u.\n", rel, i); } if (bin->ehdr.e_type == ET_REL) { if (bin->g_sections[i].info < bin->ehdr.e_shnum && bin->shdr) { ret[rel].rva = bin->shdr[bin->g_sections[i].info].sh_offset + ret[rel].offset; ret[rel].rva = Elf_(r_bin_elf_p2v) (bin, ret[rel].rva); } else { ret[rel].rva = ret[rel].offset; } } else { ret[rel].rva = ret[rel].offset; ret[rel].offset = Elf_(r_bin_elf_v2p) (bin, ret[rel].offset); } ret[rel].last = 0; if (res < 0) { break; } rel++; } } ret[reloc_num].last = 1; return ret; } RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) { RBinElfLib *ret = NULL; int j, k; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab || *(bin->strtab+1) == '0') { return NULL; } for (j = 0, k = 0; j < bin->dyn_entries; j++) if (bin->dyn_buf[j].d_tag == DT_NEEDED) { RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret[k].name, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; ret[k].last = 0; if (ret[k].name[0]) { k++; } } RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; ret[k].last = 1; return ret; } static RBinElfSection* get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; int i, num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) return NULL; for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_REL: reldyn = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELA: relva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELSZ: reldynsz = bin->dyn_buf[i].d_un.d_val; break; case DT_RELASZ: relasz = bin->dyn_buf[i].d_un.d_val; break; case DT_PLTGOT: pltgotva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_PLTRELSZ: pltgotsz = bin->dyn_buf[i].d_un.d_val; break; case DT_JMPREL: relava = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; default: break; } } ret = calloc (num_sections + 1, sizeof(RBinElfSection)); if (!ret) { return NULL; } i = 0; if (reldyn) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, reldyn); ret[i].rva = reldyn; ret[i].size = reldynsz; strcpy (ret[i].name, ".rel.dyn"); ret[i].last = 0; i++; } if (relava) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relava); ret[i].rva = relava; ret[i].size = pltgotsz; strcpy (ret[i].name, ".rela.plt"); ret[i].last = 0; i++; } if (relva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relva); ret[i].rva = relva; ret[i].size = relasz; strcpy (ret[i].name, ".rel.plt"); ret[i].last = 0; i++; } if (pltgotva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, pltgotva); ret[i].rva = pltgotva; ret[i].size = pltgotsz; strcpy (ret[i].name, ".got.plt"); ret[i].last = 0; i++; } ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[20], invalid_s[20]; int i, nidx, unknown_c=0, invalid_c=0; if (!bin) { return NULL; } if 
(bin->g_sections) { return bin->g_sections; } if (!bin->shdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!(ret = calloc ((bin->ehdr.e_shnum + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (bin->ehdr.e_type == ET_REL) { ret[i].rva = bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } nidx = bin->shdr[i].sh_name; #define SHNAME (int)bin->shdr[i].sh_name #define SHNLEN ELF_STRING_LENGTH - 4 #define SHSIZE (int)bin->shstrtab_size if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s) - 4, "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, SHNLEN); invalid_c++; } else { if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], SHNLEN); } else { if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf strncpy (ret[i].name, "", sizeof (ret[i].name) - 4); } else { snprintf (unknown_s, sizeof (unknown_s)-4, "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name)-4); unknown_c++; } } } ret[i].name[ELF_STRING_LENGTH-2] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static void fill_symbol_bind_and_type (struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { #define s_bind(x) ret->bind = x #define s_type(x) ret->type = x switch (ELF_ST_BIND(sym->st_info)) { case STB_LOCAL: s_bind ("LOCAL"); break; case STB_GLOBAL: s_bind ("GLOBAL"); break; case STB_WEAK: s_bind ("WEAK"); break; case STB_NUM: s_bind ("NUM"); break; case STB_LOOS: s_bind ("LOOS"); break; case STB_HIOS: s_bind ("HIOS"); break; case STB_LOPROC: s_bind ("LOPROC"); break; case STB_HIPROC: s_bind ("HIPROC"); break; default: s_bind ("UNKNOWN"); } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: s_type ("NOTYPE"); break; case STT_OBJECT: s_type ("OBJECT"); break; case STT_FUNC: s_type ("FUNC"); break; case STT_SECTION: s_type ("SECTION"); break; case STT_FILE: s_type ("FILE"); break; case STT_COMMON: s_type ("COMMON"); break; case STT_TLS: s_type ("TLS"); break; case STT_NUM: s_type ("NUM"); break; case STT_LOOS: s_type ("LOOS"); break; case STT_HIOS: s_type ("HIOS"); break; case STT_LOPROC: s_type ("LOPROC"); break; case STT_HIPROC: s_type ("HIPROC"); break; default: s_type ("UNKNOWN"); } } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, j, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } for (j = 0; j < bin->dyn_entries; j++) { switch (bin->dyn_buf[j].d_tag) { case (DT_SYMTAB): addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_buf[j].d_un.d_ptr); break; case (DT_SYMENT): sym_size = bin->dyn_buf[j].d_un.d_val; break; default: break; } } if (!addr_sym_table) { return NULL; } if (!sym_size) { return NULL; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) / sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if 
(addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc(sym, (capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 *= GROWTH_FACTOR; } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, capacity2 * GROWTH_FACTOR * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 *= GROWTH_FACTOR; } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORTS && sym[i].st_shndx == STN_UNDEF) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[i].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[i].st_info) != STT_SECTION && ELF_ST_TYPE (sym[i].st_info) != STT_FILE) { tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; } else { continue; } tmp_offset = Elf_(r_bin_elf_v2p) (bin, toffset); if (tmp_offset > bin->size) { goto done; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[i]); ret[ret_ctr].last = 0; ret_ctr++; } done: ret[ret_ctr].last = 1; // Size everything down to only what is used { nsym = i > 0 ? i : 1; Elf_ (Sym) * temp_sym = (Elf_ (Sym)*) realloc (sym, (nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0 ? 
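/* ret[] started at 4096 entries and was grown by GROWTH_FACTOR as needed;
 * it is now reallocated down to the ret_ctr entries actually produced plus
 * the trailing element that keeps last = 1 as the end-of-list sentinel. */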
ret_ctr : 1; RBinElfSymbol *p = (RBinElfSymbol *) realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } if (type == R_BIN_ELF_IMPORTS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol * *) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); }else { bin->symbols_by_ord = NULL; } } free (sym); return ret; beach: free (sym); free (ret); return NULL; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_SYMBOLS); return bin->phdr_symbols; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_imports) { return bin->phdr_imports; } bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORTS); return bin->phdr_imports; } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { /* find match in phdr */ p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; } static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = { 0 }; char *strtab = NULL; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size + 8 > bin->size) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if ((type == R_BIN_ELF_IMPORTS && bin->shdr[i].sh_type == (bin->ehdr.e_type == ET_REL ? SHT_SYMTAB : SHT_DYNSYM)) || (type == R_BIN_ELF_SYMBOLS && bin->shdr[i].sh_type == (Elf_(r_bin_elf_get_stripped) (bin) ? SHT_DYNSYM : SHT_SYMTAB))) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof(Elf_(Shdr))) >= shdr_size) { /* oops. 
fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { bprintf ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { bprintf ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { bprintf ("Warning: read (syms strtab)\n"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { bprintf ("invalid shdr %d size\n", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { bprintf ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { bprintf ("Warning: read (sym)\n"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) sym[j].st_value = READ64 (s, k) sym[j].st_size = READ64 (s, k) #else sym[j].st_name = READ32 (s, k) sym[j].st_value = READ32 (s, k) sym[j].st_size = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) #endif } free (ret); ret = calloc (nsym, sizeof (RBinElfSymbol)); if (!ret) { bprintf ("Cannot allocate %d symbols\n", nsym); goto beach; } for (k = 1, ret_ctr = 0; k < nsym; k++) { if (type == R_BIN_ELF_IMPORTS && sym[k].st_shndx == STN_UNDEF) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[k].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[k].st_info) != STT_SECTION && ELF_ST_TYPE (sym[k].st_info) != STT_FILE) { //int idx = sym[k].st_shndx; tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; } else { continue; } if (bin->ehdr.e_type == ET_REL) { if (sym[k].st_shndx < bin->ehdr.e_shnum) ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p) (bin, toffset); } ret[ret_ctr].size = tsize; if (sym[k].st_name + 2 > strtab_section->sh_size) { bprintf ("Warning: index out of strtab range\n"); goto beach; } { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[k].st_name; int maxsize = R_MIN (bin->b->length, strtab_section->sh_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const size_t len = __strnlen (strtab + sym[k].st_name, rest); memcpy (ret[ret_ctr].name, &strtab[sym[k].st_name], len); } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[k]); ret[ret_ctr].last = 0; ret_ctr++; } ret[ret_ctr].last = 1; // ugly dirty hack :D R_FREE (strtab); R_FREE (sym); } } if (!ret) { return (type == R_BIN_ELF_SYMBOLS) ? 
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORTS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); } else if (type == R_BIN_ELF_SYMBOLS) { R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); } return ret; beach: free (ret); free (sym); free (strtab); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORTS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void* Elf_(r_bin_elf_free)(ELFOBJ* bin) { int i; if (!bin) { return NULL; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free (bin->dyn_buf); free (bin->shstrtab); free (bin->dynstr); //free (bin->strtab_section); if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); free (bin); return NULL; } ELFOBJ* Elf_(r_bin_elf_new)(const char* file, bool verbose) { ut8 *buf; int size; ELFOBJ *bin = R_NEW0 (ELFOBJ); if (!bin) { return NULL; } memset (bin, 0, sizeof (ELFOBJ)); bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &size))) { return Elf_(r_bin_elf_free) (bin); } bin->size = size; bin->verbose = verbose; bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { free (buf); return Elf_(r_bin_elf_free) (bin); } free (buf); return bin; } ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) { ELFOBJ *bin = R_NEW0 (ELFOBJ); bin->kv = sdb_new0 (); bin->b = r_buf_new (); bin->size = (ut32)buf->length; bin->verbose = verbose; if (!r_buf_set_bytes (bin->b, buf->buf, buf->length)) { return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { return Elf_(r_bin_elf_free) (bin); } return bin; } static int is_in_pphdr (Elf_(Phdr) 
*p, ut64 addr) {
	return addr >= p->p_offset && addr < p->p_offset + p->p_memsz;
}

static int is_in_vphdr (Elf_(Phdr) *p, ut64 addr) {
	return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_memsz;
}

/* converts a physical address to the virtual address, looking
 * at the program headers in the binary bin */
ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) {
	int i;
	if (!bin) return 0;
	if (!bin->phdr) {
		if (bin->ehdr.e_type == ET_REL) {
			return bin->baddr + paddr;
		}
		return paddr;
	}
	for (i = 0; i < bin->ehdr.e_phnum; ++i) {
		Elf_(Phdr) *p = &bin->phdr[i];
		if (!p) {
			break;
		}
		if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) {
			if (!p->p_vaddr && !p->p_offset) {
				continue;
			}
			return p->p_vaddr + paddr - p->p_offset;
		}
	}
	return paddr;
}

/* converts a virtual address to the relative physical address, looking
 * at the program headers in the binary bin */
ut64 Elf_(r_bin_elf_v2p) (ELFOBJ *bin, ut64 vaddr) {
	int i;
	if (!bin) {
		return 0;
	}
	if (!bin->phdr) {
		if (bin->ehdr.e_type == ET_REL) {
			return vaddr - bin->baddr;
		}
		return vaddr;
	}
	for (i = 0; i < bin->ehdr.e_phnum; ++i) {
		Elf_(Phdr) *p = &bin->phdr[i];
		if (!p) {
			break;
		}
		if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) {
			if (!p->p_offset && !p->p_vaddr) {
				continue;
			}
			return p->p_offset + vaddr - p->p_vaddr;
		}
	}
	return vaddr;
}
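/*
 * Minimal standalone sketch of the translation performed by
 * Elf_(r_bin_elf_p2v)/Elf_(r_bin_elf_v2p) above: walk the PT_LOAD
 * segments and shift the address by (p_vaddr - p_offset).  Everything
 * below (struct seg, toy_p2v, the sample segment table) is hypothetical
 * demo code written only for illustration -- it is not radare2 API and
 * not part of the original elf.c.
 */
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t p_offset, p_vaddr, p_memsz; };

static uint64_t toy_p2v (const struct seg *segs, int n, uint64_t paddr) {
	int i;
	for (i = 0; i < n; i++) {
		const struct seg *s = &segs[i];
		/* same containment test as is_in_pphdr() above */
		if (paddr >= s->p_offset && paddr < s->p_offset + s->p_memsz) {
			return s->p_vaddr + paddr - s->p_offset;
		}
	}
	/* no covering segment: fall back to the identity mapping, as p2v does */
	return paddr;
}

int main (void) {
	struct seg segs[2] = {
		{ 0x0000, 0x400000, 0x1000 },
		{ 0x1000, 0x601000, 0x2000 },
	};
	/* file offset 0x1234 lies inside the second segment -> 0x601234 */
	printf ("0x%llx\n", (unsigned long long) toy_p2v (segs, 2, 0x1234));
	return 0;
}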
/* radare - LGPL - Copyright 2008-2017 - nibble, pancake, alvaro_fe */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <r_types.h> #include <r_util.h> #include "elf.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define bprintf if(bin->verbose)eprintf #define READ8(x, i) r_read_ble8(x + i); i += 1; #define READ16(x, i) r_read_ble16(x + i, bin->endian); i += 2; #define READ32(x, i) r_read_ble32(x + i, bin->endian); i += 4; #define READ64(x, i) r_read_ble64(x + i, bin->endian); i += 8; #define GROWTH_FACTOR (1.5) static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static int handle_e_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static int init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { bprintf ("Warning: read (magic)\n"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine{EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_486=6, " " EM_860=7, EM_MIPS=8, EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11," " EM_UNKNOWN12=12, EM_UNKNOWN13=13, EM_UNKNOWN14=14, " " EM_PA_RISC=15, EM_PARISC=EM_PA_RISC, EM_nCUBE=16, EM_VPP500=17," " EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, " " EM_S390=22, EM_UNKNOWN22=EM_S390, EM_UNKNOWN23=23, EM_UNKNOWN24=24," " EM_UNKNOWN25=25, EM_UNKNOWN26=26, EM_UNKNOWN27=27, EM_UNKNOWN28=28," " EM_UNKNOWN29=29, EM_UNKNOWN30=30, EM_UNKNOWN31=31, EM_UNKNOWN32=32," " EM_UNKNOWN33=33, EM_UNKNOWN34=34, EM_UNKNOWN35=35, EM_V800=36," " EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40, EM_ALPHA=41, EM_SH=42," " EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46, EM_H8_300H=47," " EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51, EM_COLDFIRE=52," " EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_AMD64=62," " EM_X86_64=EM_AMD64, EM_PDSP=63, EM_UNKNOWN64=64, EM_UNKNOWN65=65," " EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69, EM_68HC11=70," " EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75, " " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80," " EM_HUANY=81, EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86," " EM_V850=87, EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91," " EM_OPENRISC=92, EM_ARC_A5=93, EM_XTENSA=94, EM_NUM=95};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exqqqxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exxxxxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #endif 
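	/* Note on the decoding below: e_ident[EI_DATA] declares the file's byte
	 * order (ELFDATA2LSB vs ELFDATA2MSB).  bin->endian is derived from it on
	 * the next line, and the READ16/READ32/READ64 helpers defined at the top
	 * of this file pass that flag to r_read_ble*(), so every ehdr field is
	 * decoded in the byte order the binary itself declares. */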
bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (Elf_(Ehdr))); if (len < 1) { bprintf ("Warning: read (ehdr)\n"); return false; } memcpy (&bin->ehdr.e_ident, ehdr, 16); i = 16; bin->ehdr.e_type = READ16 (ehdr, i) bin->ehdr.e_machine = READ16 (ehdr, i) bin->ehdr.e_version = READ32 (ehdr, i) #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i) bin->ehdr.e_phoff = READ64 (ehdr, i) bin->ehdr.e_shoff = READ64 (ehdr, i) #else bin->ehdr.e_entry = READ32 (ehdr, i) bin->ehdr.e_phoff = READ32 (ehdr, i) bin->ehdr.e_shoff = READ32 (ehdr, i) #endif bin->ehdr.e_flags = READ32 (ehdr, i) bin->ehdr.e_ehsize = READ16 (ehdr, i) bin->ehdr.e_phentsize = READ16 (ehdr, i) bin->ehdr.e_phnum = READ16 (ehdr, i) bin->ehdr.e_shentsize = READ16 (ehdr, i) bin->ehdr.e_shnum = READ16 (ehdr, i) bin->ehdr.e_shstrndx = READ16 (ehdr, i) return handle_e_ident (bin); // Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int i, j, len; if (!bin->ehdr.e_phnum) { return false; } if (bin->phdr) { return true; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } if (!(bin->phdr = calloc (phdr_size, 1))) { perror ("malloc (phdr)"); return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)), phdr, sizeof (Elf_(Phdr))); if (len < 1) { bprintf ("Warning: read (phdr)\n"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j) #if R_BIN_ELF64 bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_offset = READ64 (phdr, j) bin->phdr[i].p_vaddr = READ64 (phdr, j) bin->phdr[i].p_paddr = READ64 (phdr, j) bin->phdr[i].p_filesz = READ64 (phdr, j) bin->phdr[i].p_memsz = READ64 (phdr, j) bin->phdr[i].p_align = READ64 (phdr, j) #else bin->phdr[i].p_offset = READ32 (phdr, j) bin->phdr[i].p_vaddr = READ32 (phdr, j) bin->phdr[i].p_paddr = READ32 (phdr, j) bin->phdr[i].p_filesz = READ32 (phdr, j) bin->phdr[i].p_memsz = READ32 (phdr, j) bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_align = READ32 (phdr, j) #endif } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > 
pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) #endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static int init_strtab(ELFOBJ *bin) { if (bin->strtab || !bin->shdr) { return false; } if (bin->ehdr.e_shstrndx != SHN_UNDEF && (bin->ehdr.e_shstrndx >= bin->ehdr.e_shnum || (bin->ehdr.e_shstrndx >= SHN_LORESERVE && bin->ehdr.e_shstrndx < SHN_HIRESERVE))) return false; /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[bin->ehdr.e_shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[bin->ehdr.e_shstrndx].sh_size) { return false; } bin->shstrtab_section = 
bin->strtab_section = &bin->shdr[bin->ehdr.e_shstrndx]; bin->shstrtab_size = bin->strtab_section->sh_size; if (bin->shstrtab_size > bin->size) { return false; } if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) { perror ("malloc"); bin->shstrtab = NULL; return false; } if (bin->shstrtab_section->sh_offset > bin->size) { R_FREE (bin->shstrtab); return false; } if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) { R_FREE (bin->shstrtab); return false; } if (r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size + 1) < 1) { bprintf ("Warning: read (shstrtab) at 0x%"PFMT64x"\n", (ut64) bin->shstrtab_section->sh_offset); R_FREE (bin->shstrtab); return false; } bin->shstrtab[bin->shstrtab_section->sh_size] = '\0'; sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0); sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0); return true; } static int init_dynamic_section(struct Elf_(r_bin_elf_obj_t) *bin) { Elf_(Dyn) *dyn = NULL; Elf_(Dyn) d = {0}; Elf_(Addr) strtabaddr = 0; ut64 offset = 0; char *strtab = NULL; size_t relentry = 0, strsize = 0; int entries; int i, j, len, r; ut8 sdyn[sizeof (Elf_(Dyn))] = {0}; ut32 dyn_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return false; } for (i = 0; i < bin->ehdr.e_phnum ; i++) { if (bin->phdr[i].p_type == PT_DYNAMIC) { dyn_size = bin->phdr[i].p_filesz; break; } } if (i == bin->ehdr.e_phnum) { return false; } if (bin->phdr[i].p_filesz > bin->size) { return false; } if (bin->phdr[i].p_offset > bin->size) { return false; } if (bin->phdr[i].p_offset + sizeof(Elf_(Dyn)) > bin->size) { return false; } for (entries = 0; entries < (dyn_size / sizeof (Elf_(Dyn))); entries++) { j = 0; len = r_buf_read_at (bin->b, bin->phdr[i].p_offset + entries * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { goto beach; } #if R_BIN_ELF64 d.d_tag = READ64 (sdyn, j) #else d.d_tag = READ32 (sdyn, j) #endif if (d.d_tag == DT_NULL) { break; } } if (entries < 1) { return false; } dyn = (Elf_(Dyn)*)calloc (entries, sizeof (Elf_(Dyn))); if (!dyn) { return false; } if (!UT32_MUL (&dyn_size, entries, sizeof (Elf_(Dyn)))) { goto beach; } if (!dyn_size) { goto beach; } offset = Elf_(r_bin_elf_v2p) (bin, bin->phdr[i].p_vaddr); if (offset > bin->size || offset + dyn_size > bin->size) { goto beach; } for (i = 0; i < entries; i++) { j = 0; r_buf_read_at (bin->b, offset + i * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { bprintf("Warning: read (dyn)\n"); } #if R_BIN_ELF64 dyn[i].d_tag = READ64 (sdyn, j) dyn[i].d_un.d_ptr = READ64 (sdyn, j) #else dyn[i].d_tag = READ32 (sdyn, j) dyn[i].d_un.d_ptr = READ32 (sdyn, j) #endif switch (dyn[i].d_tag) { case DT_STRTAB: strtabaddr = Elf_(r_bin_elf_v2p) (bin, dyn[i].d_un.d_ptr); break; case DT_STRSZ: strsize = dyn[i].d_un.d_val; break; case DT_PLTREL: bin->is_rela = dyn[i].d_un.d_val; break; case DT_RELAENT: relentry = dyn[i].d_un.d_val; break; default: if ((dyn[i].d_tag >= DT_VERSYM) && (dyn[i].d_tag <= DT_VERNEEDNUM)) { bin->version_info[DT_VERSIONTAGIDX (dyn[i].d_tag)] = dyn[i].d_un.d_val; } break; } } if (!bin->is_rela) { bin->is_rela = sizeof (Elf_(Rela)) == relentry? 
DT_RELA : DT_REL; } if (!strtabaddr || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size) { if (!strtabaddr) { bprintf ("Warning: section.shstrtab not found or invalid\n"); } goto beach; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { goto beach; } if (strtabaddr + strsize > bin->size) { free (strtab); goto beach; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); goto beach; } bin->dyn_buf = dyn; bin->dyn_entries = entries; bin->strtab = strtab; bin->strtab_size = strsize; r = Elf_(r_bin_elf_has_relro)(bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); return true; beach: free (dyn); return false; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { int i; if (!bin->g_sections) { return NULL; } for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH-1)) { return &bin->g_sections[i]; } } return NULL; } static char *get_ver_flags(ut32 flags) { static char buff[32]; buff[0] = 0; if (!flags) { return "none"; } if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb *store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { int i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Elf_(Shdr) *link_shdr = NULL; Sdb *sdb = sdb_new0(); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); for (i = 0; i < num_entries; i += 4) { int j; int check_def; char key[32] = {0}; Sdb *sdb_entry = sdb_new0 (); snprintf (key, sizeof (key), "entry%d", i / 4); sdb_ns_set (sdb, key, sdb_entry); sdb_num_set (sdb_entry, "idx", i, 0); for (j = 0; (j < 4) && (i + j) < num_entries; ++j) { int k; char *tmp_val = NULL; snprintf (key, sizeof (key), "value%d", j); switch (data[i + j]) { case 0: sdb_set 
(sdb_entry, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb_entry, key, "1 (*global*)", 0); break; default: tmp_val = sdb_fmt (0, "%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { bprintf ("Warning: Cannot read Verneed for Versym\n"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k) vn.vn_cnt = READ16 (svn, k) vn.vn_file = READ32 (svn, k) vn.vn_aux = READ32 (svn, k) vn.vn_next = READ32 (svn, k) a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) { bprintf ("Warning: Cannot read Vernaux for Versym\n"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k) vna.vna_flags = READ16 (svna, k) vna.vna_other = READ16 (svna, k) vna.vna_name = READ32 (svna, k) vna.vna_next = READ32 (svna, k) a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } sdb_set (sdb_entry, key, sdb_fmt (0, "%s(%s)", tmp_val, bin->strtab + vna.vna_name), 0); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { bprintf ("Warning: Cannot read Verdef for Versym\n"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k) vd.vd_flags = READ16 (svd, k) vd.vd_ndx = READ16 (svd, k) vd.vd_cnt = READ16 (svd, k) vd.vd_hash = READ32 (svd, k) vd.vd_aux = READ32 (svd, k) vd.vd_next = READ32 (svd, k) offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { bprintf ("Warning: Cannot read Verdaux for Versym\n"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k) vda.vda_next = READ32 (svda, k) if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; sdb_set (sdb_entry, key, sdb_fmt (0,"%s(%s%-*s)", tmp_val, name, (int)(12 - strlen (name)),")") , 0); } } } } } beach: free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if ((int)shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = 
&bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; size_t vstart_off = i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1 || shdr->sh_size - vstart_off < vdaux) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; vstart_off += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; vstart_off += aux.vda_next; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if ((int)shdr->sh_size < 1) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = 
&bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; int i; if (!bin || !bin->shdr) { return NULL; } if 
(!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb = NULL; char key[32] = {0}; int size = bin->shdr[i].sh_size; if (size - (i*sizeof(Elf_(Shdr)) > bin->size)) { size = bin->size - (i*sizeof(Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } for (i = 0; i < bin->ehdr.e_shnum; ++i) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { bprintf("Warning: Cannot allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static int elf_init(ELFOBJ *bin) { bin->phdr = NULL; bin->shdr = NULL; bin->strtab = NULL; bin->shstrtab = NULL; bin->strtab_size = 0; bin->strtab_section = NULL; bin->dyn_buf = NULL; bin->dynstr = NULL; ZERO_FILL (bin->version_info); bin->g_sections = NULL; bin->g_symbols = NULL; bin->g_imports = NULL; /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin)) { bprintf ("Warning: Cannot initialize program headers\n"); } if (!init_shdr (bin)) { bprintf ("Warning: Cannot initialize section headers\n"); } if (!init_strtab (bin)) { bprintf ("Warning: Cannot initialize strings table\n"); } if (!init_dynstr (bin)) { bprintf ("Warning: Cannot initialize dynamic strings\n"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_get_static)(bin)) bprintf ("Warning: Cannot initialize dynamic section\n"); bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, 
section_name); return section? section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } #define REL (is_rela ? (void*)rela : (void*)rel) #define REL_BUF is_rela ? (ut8*)(&rela[k]) : (ut8*)(&rel[k]) #define REL_OFFSET is_rela ? rela[k].r_offset : rel[k].r_offset #define REL_TYPE is_rela ? rela[k].r_info : rel[k].r_info static ut64 get_import_addr(ELFOBJ *bin, int sym) { Elf_(Rel) *rel = NULL; Elf_(Rela) *rela = NULL; ut8 rl[sizeof (Elf_(Rel))] = {0}; ut8 rla[sizeof (Elf_(Rela))] = {0}; RBinElfSection *rel_sec = NULL; Elf_(Addr) plt_sym_addr = -1; ut64 got_addr, got_offset; ut64 plt_addr; int j, k, tsize, len, nrel; bool is_rela = false; const char *rel_sect[] = { ".rel.plt", ".rela.plt", ".rel.dyn", ".rela.dyn", NULL }; const char *rela_sect[] = { ".rela.plt", ".rel.plt", ".rela.dyn", ".rel.dyn", NULL }; if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return -1; } if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) == -1 && (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) == -1) { return -1; } if ((got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got")) == -1 && (got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got.plt")) == -1) { return -1; } if (bin->is_rela == DT_REL) { j = 0; while (!rel_sec && rel_sect[j]) { rel_sec = get_section_by_name (bin, rel_sect[j++]); } tsize = sizeof (Elf_(Rel)); } else if (bin->is_rela == DT_RELA) { j = 0; while (!rel_sec && rela_sect[j]) { rel_sec = get_section_by_name (bin, rela_sect[j++]); } is_rela = true; tsize = sizeof (Elf_(Rela)); } if (!rel_sec) { return -1; } if (rel_sec->size < 1) { return -1; } nrel = (ut32)((int)rel_sec->size / (int)tsize); if (nrel < 1) { return -1; } if (is_rela) { rela = calloc (nrel, tsize); if (!rela) { return -1; } } else { rel = calloc (nrel, tsize); if (!rel) { return -1; } } for (j = k = 0; j < rel_sec->size && k < nrel; j += tsize, k++) { int l = 0; if (rel_sec->offset + j > bin->size) { goto out; } if (rel_sec->offset + j + tsize > bin->size) { goto out; } len = r_buf_read_at ( bin->b, rel_sec->offset + j, is_rela ? rla : rl, is_rela ? 
sizeof (Elf_ (Rela)) : sizeof (Elf_ (Rel))); if (len < 1) { goto out; } #if R_BIN_ELF64 if (is_rela) { rela[k].r_offset = READ64 (rla, l) rela[k].r_info = READ64 (rla, l) rela[k].r_addend = READ64 (rla, l) } else { rel[k].r_offset = READ64 (rl, l) rel[k].r_info = READ64 (rl, l) } #else if (is_rela) { rela[k].r_offset = READ32 (rla, l) rela[k].r_info = READ32 (rla, l) rela[k].r_addend = READ32 (rla, l) } else { rel[k].r_offset = READ32 (rl, l) rel[k].r_info = READ32 (rl, l) } #endif int reloc_type = ELF_R_TYPE (REL_TYPE); int reloc_sym = ELF_R_SYM (REL_TYPE); if (reloc_sym == sym) { int of = REL_OFFSET; of = of - got_addr + got_offset; switch (bin->ehdr.e_machine) { case EM_PPC: case EM_PPC64: { RBinElfSection *s = get_section_by_name (bin, ".plt"); if (s) { ut8 buf[4]; ut64 base; len = r_buf_read_at (bin->b, s->offset, buf, sizeof (buf)); if (len < 4) { goto out; } base = r_read_be32 (buf); base -= (nrel * 16); base += (k * 16); plt_addr = base; free (REL); return plt_addr; } } break; case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return -1; } if (reloc_type == R_386_PC16) { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } else { bprintf ("Unknown sparc reloc type %d\n", reloc_type); } /* SPARC */ break; case EM_ARM: case EM_AARCH64: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return UT32_MAX; } switch (reloc_type) { case R_386_8: { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } break; case 1026: // arm64 aarch64 plt_sym_addr = plt_addr + k * 16 + 32; goto done; default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); break; } break; case EM_386: case EM_X86_64: switch (reloc_type) { case 1: // unknown relocs found in voidlinux for x86-64 // break; case R_386_GLOB_DAT: case R_386_JMP_SLOT: { ut8 buf[8]; if (of + sizeof(Elf_(Addr)) < bin->size) { // ONLY FOR X86 if (of > bin->size || of + sizeof (Elf_(Addr)) > bin->size) { goto out; } len = r_buf_read_at (bin->b, of, buf, sizeof (Elf_(Addr))); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? r_read_le32 (buf) : r_read_le64 (buf); if (!plt_sym_addr) { //XXX HACK ALERT!!!! full relro?? try to fix it //will there always be .plt.got, what would happen if is .got.plt? RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro)(bin) < R_ELF_PART_RELRO || !s) { goto done; } plt_addr = s->offset; of = of + got_addr - got_offset; while (plt_addr + 2 + 4 < s->offset + s->size) { /*we try to locate the plt entry that correspond with the relocation since got does not point back to .plt. In this case it has the following form ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 to remove jmp opcode and get the imm reading 4 and if RIP (plt_addr + 6) + imm == rel->offset return plt_addr, that will be our sym addr perhaps this hack doesn't work on 32 bits */ len = r_buf_read_at (bin->b, plt_addr + 2, buf, 4); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? 
r_read_le32 (buf) : r_read_le64 (buf); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == of) { plt_sym_addr = plt_addr; goto done; } else if (plt_sym_addr == of) { plt_sym_addr = plt_addr; goto done; } plt_addr += 8; } } else { plt_sym_addr -= 6; } goto done; } break; } default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); free (REL); return of; break; } break; case 8: // MIPS32 BIG ENDIAN relocs { RBinElfSection *s = get_section_by_name (bin, ".rela.plt"); if (s) { ut8 buf[1024]; const ut8 *base; plt_addr = s->rva + s->size; len = r_buf_read_at (bin->b, s->offset + s->size, buf, sizeof (buf)); if (len != sizeof (buf)) { // oops } base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8*)"\x3c\x0f\x00", 3, 4); if (base) { plt_addr += (int)(size_t)(base - buf); } else { plt_addr += 108 + 8; // HARDCODED HACK } plt_addr += k * 16; free (REL); return plt_addr; } } break; default: bprintf ("Unsupported relocs type %d for arch %d\n", reloc_type, bin->ehdr.e_machine); break; } } } done: free (REL); return plt_sym_addr; out: free (REL); return -1; } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { int i; bool haveBindNow = false; bool haveGnuRelro = false; if (bin && bin->dyn_buf) { for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_BIND_NOW: haveBindNow = true; break; case DT_FLAGS: for (i++; i < bin->dyn_entries ; i++) { ut32 dTag = bin->dyn_buf[i].d_tag; if (!dTag) { break; } switch (dTag) { case DT_FLAGS_1: if (bin->dyn_buf[i].d_un.d_val & DF_1_NOW) { haveBindNow = true; break; } } } break; } } } if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && bin->ehdr.e_type == ET_REL) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX ? 0 : base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } return base == UT64_MAX ? 
0 : base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (init_offset)\n"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry+11, buf, sizeof (buf)) == -1) { bprintf ("Warning: read (get_fini)\n"); return 0; } if (*buf == 0x68) { // push // x86/32 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { ut64 entry; if (!bin) { return 0LL; } entry = bin->ehdr.e_entry; if (!entry) { entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init"); if (entry != UT64_MAX) { return entry; } if (entry == UT64_MAX) { return 0; } } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol; int i; if (!(symbol = Elf_(r_bin_elf_get_symbols) (bin))) { return UT64_MAX; } for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { ut64 paddr = symbol[i].offset; return Elf_(r_bin_elf_p2v) (bin, paddr); } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return 0; } if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (main)\n"); return 0; } // ARM64 if (buf[0x18+3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM ut64 text = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); ut64 text_end = text + bin->size; // ARM-Thumb-Linux if (entry & 1 && !memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00", 6)) { ut32 * ptr = (ut32*)(buf+40-1); if (*ptr &1) { return Elf_(r_bin_elf_v2p) (bin, *ptr -1); } } if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { // endian stuff here ut32 *addr = (ut32*)(buf+0x34); /* 0x00012000 00b0a0e3 mov fp, 0 0x00012004 00e0a0e3 mov lr, 0 */ if (*addr > text && *addr < (text_end)) { return Elf_(r_bin_elf_v2p) (bin, *addr); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; unsigned i; for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 
0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // ARM if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { ut64 addr = r_read_le32 (&buf[48]); return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { bprintf ("Warning: read (maddr) 2\n"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 (&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); } #endif /* linux64 pie main -- probably buggy in some cases */ if (buf[29] == 0x48 && buf[30] == 0x8d) { // lea rdi, qword [rip-0x21c4] ut8 *p = buf + 32; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + 29 + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain>>16 == ventry>>16) { return (ut64)vmain; } } /* find sym.main if possible */ { ut64 m = getmainsymbol (bin); if (m != UT64_MAX) return m; } return UT64_MAX; } int Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { int i; if (!bin->shdr) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { char *str = NULL; ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_memsz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1) { return NULL; } str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { bprintf ("Warning: read (main)\n"); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } int Elf_(r_bin_elf_get_static)(ELFOBJ *bin) { int i; if (!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: 
return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_HEXAGON: return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_SH: return strdup ("sh"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup("ia64"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); 
case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { ut32 e_type; if (!bin) { return NULL; } e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int 
Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) { /* Hack for ARCompact */ if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; } /* Hack for Ps2 */ if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) { // Playstation2 Hack return 64; } } // TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...) switch (mipsType) { case EF_MIPS_ARCH_1: case EF_MIPS_ARCH_2: case EF_MIPS_ARCH_3: case EF_MIPS_ARCH_4: case EF_MIPS_ARCH_5: case EF_MIPS_ARCH_32: return 32; case EF_MIPS_ARCH_64: return 64; case EF_MIPS_ARCH_32R2: return 32; case EF_MIPS_ARCH_64R2: return 64; break; } return 32; } /* Hack for Thumb */ if (bin->ehdr.e_machine == EM_ARM) { if (bin->ehdr.e_type != ET_EXEC) { struct r_bin_elf_symbol_t *symbol; if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) { int i = 0; for (i = 0; !symbol[i].last; i++) { ut64 paddr = symbol[i].offset; if (paddr & 1) { return 16; } } } } { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry & 1) { return 16; } } } switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: return 32; case ELFCLASS64: return 64; case ELFCLASSNONE: default: return 32; // defaults } } static inline int noodle(ELFOBJ *bin, const char *s) { const ut8 *p = bin->b->buf; if (bin->b->length > 64) { p += bin->b->length - 64; } else { return 0; } return r_mem_mem (p, 64, (const ut8 *)s, strlen (s)) != NULL; } static inline int needle(ELFOBJ *bin, const char *s) { if (bin->shstrtab) { ut32 len = bin->shstrtab_size; if (len > 4096) { len = 4096; // avoid slow loading .. can be buggy? } return r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)) != NULL; } return 0; } // TODO: must return const char * all those strings must be const char os[LINUX] or so char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } /* Hack to identify OS */ if (needle (bin, "openbsd")) return strdup ("openbsd"); if (needle (bin, "netbsd")) return strdup ("netbsd"); if (needle (bin, "freebsd")) return strdup ("freebsd"); if (noodle (bin, "BEOS:APP_VERSION")) return strdup ("beos"); if (needle (bin, "GNU")) return strdup ("linux"); return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { int i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int regdelta = (bits == 64)? 0x84: 0x40; // x64 vs x32 int regsize = 160; // for x86-64 ut8 *buf = malloc (regsize); if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + regdelta, buf, regsize) != regsize) { free (buf); bprintf ("Cannot read register state from CORE file\n"); return NULL; } if (len) { *len = regsize; } return buf; } } bprintf ("Cannot find NOTE section\n"); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab? 
*/ char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { char *ret = NULL; int j; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab) { return NULL; } for (j = 0; j< bin->dyn_entries; j++) { if (bin->dyn_buf[j].d_tag == DT_RPATH || bin->dyn_buf[j].d_tag == DT_RUNPATH) { if (!(ret = calloc (1, ELF_STRING_LENGTH))) { perror ("malloc (rpath)"); return NULL; } if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[ELF_STRING_LENGTH - 1] = '\0'; break; } } return ret; } static size_t get_relocs_num(ELFOBJ *bin) { size_t i, size, ret = 0; /* we need to be careful here, in malformed files the section size might * not be a multiple of a Rel/Rela size; round up so we allocate enough * space. */ #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize)+(entrysize)-1)/(entrysize)) if (!bin->g_sections) { return 0; } size = bin->is_rela == DT_REL ? sizeof (Elf_(Rel)) : sizeof (Elf_(Rela)); for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."))) { if (!bin->is_rela) { size = sizeof (Elf_(Rela)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } else if (!strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."))){ if (!bin->is_rela) { size = sizeof (Elf_(Rel)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } } return ret; #undef NUMENTRIES_ROUNDUP } static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel; #if R_BIN_ELF64 rel.r_offset = READ64 (buf + offset, j) rel.r_info = READ64 (buf + offset, j) #else rel.r_offset = READ32 (buf + offset, j) rel.r_info = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } } RBinElfReloc* Elf_(r_bin_elf_get_relocs)(ELFOBJ *bin) { int res, rel, rela, i, j; size_t reloc_num = 0; RBinElfReloc *ret = NULL; if (!bin || !bin->g_sections) { return NULL; } reloc_num = get_relocs_num (bin); if (!reloc_num) { return NULL; } bin->reloc_num = reloc_num; ret = (RBinElfReloc*)calloc ((size_t)reloc_num + 1, sizeof(RBinElfReloc)); if (!ret) { return NULL; } #if DEAD_CODE ut64 section_text_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".text"); if (section_text_offset == -1) { section_text_offset = 0; } #endif for (i = 0, rel = 0; !bin->g_sections[i].last && rel < reloc_num ; i++) { bool is_rela = 0 == strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela.")); bool is_rel = 0 == strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel.")); if (!is_rela && !is_rel) { continue; } for (j = 0; j < bin->g_sections[i].size; j += res) { if (bin->g_sections[i].size > bin->size) { break; } if (bin->g_sections[i].offset > bin->size) { break; } if (rel >= reloc_num) { bprintf 
("Internal error: ELF relocation buffer too small," "please file a bug report."); break; } if (!bin->is_rela) { rela = is_rela? DT_RELA : DT_REL; } else { rela = bin->is_rela; } res = read_reloc (bin, &ret[rel], rela, bin->g_sections[i].offset + j); if (j + res > bin->g_sections[i].size) { bprintf ("Warning: malformed file, relocation entry #%u is partially beyond the end of section %u.\n", rel, i); } if (bin->ehdr.e_type == ET_REL) { if (bin->g_sections[i].info < bin->ehdr.e_shnum && bin->shdr) { ret[rel].rva = bin->shdr[bin->g_sections[i].info].sh_offset + ret[rel].offset; ret[rel].rva = Elf_(r_bin_elf_p2v) (bin, ret[rel].rva); } else { ret[rel].rva = ret[rel].offset; } } else { ret[rel].rva = ret[rel].offset; ret[rel].offset = Elf_(r_bin_elf_v2p) (bin, ret[rel].offset); } ret[rel].last = 0; if (res < 0) { break; } rel++; } } ret[reloc_num].last = 1; return ret; } RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) { RBinElfLib *ret = NULL; int j, k; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab || *(bin->strtab+1) == '0') { return NULL; } for (j = 0, k = 0; j < bin->dyn_entries; j++) if (bin->dyn_buf[j].d_tag == DT_NEEDED) { RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret[k].name, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; ret[k].last = 0; if (ret[k].name[0]) { k++; } } RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; ret[k].last = 1; return ret; } static RBinElfSection* get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; int i, num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) return NULL; for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_REL: reldyn = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELA: relva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELSZ: reldynsz = bin->dyn_buf[i].d_un.d_val; break; case DT_RELASZ: relasz = bin->dyn_buf[i].d_un.d_val; break; case DT_PLTGOT: pltgotva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_PLTRELSZ: pltgotsz = bin->dyn_buf[i].d_un.d_val; break; case DT_JMPREL: relava = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; default: break; } } ret = calloc (num_sections + 1, sizeof(RBinElfSection)); if (!ret) { return NULL; } i = 0; if (reldyn) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, reldyn); ret[i].rva = reldyn; ret[i].size = reldynsz; strcpy (ret[i].name, ".rel.dyn"); ret[i].last = 0; i++; } if (relava) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relava); ret[i].rva = relava; ret[i].size = pltgotsz; strcpy (ret[i].name, ".rela.plt"); ret[i].last = 0; i++; } if (relva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relva); ret[i].rva = relva; ret[i].size = relasz; strcpy (ret[i].name, ".rel.plt"); ret[i].last = 0; i++; } if (pltgotva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, pltgotva); ret[i].rva = pltgotva; ret[i].size = pltgotsz; strcpy (ret[i].name, ".got.plt"); ret[i].last = 0; i++; } ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[20], invalid_s[20]; int i, nidx, unknown_c=0, invalid_c=0; if (!bin) { return NULL; } if 
(bin->g_sections) { return bin->g_sections; } if (!bin->shdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!(ret = calloc ((bin->ehdr.e_shnum + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (bin->ehdr.e_type == ET_REL) { ret[i].rva = bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } nidx = bin->shdr[i].sh_name; #define SHNAME (int)bin->shdr[i].sh_name #define SHNLEN ELF_STRING_LENGTH - 4 #define SHSIZE (int)bin->shstrtab_size if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s) - 4, "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, SHNLEN); invalid_c++; } else { if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], SHNLEN); } else { if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf strncpy (ret[i].name, "", sizeof (ret[i].name) - 4); } else { snprintf (unknown_s, sizeof (unknown_s)-4, "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name)-4); unknown_c++; } } } ret[i].name[ELF_STRING_LENGTH-2] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static void fill_symbol_bind_and_type (struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { #define s_bind(x) ret->bind = x #define s_type(x) ret->type = x switch (ELF_ST_BIND(sym->st_info)) { case STB_LOCAL: s_bind ("LOCAL"); break; case STB_GLOBAL: s_bind ("GLOBAL"); break; case STB_WEAK: s_bind ("WEAK"); break; case STB_NUM: s_bind ("NUM"); break; case STB_LOOS: s_bind ("LOOS"); break; case STB_HIOS: s_bind ("HIOS"); break; case STB_LOPROC: s_bind ("LOPROC"); break; case STB_HIPROC: s_bind ("HIPROC"); break; default: s_bind ("UNKNOWN"); } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: s_type ("NOTYPE"); break; case STT_OBJECT: s_type ("OBJECT"); break; case STT_FUNC: s_type ("FUNC"); break; case STT_SECTION: s_type ("SECTION"); break; case STT_FILE: s_type ("FILE"); break; case STT_COMMON: s_type ("COMMON"); break; case STT_TLS: s_type ("TLS"); break; case STT_NUM: s_type ("NUM"); break; case STT_LOOS: s_type ("LOOS"); break; case STT_HIOS: s_type ("HIOS"); break; case STT_LOPROC: s_type ("LOPROC"); break; case STT_HIPROC: s_type ("HIPROC"); break; default: s_type ("UNKNOWN"); } } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, j, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } for (j = 0; j < bin->dyn_entries; j++) { switch (bin->dyn_buf[j].d_tag) { case (DT_SYMTAB): addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_buf[j].d_un.d_ptr); break; case (DT_SYMENT): sym_size = bin->dyn_buf[j].d_un.d_val; break; default: break; } } if (!addr_sym_table) { return NULL; } if (!sym_size) { return NULL; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) / sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if 
(addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc(sym, (capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 *= GROWTH_FACTOR; } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, capacity2 * GROWTH_FACTOR * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 *= GROWTH_FACTOR; } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORTS && sym[i].st_shndx == STN_UNDEF) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[i].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[i].st_info) != STT_SECTION && ELF_ST_TYPE (sym[i].st_info) != STT_FILE) { tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; } else { continue; } tmp_offset = Elf_(r_bin_elf_v2p) (bin, toffset); if (tmp_offset > bin->size) { goto done; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[i]); ret[ret_ctr].last = 0; ret_ctr++; } done: ret[ret_ctr].last = 1; // Size everything down to only what is used { nsym = i > 0 ? i : 1; Elf_ (Sym) * temp_sym = (Elf_ (Sym)*) realloc (sym, (nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0 ? 
ret_ctr : 1; RBinElfSymbol *p = (RBinElfSymbol *) realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } if (type == R_BIN_ELF_IMPORTS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol * *) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); }else { bin->symbols_by_ord = NULL; } } free (sym); return ret; beach: free (sym); free (ret); return NULL; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_SYMBOLS); return bin->phdr_symbols; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_imports) { return bin->phdr_imports; } bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORTS); return bin->phdr_imports; } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { /* find match in phdr */ p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; } static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = { 0 }; char *strtab = NULL; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size + 8 > bin->size) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if ((type == R_BIN_ELF_IMPORTS && bin->shdr[i].sh_type == (bin->ehdr.e_type == ET_REL ? SHT_SYMTAB : SHT_DYNSYM)) || (type == R_BIN_ELF_SYMBOLS && bin->shdr[i].sh_type == (Elf_(r_bin_elf_get_stripped) (bin) ? SHT_DYNSYM : SHT_SYMTAB))) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof(Elf_(Shdr))) >= shdr_size) { /* oops. 
fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { bprintf ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { bprintf ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { bprintf ("Warning: read (syms strtab)\n"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { bprintf ("invalid shdr %d size\n", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { bprintf ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { bprintf ("Warning: read (sym)\n"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) sym[j].st_value = READ64 (s, k) sym[j].st_size = READ64 (s, k) #else sym[j].st_name = READ32 (s, k) sym[j].st_value = READ32 (s, k) sym[j].st_size = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) #endif } free (ret); ret = calloc (nsym, sizeof (RBinElfSymbol)); if (!ret) { bprintf ("Cannot allocate %d symbols\n", nsym); goto beach; } for (k = 1, ret_ctr = 0; k < nsym; k++) { if (type == R_BIN_ELF_IMPORTS && sym[k].st_shndx == STN_UNDEF) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[k].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[k].st_info) != STT_SECTION && ELF_ST_TYPE (sym[k].st_info) != STT_FILE) { //int idx = sym[k].st_shndx; tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; } else { continue; } if (bin->ehdr.e_type == ET_REL) { if (sym[k].st_shndx < bin->ehdr.e_shnum) ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p) (bin, toffset); } ret[ret_ctr].size = tsize; if (sym[k].st_name + 2 > strtab_section->sh_size) { bprintf ("Warning: index out of strtab range\n"); goto beach; } { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[k].st_name; int maxsize = R_MIN (bin->b->length, strtab_section->sh_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const size_t len = __strnlen (strtab + sym[k].st_name, rest); memcpy (ret[ret_ctr].name, &strtab[sym[k].st_name], len); } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[k]); ret[ret_ctr].last = 0; ret_ctr++; } ret[ret_ctr].last = 1; // ugly dirty hack :D R_FREE (strtab); R_FREE (sym); } } if (!ret) { return (type == R_BIN_ELF_SYMBOLS) ? 
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORTS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); } else if (type == R_BIN_ELF_SYMBOLS) { R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); } return ret; beach: free (ret); free (sym); free (strtab); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORTS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void* Elf_(r_bin_elf_free)(ELFOBJ* bin) { int i; if (!bin) { return NULL; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free (bin->dyn_buf); free (bin->shstrtab); free (bin->dynstr); //free (bin->strtab_section); if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); free (bin); return NULL; } ELFOBJ* Elf_(r_bin_elf_new)(const char* file, bool verbose) { ut8 *buf; int size; ELFOBJ *bin = R_NEW0 (ELFOBJ); if (!bin) { return NULL; } memset (bin, 0, sizeof (ELFOBJ)); bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &size))) { return Elf_(r_bin_elf_free) (bin); } bin->size = size; bin->verbose = verbose; bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { free (buf); return Elf_(r_bin_elf_free) (bin); } free (buf); return bin; } ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) { ELFOBJ *bin = R_NEW0 (ELFOBJ); bin->kv = sdb_new0 (); bin->b = r_buf_new (); bin->size = (ut32)buf->length; bin->verbose = verbose; if (!r_buf_set_bytes (bin->b, buf->buf, buf->length)) { return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { return Elf_(r_bin_elf_free) (bin); } return bin; } static int is_in_pphdr (Elf_(Phdr) 
*p, ut64 addr) { return addr >= p->p_offset && addr < p->p_offset + p->p_memsz; } static int is_in_vphdr (Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_memsz; } /* converts a physical address to the virtual address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) { int i; if (!bin) return 0; if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return bin->baddr + paddr; } return paddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { if (!p->p_vaddr && !p->p_offset) { continue; } return p->p_vaddr + paddr - p->p_offset; } } return paddr; } /* converts a virtual address to the relative physical address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_v2p) (ELFOBJ *bin, ut64 vaddr) { int i; if (!bin) { return 0; } if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return vaddr - bin->baddr; } return vaddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { if (!p->p_offset && !p->p_vaddr) { continue; } return p->p_offset + vaddr - p->p_vaddr; } } return vaddr; }
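The two helpers that close this file, Elf_(r_bin_elf_p2v) and Elf_(r_bin_elf_v2p), translate between file offsets and virtual addresses by scanning the PT_LOAD program headers and applying the usual segment arithmetic. A minimal standalone sketch of that mapping, using made-up segment values rather than anything read from a real binary, and ignoring the ET_REL/no-phdr special cases handled above:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical PT_LOAD segment: file bytes at offset 0x1000 are mapped
 * at virtual address 0x400000 for 0x2000 bytes. */
#define P_OFFSET 0x1000ULL
#define P_VADDR  0x400000ULL
#define P_MEMSZ  0x2000ULL

static uint64_t p2v (uint64_t paddr) {
	/* file offset -> virtual address, mirroring Elf_(r_bin_elf_p2v) */
	if (paddr >= P_OFFSET && paddr < P_OFFSET + P_MEMSZ) {
		return P_VADDR + (paddr - P_OFFSET);
	}
	return paddr; /* identity when no PT_LOAD segment covers the address */
}

static uint64_t v2p (uint64_t vaddr) {
	/* virtual address -> file offset, mirroring Elf_(r_bin_elf_v2p) */
	if (vaddr >= P_VADDR && vaddr < P_VADDR + P_MEMSZ) {
		return P_OFFSET + (vaddr - P_VADDR);
	}
	return vaddr;
}

int main(void) {
	uint64_t paddr = 0x1234;
	uint64_t vaddr = p2v (paddr); /* 0x400234 for the values above */
	printf ("paddr 0x%llx -> vaddr 0x%llx -> paddr 0x%llx\n",
		(unsigned long long)paddr,
		(unsigned long long)vaddr,
		(unsigned long long)v2p (vaddr));
	return 0;
}

The round trip v2p(p2v(x)) == x holds whenever x falls inside a PT_LOAD segment, which is why the code above freely converts addresses in both directions.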
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if ((int)shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1 || vstart + vdaux < vstart) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; }
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if ((int)shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; size_t vstart_off = i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1 || shdr->sh_size - vstart_off < vdaux) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; vstart_off += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; vstart_off += aux.vda_next; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); 
return sdb; out_error: free (defs); sdb_free (sdb); return NULL; }
{'added': [(737, '\t\tsize_t vstart_off = i;'), (753, '\t\tif (vdaux < 1 || shdr->sh_size - vstart_off < vdaux) {'), (758, '\t\tvstart_off += vdaux;'), (786, '\t\t\tvstart_off += aux.vda_next;')], 'deleted': [(752, '\t\tif (vdaux < 1 || vstart + vdaux < vstart) {')]}
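The diff above is the whole fix: store_versioninfo_gnu_verdef() gains a size_t cursor (vstart_off) that is advanced alongside the vstart pointer, and the old wrap-around test `vstart + vdaux < vstart` is replaced by a check against the bytes still left in the section, `shdr->sh_size - vstart_off < vdaux`. A minimal sketch of that idiom, with hypothetical names rather than the radare2 structures:

#include <stddef.h>
#include <stdbool.h>

/* Advance a cursor inside a buffer of `size` bytes by `step` bytes.
 * Assumes *off <= size on entry. The position is tracked as an offset,
 * so the bounds check is plain unsigned arithmetic instead of relying on
 * pointer overflow, which is undefined behaviour in C and may be
 * optimised away by the compiler. */
static bool advance (size_t *off, size_t size, size_t step) {
	if (step < 1 || size - *off < step) {
		return false; /* fewer bytes left than the entry claims to need */
	}
	*off += step;
	return true;
}

With the offset-based test, an oversized vd_aux or vda_next fails the remaining-bytes check up front, before vstart is ever advanced outside the defs buffer.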
4
1
2,833
21,486
https://github.com/radare/radare2
CVE-2017-16359
['CWE-476']
foreign.c
vips_foreign_load_start
/* foreign file formats base class * * 7/2/12 * - add support for sequential reads * 18/6/12 * - flatten alpha with vips_flatten() * 28/5/13 * - auto rshift down to 8 bits during save * 19/1/14 * - pack and unpack rad to scrgb * 18/8/14 * - fix conversion to 16-bit RGB, thanks John * 18/6/15 * - forward progress signals from load * 23/5/16 * - remove max-alpha stuff, this is now automatic * 12/6/17 * - transform cmyk->rgb if there's an embedded profile * 16/6/17 * - add page_height */ /* This file is part of VIPS. VIPS is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* These files are distributed with VIPS - http://www.vips.ecs.soton.ac.uk */ /* #define DEBUG */ #ifdef HAVE_CONFIG_H #include <config.h> #endif /*HAVE_CONFIG_H*/ #include <vips/intl.h> #include <stdio.h> #include <stdlib.h> #include <vips/vips.h> #include <vips/internal.h> #include <vips/debug.h> #include "pforeign.h" /** * SECTION: foreign * @short_description: load and save images in a variety of formats * @stability: Stable * @see_also: <link linkend="libvips-image">image</link> * @include: vips/vips.h * * This set of operations load and save images in a variety of formats. * * The operations share a base class that offers a simple way to search for a * subclass of #VipsForeign which can load a certain file (see * vips_foreign_find_load()) or buffer (see vips_foreign_find_load_buffer()), * or which could be used to save an image to a * certain file type (see vips_foreign_find_save() and * vips_foreign_find_save_buffer()). You can then run these * operations using vips_call() and friends to perform the load or save. * * vips_image_write_to_file() and vips_image_new_from_file() and friends use * these functions to automate file load and save. * * You can also invoke the operations directly, for example: * * |[ * vips_tiffsave (my_image, "frank.anything", * "compression", VIPS_FOREIGN_TIFF_COMPRESSION_JPEG, * NULL); * ]| * * To add support for a new file format to vips, simply define a new subclass * of #VipsForeignLoad or #VipsForeignSave. * * If you define a new operation which is a subclass of #VipsForeign, support * for it automatically appears in all VIPS user-interfaces. It will also be * transparently supported by vips_image_new_from_file() and friends. * * VIPS comes with VipsForeign for TIFF, JPEG, PNG, Analyze, PPM, OpenEXR, CSV, * Matlab, Radiance, RAW, FITS, WebP, SVG, PDF, GIF and VIPS. It also includes * import filters which can load with libMagick and with OpenSlide. * * ## Writing a new loader * * Add a new loader to VIPS by subclassing #VipsForeignLoad. Subclasses need to * implement at least @header(). * * @header() must set at least the header fields of @out. @load(), if defined, * must load the pixels to @real. * * The suffix list is used to select a format to save a file in, and to pick a * loader if you don't define is_a(). 
* * You should also define @nickname and @description in #VipsObject. * * As a complete example, here's code for a PNG loader, minus the actual * calls to libpng. * * |[ * typedef struct _VipsForeignLoadPng { * VipsForeignLoad parent_object; * * char *filename; * } VipsForeignLoadPng; * * typedef VipsForeignLoadClass VipsForeignLoadPngClass; * * G_DEFINE_TYPE( VipsForeignLoadPng, vips_foreign_load_png, * VIPS_TYPE_FOREIGN_LOAD ); * * static VipsForeignFlags * vips_foreign_load_png_get_flags_filename( const char *filename ) * { * VipsForeignFlags flags; * * flags = 0; * if( vips__png_isinterlaced( filename ) ) * flags = VIPS_FOREIGN_PARTIAL; * else * flags = VIPS_FOREIGN_SEQUENTIAL; * * return( flags ); * } * * static VipsForeignFlags * vips_foreign_load_png_get_flags( VipsForeignLoad *load ) * { * VipsForeignLoadPng *png = (VipsForeignLoadPng *) load; * * return( vips_foreign_load_png_get_flags_filename( png->filename ) ); * } * * static int * vips_foreign_load_png_header( VipsForeignLoad *load ) * { * VipsForeignLoadPng *png = (VipsForeignLoadPng *) load; * * if( vips__png_header( png->filename, load->out ) ) * return( -1 ); * * return( 0 ); * } * * static int * vips_foreign_load_png_load( VipsForeignLoad *load ) * { * VipsForeignLoadPng *png = (VipsForeignLoadPng *) load; * * if( vips__png_read( png->filename, load->real ) ) * return( -1 ); * * return( 0 ); * } * * static void * vips_foreign_load_png_class_init( VipsForeignLoadPngClass *class ) * { * GObjectClass *gobject_class = G_OBJECT_CLASS( class ); * VipsObjectClass *object_class = (VipsObjectClass *) class; * VipsForeignClass *foreign_class = (VipsForeignClass *) class; * VipsForeignLoadClass *load_class = (VipsForeignLoadClass *) class; * * gobject_class->set_property = vips_object_set_property; * gobject_class->get_property = vips_object_get_property; * * object_class->nickname = "pngload"; * object_class->description = _( "load png from file" ); * * foreign_class->suffs = vips__png_suffs; * * load_class->is_a = vips__png_ispng; * load_class->get_flags_filename = * vips_foreign_load_png_get_flags_filename; * load_class->get_flags = vips_foreign_load_png_get_flags; * load_class->header = vips_foreign_load_png_header; * load_class->load = vips_foreign_load_png_load; * * VIPS_ARG_STRING( class, "filename", 1, * _( "Filename" ), * _( "Filename to load from" ), * VIPS_ARGUMENT_REQUIRED_INPUT, * G_STRUCT_OFFSET( VipsForeignLoadPng, filename ), * NULL ); * } * * static void * vips_foreign_load_png_init( VipsForeignLoadPng *png ) * { * } * ]| * * ## Writing a new saver * * Call your saver in the class' @build() method after chaining up. The * prepared image should be ready for you to save in @ready. * * As a complete example, here's the code for the CSV saver, minus the calls * to the actual save routines. 
* * |[ * typedef struct _VipsForeignSaveCsv { * VipsForeignSave parent_object; * * char *filename; * const char *separator; * } VipsForeignSaveCsv; * * typedef VipsForeignSaveClass VipsForeignSaveCsvClass; * * G_DEFINE_TYPE( VipsForeignSaveCsv, vips_foreign_save_csv, * VIPS_TYPE_FOREIGN_SAVE ); * * static int * vips_foreign_save_csv_build( VipsObject *object ) * { * VipsForeignSave *save = (VipsForeignSave *) object; * VipsForeignSaveCsv *csv = (VipsForeignSaveCsv *) object; * * if( VIPS_OBJECT_CLASS( vips_foreign_save_csv_parent_class )-> * build( object ) ) * return( -1 ); * * if( vips__csv_write( save->ready, csv->filename, csv->separator ) ) * return( -1 ); * * return( 0 ); * } * * static void * vips_foreign_save_csv_class_init( VipsForeignSaveCsvClass *class ) * { * GObjectClass *gobject_class = G_OBJECT_CLASS( class ); * VipsObjectClass *object_class = (VipsObjectClass *) class; * VipsForeignClass *foreign_class = (VipsForeignClass *) class; * VipsForeignSaveClass *save_class = (VipsForeignSaveClass *) class; * * gobject_class->set_property = vips_object_set_property; * gobject_class->get_property = vips_object_get_property; * * object_class->nickname = "csvsave"; * object_class->description = _( "save image to csv file" ); * object_class->build = vips_foreign_save_csv_build; * * foreign_class->suffs = vips__foreign_csv_suffs; * * save_class->saveable = VIPS_SAVEABLE_MONO; * // no need to define ->format_table, we don't want the input * // cast for us * * VIPS_ARG_STRING( class, "filename", 1, * _( "Filename" ), * _( "Filename to save to" ), * VIPS_ARGUMENT_REQUIRED_INPUT, * G_STRUCT_OFFSET( VipsForeignSaveCsv, filename ), * NULL ); * * VIPS_ARG_STRING( class, "separator", 13, * _( "Separator" ), * _( "Separator characters" ), * VIPS_ARGUMENT_OPTIONAL_INPUT, * G_STRUCT_OFFSET( VipsForeignSaveCsv, separator ), * "\t" ); * } * * static void * vips_foreign_save_csv_init( VipsForeignSaveCsv *csv ) * { * csv->separator = g_strdup( "\t" ); * } * ]| */ /* Use this to link images to the load operation that made them. */ static GQuark vips__foreign_load_operation = 0; /** * VipsForeignFlags: * @VIPS_FOREIGN_NONE: no flags set * @VIPS_FOREIGN_PARTIAL: the image may be read lazilly * @VIPS_FOREIGN_BIGENDIAN: image pixels are most-significant byte first * @VIPS_FOREIGN_SEQUENTIAL: top-to-bottom lazy reading * * Some hints about the image loader. * * #VIPS_FOREIGN_PARTIAL means that the image can be read directly from the * file without needing to be unpacked to a temporary image first. * * #VIPS_FOREIGN_SEQUENTIAL means that the loader supports lazy reading, but * only top-to-bottom (sequential) access. Formats like PNG can read sets of * scanlines, for example, but only in order. * * If neither PARTIAL or SEQUENTIAL is set, the loader only supports whole * image read. Setting both PARTIAL and SEQUENTIAL is an error. * * #VIPS_FOREIGN_BIGENDIAN means that image pixels are most-significant byte * first. Depending on the native byte order of the host machine, you may * need to swap bytes. See vips_copy(). 
*/ G_DEFINE_ABSTRACT_TYPE( VipsForeign, vips_foreign, VIPS_TYPE_OPERATION ); static void vips_foreign_summary_class( VipsObjectClass *object_class, VipsBuf *buf ) { VipsForeignClass *class = VIPS_FOREIGN_CLASS( object_class ); VIPS_OBJECT_CLASS( vips_foreign_parent_class )-> summary_class( object_class, buf ); if( class->suffs ) { const char **p; vips_buf_appends( buf, " (" ); for( p = class->suffs; *p; p++ ) { vips_buf_appendf( buf, "%s", *p ); if( p[1] ) vips_buf_appends( buf, ", " ); } vips_buf_appends( buf, ")" ); } vips_buf_appendf( buf, ", priority=%d", class->priority ); } static void vips_foreign_class_init( VipsForeignClass *class ) { GObjectClass *gobject_class = G_OBJECT_CLASS( class ); VipsObjectClass *object_class = (VipsObjectClass *) class; gobject_class->set_property = vips_object_set_property; gobject_class->get_property = vips_object_get_property; object_class->nickname = "foreign"; object_class->description = _( "load and save image files" ); object_class->summary_class = vips_foreign_summary_class; } static void vips_foreign_init( VipsForeign *object ) { } /* To iterate over supported files we build a temp list of subclasses of * VipsForeign, sort by priority, iterate, and free. */ static void * file_add_class( VipsForeignClass *class, GSList **files ) { /* Append so we don't reverse the list of files. Sort will not reorder * items of equal priority. */ *files = g_slist_append( *files, class ); return( NULL ); } static gint file_compare( VipsForeignClass *a, VipsForeignClass *b ) { return( b->priority - a->priority ); } /** * vips_foreign_map: * @base: base class to search below (eg. "VipsForeignLoad") * @fn: (scope call): function to apply to each #VipsForeignClass * @a: user data * @b: user data * * Apply a function to every #VipsForeignClass that VIPS knows about. Foreigns * are presented to the function in priority order. * * Like all VIPS map functions, if @fn returns %NULL, iteration continues. If * it returns non-%NULL, iteration terminates and that value is returned. The * map function returns %NULL if all calls return %NULL. * * See also: vips_slist_map(). * * Returns: (transfer none): the result of iteration */ void * vips_foreign_map( const char *base, VipsSListMap2Fn fn, void *a, void *b ) { GSList *files; void *result; files = NULL; (void) vips_class_map_all( g_type_from_name( base ), (VipsClassMapFn) file_add_class, (void *) &files ); files = g_slist_sort( files, (GCompareFunc) file_compare ); result = vips_slist_map2( files, fn, a, b ); g_slist_free( files ); return( result ); } /* Abstract base class for image load. 
*/ G_DEFINE_ABSTRACT_TYPE( VipsForeignLoad, vips_foreign_load, VIPS_TYPE_FOREIGN ); static void vips_foreign_load_dispose( GObject *gobject ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( gobject ); VIPS_UNREF( load->real ); G_OBJECT_CLASS( vips_foreign_load_parent_class )->dispose( gobject ); } static void vips_foreign_load_summary_class( VipsObjectClass *object_class, VipsBuf *buf ) { VipsForeignLoadClass *class = VIPS_FOREIGN_LOAD_CLASS( object_class ); VIPS_OBJECT_CLASS( vips_foreign_load_parent_class )-> summary_class( object_class, buf ); if( !G_TYPE_IS_ABSTRACT( G_TYPE_FROM_CLASS( class ) ) ) { if( class->is_a ) vips_buf_appends( buf, ", is_a" ); if( class->is_a_buffer ) vips_buf_appends( buf, ", is_a_buffer" ); if( class->get_flags ) vips_buf_appends( buf, ", get_flags" ); if( class->get_flags_filename ) vips_buf_appends( buf, ", get_flags_filename" ); if( class->header ) vips_buf_appends( buf, ", header" ); if( class->load ) vips_buf_appends( buf, ", load" ); /* You can omit ->load(), you must not omit ->header(). */ g_assert( class->header ); } } /* Can this VipsForeign open this file? */ static void * vips_foreign_find_load_sub( VipsForeignLoadClass *load_class, const char *filename ) { VipsForeignClass *class = VIPS_FOREIGN_CLASS( load_class ); #ifdef DEBUG printf( "vips_foreign_find_load_sub: %s\n", VIPS_OBJECT_CLASS( class )->nickname ); #endif /*DEBUG*/ if( load_class->is_a ) { if( load_class->is_a( filename ) ) return( load_class ); #ifdef DEBUG printf( "vips_foreign_find_load_sub: is_a failed\n" ); #endif /*DEBUG*/ } else if( class->suffs && vips_filename_suffix_match( filename, class->suffs ) ) return( load_class ); else { #ifdef DEBUG printf( "vips_foreign_find_load_sub: suffix match failed\n" ); #endif /*DEBUG*/ } return( NULL ); } /** * vips_foreign_find_load: * @filename: file to find a loader for * * Searches for an operation you could use to load @filename. Any trailing * options on @filename are stripped and ignored. * * See also: vips_foreign_find_load_buffer(), vips_image_new_from_file(). * * Returns: the name of an operation on success, %NULL on error */ const char * vips_foreign_find_load( const char *name ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; VipsForeignLoadClass *load_class; vips__filename_split8( name, filename, option_string ); if( !vips_existsf( "%s", filename ) ) { vips_error( "VipsForeignLoad", _( "file \"%s\" not found" ), name ); return( NULL ); } if( !(load_class = (VipsForeignLoadClass *) vips_foreign_map( "VipsForeignLoad", (VipsSListMap2Fn) vips_foreign_find_load_sub, (void *) filename, NULL )) ) { vips_error( "VipsForeignLoad", _( "\"%s\" is not a known file format" ), name ); return( NULL ); } #ifdef DEBUG printf( "vips_foreign_find_load: selected %s\n", VIPS_OBJECT_CLASS( load_class )->nickname ); #endif /*DEBUG*/ return( G_OBJECT_CLASS_NAME( load_class ) ); } /* Kept for compat with earlier version of the vip8 API. Use * vips_image_new_from_file() now. */ int vips_foreign_load( const char *name, VipsImage **out, ... ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; const char *operation_name; va_list ap; int result; vips__filename_split8( name, filename, option_string ); if( !(operation_name = vips_foreign_find_load( filename )) ) return( -1 ); va_start( ap, out ); result = vips_call_split_option_string( operation_name, option_string, ap, filename, out ); va_end( ap ); return( result ); } /* Can this VipsForeign open this buffer? 
*/ static void * vips_foreign_find_load_buffer_sub( VipsForeignLoadClass *load_class, const void **buf, size_t *len ) { if( load_class->is_a_buffer && load_class->is_a_buffer( *buf, *len ) ) return( load_class ); return( NULL ); } /** * vips_foreign_find_load_buffer: * @data: (array length=size) (element-type guint8) (transfer none): start of * memory buffer * @size: (type gsize): number of bytes in @data * * Searches for an operation you could use to load a memory buffer. To see the * range of buffer loaders supported by your vips, try something like: * * vips -l | grep load_buffer * * See also: vips_image_new_from_buffer(). * * Returns: (transfer none): the name of an operation on success, %NULL on * error. */ const char * vips_foreign_find_load_buffer( const void *data, size_t size ) { VipsForeignLoadClass *load_class; if( !(load_class = (VipsForeignLoadClass *) vips_foreign_map( "VipsForeignLoad", (VipsSListMap2Fn) vips_foreign_find_load_buffer_sub, &data, &size )) ) { vips_error( "VipsForeignLoad", "%s", _( "buffer is not in a known format" ) ); return( NULL ); } return( G_OBJECT_CLASS_NAME( load_class ) ); } /** * vips_foreign_is_a: * @loader: name of loader to use for test * @filename: file to test * * Return %TRUE if @filename can be loaded by @loader. @loader is something * like "tiffload" or "VipsForeignLoadTiff". * * Returns: %TRUE if @filename can be loaded by @loader. */ gboolean vips_foreign_is_a( const char *loader, const char *filename ) { const VipsObjectClass *class; VipsForeignLoadClass *load_class; if( !(class = vips_class_find( "VipsForeignLoad", loader )) ) return( FALSE ); load_class = VIPS_FOREIGN_LOAD_CLASS( class ); if( load_class->is_a && load_class->is_a( filename ) ) return( TRUE ); return( FALSE ); } /** * vips_foreign_is_a_buffer: * @loader: name of loader to use for test * @data: (array length=size) (element-type guint8): pointer to the buffer to test * @size: (type gsize): size of the buffer to test * * Return %TRUE if @data can be loaded by @loader. @loader is something * like "tiffload_buffer" or "VipsForeignLoadTiffBuffer". * * Returns: %TRUE if @data can be loaded by @loader. */ gboolean vips_foreign_is_a_buffer( const char *loader, const void *data, size_t size ) { const VipsObjectClass *class; VipsForeignLoadClass *load_class; if( !(class = vips_class_find( "VipsForeignLoad", loader )) ) return( FALSE ); load_class = VIPS_FOREIGN_LOAD_CLASS( class ); if( load_class->is_a_buffer && load_class->is_a_buffer( data, size ) ) return( TRUE ); return( FALSE ); } /** * vips_foreign_flags: * @loader: name of loader to use for test * @filename: file to test * * Return the flags for @filename using @loader. * @loader is something like "tiffload" or "VipsForeignLoadTiff". * * Returns: the flags for @filename. 
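 *
 * As a rough illustration (the loader name and filename here are just
 * placeholders), an application could test for top-to-bottom lazy read
 * support with something like:
 *
 * |[
 * if( vips_foreign_flags( "tiffload", "fred.tif" ) &
 * 	VIPS_FOREIGN_SEQUENTIAL )
 * 	printf( "tiffload can read fred.tif sequentially\n" );
 * ]|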
*/ VipsForeignFlags vips_foreign_flags( const char *loader, const char *filename ) { const VipsObjectClass *class; if( (class = vips_class_find( "VipsForeignLoad", loader )) ) { VipsForeignLoadClass *load_class = VIPS_FOREIGN_LOAD_CLASS( class ); if( load_class->get_flags_filename ) return( load_class->get_flags_filename( filename ) ); } return( 0 ); } static VipsObject * vips_foreign_load_new_from_string( const char *string ) { const char *file_op; GType type; VipsForeignLoad *load; if( !(file_op = vips_foreign_find_load( string )) ) return( NULL ); type = g_type_from_name( file_op ); g_assert( type ); load = VIPS_FOREIGN_LOAD( g_object_new( type, NULL ) ); g_object_set( load, "filename", string, NULL ); return( VIPS_OBJECT( load ) ); } static VipsImage * vips_foreign_load_temp( VipsForeignLoad *load ) { const guint64 disc_threshold = vips_get_disc_threshold(); const guint64 image_size = VIPS_IMAGE_SIZEOF_IMAGE( load->out ); /* If this is a partial operation, we can open directly. */ if( load->flags & VIPS_FOREIGN_PARTIAL ) { #ifdef DEBUG printf( "vips_foreign_load_temp: partial temp\n" ); #endif /*DEBUG*/ return( vips_image_new() ); } /* If it can do sequential access and it's been requested, we can open * directly. */ if( (load->flags & VIPS_FOREIGN_SEQUENTIAL) && load->access != VIPS_ACCESS_RANDOM ) { #ifdef DEBUG printf( "vips_foreign_load_temp: partial sequential temp\n" ); #endif /*DEBUG*/ return( vips_image_new() ); } /* ->memory used to be called ->disc and default TRUE. If it's been * forced FALSE, set memory TRUE. */ if( !load->disc ) load->memory = TRUE; /* We open via disc if: * - 'memory' is off * - the uncompressed image will be larger than * vips_get_disc_threshold() */ if( !load->memory && image_size > disc_threshold ) { #ifdef DEBUG printf( "vips_foreign_load_temp: disc temp\n" ); #endif /*DEBUG*/ return( vips_image_new_temp_file( "%s.v" ) ); } #ifdef DEBUG printf( "vips_foreign_load_temp: memory temp\n" ); #endif /*DEBUG*/ /* Otherwise, fall back to a memory buffer. */ return( vips_image_new_memory() ); } /* Check two images for compatibility: their geometries need to match. */ static gboolean vips_foreign_load_iscompat( VipsImage *a, VipsImage *b ) { if( a->Xsize != b->Xsize || a->Ysize != b->Ysize || a->Bands != b->Bands || a->Coding != b->Coding || a->BandFmt != b->BandFmt ) { vips_error( "VipsForeignLoad", "%s", _( "images do not match" ) ); return( FALSE ); } return( TRUE ); } /* Our start function ... do the lazy open, if necessary, and return a region * on the new image. */ static void * vips_foreign_load_start( VipsImage *out, void *a, void *b ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( b ); VipsForeignLoadClass *class = VIPS_FOREIGN_LOAD_GET_CLASS( load ); if( !load->real ) { if( !(load->real = vips_foreign_load_temp( load )) ) return( NULL ); #ifdef DEBUG printf( "vips_foreign_load_start: triggering ->load()\n" ); #endif /*DEBUG*/ /* Read the image in. This may involve a long computation and * will finish with load->real holding the decompressed image. * * We want our caller to be able to see this computation on * @out, so eval signals on ->real need to appear on ->out. */ load->real->progress_signal = load->out; /* Note the load object on the image. Loaders can use * this to signal invalidate if they hit a load error. See * vips_foreign_load_invalidate() below. 
*/ g_object_set_qdata( G_OBJECT( load->real ), vips__foreign_load_operation, load ); if( class->load( load ) || vips_image_pio_input( load->real ) ) return( NULL ); /* ->header() read the header into @out, load has read the * image into @real. They must match exactly in size, bands, * format and coding for the copy to work. * * Some versions of ImageMagick give different results between * Ping and Load for some formats, for example. */ if( !vips_foreign_load_iscompat( load->real, out ) ) return( NULL ); /* We have to tell vips that out depends on real. We've set * the demand hint below, but not given an input there. */ vips_image_pipelinev( load->out, load->out->dhint, load->real, NULL ); } return( vips_region_new( load->real ) ); } /* Just pointer-copy. */ static int vips_foreign_load_generate( VipsRegion *or, void *seq, void *a, void *b, gboolean *stop ) { VipsRegion *ir = (VipsRegion *) seq; VipsRect *r = &or->valid; /* Ask for input we need. */ if( vips_region_prepare( ir, r ) ) return( -1 ); /* Attach output region to that. */ if( vips_region_region( or, ir, r, r->left, r->top ) ) return( -1 ); return( 0 ); } static int vips_foreign_load_build( VipsObject *object ) { VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( object ); VipsForeignLoad *load = VIPS_FOREIGN_LOAD( object ); VipsForeignLoadClass *fclass = VIPS_FOREIGN_LOAD_GET_CLASS( object ); VipsForeignFlags flags; #ifdef DEBUG printf( "vips_foreign_load_build:\n" ); #endif /*DEBUG*/ flags = 0; if( fclass->get_flags ) flags |= fclass->get_flags( load ); if( (flags & VIPS_FOREIGN_PARTIAL) && (flags & VIPS_FOREIGN_SEQUENTIAL) ) { g_warning( "%s", _( "VIPS_FOREIGN_PARTIAL and VIPS_FOREIGN_SEQUENTIAL " "both set -- using SEQUENTIAL" ) ); flags ^= VIPS_FOREIGN_PARTIAL; } g_object_set( load, "flags", flags, NULL ); /* If the loader can do sequential mode and sequential has been * requested, we need to block caching. */ if( (load->flags & VIPS_FOREIGN_SEQUENTIAL) && load->access != VIPS_ACCESS_RANDOM ) load->nocache = TRUE; if( VIPS_OBJECT_CLASS( vips_foreign_load_parent_class )-> build( object ) ) return( -1 ); if( load->sequential ) g_warning( "%s", _( "ignoring deprecated \"sequential\" mode -- " "please use \"access\" instead" ) ); g_object_set( object, "out", vips_image_new(), NULL ); vips_image_set_string( load->out, VIPS_META_LOADER, class->nickname ); #ifdef DEBUG printf( "vips_foreign_load_build: triggering ->header()\n" ); #endif /*DEBUG*/ /* Read the header into @out. */ if( fclass->header && fclass->header( load ) ) return( -1 ); /* If there's no ->load() method then the header read has done * everything. Otherwise, it's just set fields and we must also * load pixels. * * Delay the load until the first pixel is requested by doing the work * in the start function of the copy. */ if( fclass->load ) { #ifdef DEBUG printf( "vips_foreign_load_build: delaying read ...\n" ); #endif /*DEBUG*/ /* ->header() should set the dhint. It'll default to the safe * SMALLTILE if header() did not set it. */ vips_image_pipelinev( load->out, load->out->dhint, NULL ); /* Then 'start' creates the real image and 'gen' fetches * pixels for @out from @real on demand. */ if( vips_image_generate( load->out, vips_foreign_load_start, vips_foreign_load_generate, vips_stop_one, NULL, load ) ) return( -1 ); } /* If random access has been requested, make sure that we don't have a * SEQ tag left from a sequential loader. 
*/ if( load->access == VIPS_ACCESS_RANDOM ) (void) vips_image_remove( load->out, VIPS_META_SEQUENTIAL ); return( 0 ); } static VipsOperationFlags vips_foreign_load_operation_get_flags( VipsOperation *operation ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( operation ); VipsOperationFlags flags; flags = VIPS_OPERATION_CLASS( vips_foreign_load_parent_class )-> get_flags( operation ); if( load->nocache ) flags |= VIPS_OPERATION_NOCACHE; return( flags ); } static void vips_foreign_load_class_init( VipsForeignLoadClass *class ) { GObjectClass *gobject_class = G_OBJECT_CLASS( class ); VipsObjectClass *object_class = (VipsObjectClass *) class; VipsOperationClass *operation_class = (VipsOperationClass *) class; gobject_class->dispose = vips_foreign_load_dispose; gobject_class->set_property = vips_object_set_property; gobject_class->get_property = vips_object_get_property; object_class->build = vips_foreign_load_build; object_class->summary_class = vips_foreign_load_summary_class; object_class->new_from_string = vips_foreign_load_new_from_string; object_class->nickname = "fileload"; object_class->description = _( "file loaders" ); operation_class->get_flags = vips_foreign_load_operation_get_flags; VIPS_ARG_IMAGE( class, "out", 2, _( "Output" ), _( "Output image" ), VIPS_ARGUMENT_REQUIRED_OUTPUT, G_STRUCT_OFFSET( VipsForeignLoad, out ) ); VIPS_ARG_FLAGS( class, "flags", 6, _( "Flags" ), _( "Flags for this file" ), VIPS_ARGUMENT_OPTIONAL_OUTPUT, G_STRUCT_OFFSET( VipsForeignLoad, flags ), VIPS_TYPE_FOREIGN_FLAGS, VIPS_FOREIGN_NONE ); VIPS_ARG_BOOL( class, "memory", 7, _( "Memory" ), _( "Force open via memory" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignLoad, memory ), FALSE ); VIPS_ARG_ENUM( class, "access", 8, _( "Access" ), _( "Required access pattern for this file" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignLoad, access ), VIPS_TYPE_ACCESS, VIPS_ACCESS_RANDOM ); VIPS_ARG_BOOL( class, "sequential", 10, _( "Sequential" ), _( "Sequential read only" ), VIPS_ARGUMENT_OPTIONAL_INPUT | VIPS_ARGUMENT_DEPRECATED, G_STRUCT_OFFSET( VipsForeignLoad, sequential ), FALSE ); VIPS_ARG_BOOL( class, "fail", 11, _( "Fail" ), _( "Fail on first error" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignLoad, fail ), FALSE ); VIPS_ARG_BOOL( class, "disc", 12, _( "Disc" ), _( "Open to disc" ), VIPS_ARGUMENT_OPTIONAL_INPUT | VIPS_ARGUMENT_DEPRECATED, G_STRUCT_OFFSET( VipsForeignLoad, disc ), TRUE ); } static void vips_foreign_load_init( VipsForeignLoad *load ) { load->disc = TRUE; load->access = VIPS_ACCESS_RANDOM; } /* * Loaders can call this */ /** * vips_foreign_load_invalidate: (method) * @image: image to invalidate * * Loaders can call this on the image they are making if they see a read error * from the load library. It signals "invalidate" on the load operation and * will cause it to be dropped from cache. * * If we know a file will cause a read error, we don't want to cache the * failing operation, we want to make sure the image will really be opened * again if our caller tries again. For example, a broken file might be * replaced by a working one. */ void vips_foreign_load_invalidate( VipsImage *image ) { VipsOperation *operation; #ifdef DEBUG printf( "vips_foreign_load_invalidate: %p\n", image ); #endif /*DEBUG*/ if( (operation = g_object_get_qdata( G_OBJECT( image ), vips__foreign_load_operation )) ) { vips_operation_invalidate( operation ); } } /* Abstract base class for image savers. 
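 *
 * Subclasses describe what they can write by setting ->saveable, the
 * ->format_table and the ->coding[] table in class_init, then do the
 * actual write in ->build() after chaining up; by that point the image
 * has been converted for them and is waiting in ->ready.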
*/ G_DEFINE_ABSTRACT_TYPE( VipsForeignSave, vips_foreign_save, VIPS_TYPE_FOREIGN ); static void vips_foreign_save_dispose( GObject *gobject ) { VipsForeignSave *save = VIPS_FOREIGN_SAVE( gobject ); VIPS_UNREF( save->ready ); G_OBJECT_CLASS( vips_foreign_save_parent_class )->dispose( gobject ); } static void vips_foreign_save_summary_class( VipsObjectClass *object_class, VipsBuf *buf ) { VipsForeignSaveClass *class = VIPS_FOREIGN_SAVE_CLASS( object_class ); VIPS_OBJECT_CLASS( vips_foreign_save_parent_class )-> summary_class( object_class, buf ); vips_buf_appendf( buf, ", %s", vips_enum_nick( VIPS_TYPE_SAVEABLE, class->saveable ) ); } static VipsObject * vips_foreign_save_new_from_string( const char *string ) { const char *file_op; GType type; VipsForeignSave *save; if( !(file_op = vips_foreign_find_save( string )) ) return( NULL ); type = g_type_from_name( file_op ); g_assert( type ); save = VIPS_FOREIGN_SAVE( g_object_new( type, NULL ) ); g_object_set( save, "filename", string, NULL ); return( VIPS_OBJECT( save ) ); } /* Convert an image for saving. */ int vips__foreign_convert_saveable( VipsImage *in, VipsImage **ready, VipsSaveable saveable, VipsBandFormat *format, VipsCoding *coding, VipsArrayDouble *background ) { /* in holds a reference to the output of our chain as we build it. */ g_object_ref( in ); /* For coded images, can this class save the coding we are in now? * Nothing to do. */ if( in->Coding != VIPS_CODING_NONE && coding[in->Coding] ) { *ready = in; return( 0 ); } /* For uncoded images, if this saver supports ANY bands and this * format we have nothing to do. */ if( in->Coding == VIPS_CODING_NONE && saveable == VIPS_SAVEABLE_ANY && format[in->BandFmt] == in->BandFmt ) { *ready = in; return( 0 ); } /* Otherwise ... we need to decode and then (possibly) recode at the * end. */ /* If this is an VIPS_CODING_LABQ, we can go straight to RGB. */ if( in->Coding == VIPS_CODING_LABQ ) { VipsImage *out; if( vips_LabQ2sRGB( in, &out, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* If this is an VIPS_CODING_RAD, we unpack to float. This could be * scRGB or XYZ. */ if( in->Coding == VIPS_CODING_RAD ) { VipsImage *out; if( vips_rad2float( in, &out, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* If the saver supports RAD, we need to go to scRGB or XYZ. */ if( coding[VIPS_CODING_RAD] ) { if( in->Type != VIPS_INTERPRETATION_scRGB && in->Type != VIPS_INTERPRETATION_XYZ ) { VipsImage *out; if( vips_colourspace( in, &out, VIPS_INTERPRETATION_scRGB, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } } /* If this image is CMYK and the saver is RGB-only, use lcms to try to * import to XYZ. This will only work if the image has an embedded * profile. */ if( in->Type == VIPS_INTERPRETATION_CMYK && in->Bands >= 4 && (saveable == VIPS_SAVEABLE_RGB || saveable == VIPS_SAVEABLE_RGBA || saveable == VIPS_SAVEABLE_RGBA_ONLY) ) { VipsImage *out; if( vips_icc_import( in, &out, "pcs", VIPS_PCS_XYZ, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; /* We've imported to PCS, we must remove the embedded profile, * since it no longer matches the image. * * For example, when converting CMYK JPG to RGB PNG, we need * to remove the CMYK profile on import, or the png writer will * try to attach it when we write the image as RGB. */ vips_image_remove( in, VIPS_META_ICC_NAME ); } /* If this is something other than CMYK or RAD, eg. maybe a LAB image, * we need to transform to RGB. 
*/ if( !coding[VIPS_CODING_RAD] && in->Bands >= 3 && in->Type != VIPS_INTERPRETATION_CMYK && vips_colourspace_issupported( in ) && (saveable == VIPS_SAVEABLE_RGB || saveable == VIPS_SAVEABLE_RGBA || saveable == VIPS_SAVEABLE_RGBA_ONLY || saveable == VIPS_SAVEABLE_RGB_CMYK) ) { VipsImage *out; VipsInterpretation interpretation; /* Do we make RGB or RGB16? We don't want to squash a 16-bit * RGB down to 8 bits if the saver supports 16. */ if( vips_band_format_is8bit( format[in->BandFmt] ) ) interpretation = VIPS_INTERPRETATION_sRGB; else interpretation = VIPS_INTERPRETATION_RGB16; if( vips_colourspace( in, &out, interpretation, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* VIPS_SAVEABLE_RGBA_ONLY does not support 1 or 2 bands ... convert * to sRGB. */ if( !coding[VIPS_CODING_RAD] && in->Bands < 3 && vips_colourspace_issupported( in ) && saveable == VIPS_SAVEABLE_RGBA_ONLY ) { VipsImage *out; VipsInterpretation interpretation; /* Do we make RGB or RGB16? We don't want to squash a 16-bit * RGB down to 8 bits if the saver supports 16. */ if( vips_band_format_is8bit( format[in->BandFmt] ) ) interpretation = VIPS_INTERPRETATION_sRGB; else interpretation = VIPS_INTERPRETATION_RGB16; if( vips_colourspace( in, &out, interpretation, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* Get the bands right. We must do this after all colourspace * transforms, since they can change the number of bands. */ if( in->Coding == VIPS_CODING_NONE ) { /* Do we need to flatten out an alpha channel? There needs to * be an alpha there now, and this writer needs to not support * alpha. */ if( (in->Bands == 2 || (in->Bands == 4 && in->Type != VIPS_INTERPRETATION_CMYK)) && (saveable == VIPS_SAVEABLE_MONO || saveable == VIPS_SAVEABLE_RGB || saveable == VIPS_SAVEABLE_RGB_CMYK) ) { VipsImage *out; if( vips_flatten( in, &out, "background", background, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* Other alpha removal strategies ... just drop the extra * bands. */ else if( in->Bands > 3 && (saveable == VIPS_SAVEABLE_RGB || (saveable == VIPS_SAVEABLE_RGB_CMYK && in->Type != VIPS_INTERPRETATION_CMYK)) ) { VipsImage *out; /* Don't let 4 bands though unless the image really is * a CMYK. * * Consider a RGBA png being saved as JPG. We can * write CMYK jpg, but we mustn't do that for RGBA * images. */ if( vips_extract_band( in, &out, 0, "n", 3, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } else if( in->Bands > 4 && ((saveable == VIPS_SAVEABLE_RGB_CMYK && in->Type == VIPS_INTERPRETATION_CMYK) || saveable == VIPS_SAVEABLE_RGBA || saveable == VIPS_SAVEABLE_RGBA_ONLY) ) { VipsImage *out; if( vips_extract_band( in, &out, 0, "n", 4, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } else if( in->Bands > 1 && saveable == VIPS_SAVEABLE_MONO ) { VipsImage *out; if( vips_extract_band( in, &out, 0, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* Else we have VIPS_SAVEABLE_ANY and we don't chop bands down. */ } /* Handle the ushort interpretations. * * RGB16 and GREY16 use 0-65535 for black-white. If we have an image * tagged like this, and it has more than 8 bits (we leave crazy uchar * images tagged as RGB16 alone), we'll need to get it ready for the * saver. 
*/ if( (in->Type == VIPS_INTERPRETATION_RGB16 || in->Type == VIPS_INTERPRETATION_GREY16) && !vips_band_format_is8bit( in->BandFmt ) ) { /* If the saver supports ushort, cast to ushort. It may be * float at the moment, for example. * * If the saver does not support ushort, automatically shift * it down. This is the behaviour we want for saving an RGB16 * image as JPG, for example. */ if( format[VIPS_FORMAT_USHORT] == VIPS_FORMAT_USHORT ) { VipsImage *out; if( vips_cast( in, &out, VIPS_FORMAT_USHORT, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } else { VipsImage *out; if( vips_rshift_const1( in, &out, 8, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; /* That could have produced an int image ... make sure * we are now uchar. */ if( vips_cast( in, &out, VIPS_FORMAT_UCHAR, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } } /* Cast to the output format. */ { VipsImage *out; if( vips_cast( in, &out, format[in->BandFmt], NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* Does this class want a coded image? Search the coding table for the * first one. */ if( coding[VIPS_CODING_NONE] ) { /* Already NONE, nothing to do. */ } else if( coding[VIPS_CODING_LABQ] ) { VipsImage *out; if( vips_Lab2LabQ( in, &out, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } else if( coding[VIPS_CODING_RAD] ) { VipsImage *out; if( vips_float2rad( in, &out, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } *ready = in; return( 0 ); } static int vips_foreign_save_build( VipsObject *object ) { VipsForeignSave *save = VIPS_FOREIGN_SAVE( object ); if( save->in ) { VipsForeignSaveClass *class = VIPS_FOREIGN_SAVE_GET_CLASS( save ); VipsImage *ready; if( vips__foreign_convert_saveable( save->in, &ready, class->saveable, class->format_table, class->coding, save->background ) ) return( -1 ); if( save->page_height ) vips_image_set_int( ready, VIPS_META_PAGE_HEIGHT, save->page_height ); VIPS_UNREF( save->ready ); save->ready = ready; } if( VIPS_OBJECT_CLASS( vips_foreign_save_parent_class )-> build( object ) ) return( -1 ); return( 0 ); } #define UC VIPS_FORMAT_UCHAR #define C VIPS_FORMAT_CHAR #define US VIPS_FORMAT_USHORT #define S VIPS_FORMAT_SHORT #define UI VIPS_FORMAT_UINT #define I VIPS_FORMAT_INT #define F VIPS_FORMAT_FLOAT #define X VIPS_FORMAT_COMPLEX #define D VIPS_FORMAT_DOUBLE #define DX VIPS_FORMAT_DPCOMPLEX static int vips_foreign_save_format_table[10] = { // UC C US S UI I F X D DX UC, C, US, S, UI, I, F, X, D, DX }; static void vips_foreign_save_class_init( VipsForeignSaveClass *class ) { GObjectClass *gobject_class = G_OBJECT_CLASS( class ); VipsObjectClass *object_class = (VipsObjectClass *) class; VipsOperationClass *operation_class = (VipsOperationClass *) class; int i; gobject_class->dispose = vips_foreign_save_dispose; gobject_class->set_property = vips_object_set_property; gobject_class->get_property = vips_object_get_property; object_class->build = vips_foreign_save_build; object_class->summary_class = vips_foreign_save_summary_class; object_class->new_from_string = vips_foreign_save_new_from_string; object_class->nickname = "filesave"; object_class->description = _( "file savers" ); /* All savers are sequential by definition. Things like tiled tiff * write and interlaced png write, which are not, add extra caches * on their input. 
*/ operation_class->flags |= VIPS_OPERATION_SEQUENTIAL; /* Must not cache savers. */ operation_class->flags |= VIPS_OPERATION_NOCACHE; /* Default to no coding allowed. */ for( i = 0; i < VIPS_CODING_LAST; i++ ) class->coding[i] = FALSE; class->coding[VIPS_CODING_NONE] = TRUE; /* Default to no cast on save. */ class->format_table = vips_foreign_save_format_table; VIPS_ARG_IMAGE( class, "in", 0, _( "Input" ), _( "Image to save" ), VIPS_ARGUMENT_REQUIRED_INPUT, G_STRUCT_OFFSET( VipsForeignSave, in ) ); VIPS_ARG_BOOL( class, "strip", 100, _( "Strip" ), _( "Strip all metadata from image" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignSave, strip ), FALSE ); VIPS_ARG_BOXED( class, "background", 101, _( "Background" ), _( "Background value" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignSave, background ), VIPS_TYPE_ARRAY_DOUBLE ); VIPS_ARG_INT( class, "page_height", 8, _( "Page height" ), _( "Set page height for multipage save" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignSave, page_height ), 0, VIPS_MAX_COORD, 0 ); } static void vips_foreign_save_init( VipsForeignSave *save ) { save->background = vips_array_double_newv( 1, 0.0 ); } /* Can we write this filename with this file? */ static void * vips_foreign_find_save_sub( VipsForeignSaveClass *save_class, const char *filename ) { VipsForeignClass *class = VIPS_FOREIGN_CLASS( save_class ); /* The suffs might be defined on an abstract base class, make sure we * don't pick that. */ if( !G_TYPE_IS_ABSTRACT( G_TYPE_FROM_CLASS( class ) ) && class->suffs && vips_filename_suffix_match( filename, class->suffs ) ) return( save_class ); return( NULL ); } /** * vips_foreign_find_save: * @filename: name to find a saver for * * Searches for an operation you could use to write to @filename. * Any trailing options on @filename are stripped and ignored. * * See also: vips_foreign_find_save_buffer(), vips_image_write_to_file(). * * Returns: the name of an operation on success, %NULL on error */ const char * vips_foreign_find_save( const char *name ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; VipsForeignSaveClass *save_class; vips__filename_split8( name, filename, option_string ); if( !(save_class = (VipsForeignSaveClass *) vips_foreign_map( "VipsForeignSave", (VipsSListMap2Fn) vips_foreign_find_save_sub, (void *) filename, NULL )) ) { vips_error( "VipsForeignSave", _( "\"%s\" is not a known file format" ), name ); return( NULL ); } return( G_OBJECT_CLASS_NAME( save_class ) ); } /* Kept for early vips8 API compat. */ int vips_foreign_save( VipsImage *in, const char *name, ... ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; const char *operation_name; va_list ap; int result; vips__filename_split8( name, filename, option_string ); if( !(operation_name = vips_foreign_find_save( filename )) ) return( -1 ); va_start( ap, name ); result = vips_call_split_option_string( operation_name, option_string, ap, in, filename ); va_end( ap ); return( result ); } /* Can we write this buffer with this file type? 
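 *
 * Note that as well as matching the suffix we insist the nickname ends in
 * "_buffer", so plain file savers are never picked when searching for a
 * buffer saver.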
*/ static void * vips_foreign_find_save_buffer_sub( VipsForeignSaveClass *save_class, const char *suffix ) { VipsObjectClass *object_class = VIPS_OBJECT_CLASS( save_class ); VipsForeignClass *class = VIPS_FOREIGN_CLASS( save_class ); if( class->suffs && vips_ispostfix( object_class->nickname, "_buffer" ) && vips_filename_suffix_match( suffix, class->suffs ) ) return( save_class ); return( NULL ); } /** * vips_foreign_find_save_buffer: * @suffix: name to find a saver for * * Searches for an operation you could use to write to a buffer in @suffix * format. * * See also: vips_image_write_to_buffer(). * * Returns: the name of an operation on success, %NULL on error */ const char * vips_foreign_find_save_buffer( const char *name ) { char suffix[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; VipsForeignSaveClass *save_class; vips__filename_split8( name, suffix, option_string ); if( !(save_class = (VipsForeignSaveClass *) vips_foreign_map( "VipsForeignSave", (VipsSListMap2Fn) vips_foreign_find_save_buffer_sub, (void *) suffix, NULL )) ) { vips_error( "VipsForeignSave", _( "\"%s\" is not a known buffer format" ), name ); return( NULL ); } return( G_OBJECT_CLASS_NAME( save_class ) ); } /* Called from iofuncs to init all operations in this dir. Use a plugin system * instead? */ void vips_foreign_operation_init( void ) { extern GType vips_foreign_load_rad_get_type( void ); extern GType vips_foreign_save_rad_file_get_type( void ); extern GType vips_foreign_save_rad_buffer_get_type( void ); extern GType vips_foreign_load_mat_get_type( void ); extern GType vips_foreign_load_ppm_get_type( void ); extern GType vips_foreign_save_ppm_get_type( void ); extern GType vips_foreign_load_png_get_type( void ); extern GType vips_foreign_load_png_buffer_get_type( void ); extern GType vips_foreign_save_png_file_get_type( void ); extern GType vips_foreign_save_png_buffer_get_type( void ); extern GType vips_foreign_load_csv_get_type( void ); extern GType vips_foreign_save_csv_get_type( void ); extern GType vips_foreign_load_matrix_get_type( void ); extern GType vips_foreign_save_matrix_get_type( void ); extern GType vips_foreign_print_matrix_get_type( void ); extern GType vips_foreign_load_fits_get_type( void ); extern GType vips_foreign_save_fits_get_type( void ); extern GType vips_foreign_load_analyze_get_type( void ); extern GType vips_foreign_load_openexr_get_type( void ); extern GType vips_foreign_load_openslide_get_type( void ); extern GType vips_foreign_load_jpeg_file_get_type( void ); extern GType vips_foreign_load_jpeg_buffer_get_type( void ); extern GType vips_foreign_save_jpeg_file_get_type( void ); extern GType vips_foreign_save_jpeg_buffer_get_type( void ); extern GType vips_foreign_save_jpeg_mime_get_type( void ); extern GType vips_foreign_load_tiff_file_get_type( void ); extern GType vips_foreign_load_tiff_buffer_get_type( void ); extern GType vips_foreign_save_tiff_file_get_type( void ); extern GType vips_foreign_save_tiff_buffer_get_type( void ); extern GType vips_foreign_load_vips_get_type( void ); extern GType vips_foreign_save_vips_get_type( void ); extern GType vips_foreign_load_raw_get_type( void ); extern GType vips_foreign_save_raw_get_type( void ); extern GType vips_foreign_save_raw_fd_get_type( void ); extern GType vips_foreign_load_magick_file_get_type( void ); extern GType vips_foreign_load_magick_buffer_get_type( void ); extern GType vips_foreign_load_magick7_file_get_type( void ); extern GType vips_foreign_load_magick7_buffer_get_type( void ); extern GType 
vips_foreign_save_dz_file_get_type( void ); extern GType vips_foreign_save_dz_buffer_get_type( void ); extern GType vips_foreign_load_webp_file_get_type( void ); extern GType vips_foreign_load_webp_buffer_get_type( void ); extern GType vips_foreign_save_webp_file_get_type( void ); extern GType vips_foreign_save_webp_buffer_get_type( void ); extern GType vips_foreign_load_pdf_get_type( void ); extern GType vips_foreign_load_pdf_file_get_type( void ); extern GType vips_foreign_load_pdf_buffer_get_type( void ); extern GType vips_foreign_load_svg_get_type( void ); extern GType vips_foreign_load_svg_file_get_type( void ); extern GType vips_foreign_load_svg_buffer_get_type( void ); extern GType vips_foreign_load_gif_get_type( void ); extern GType vips_foreign_load_gif_file_get_type( void ); extern GType vips_foreign_load_gif_buffer_get_type( void ); vips_foreign_load_csv_get_type(); vips_foreign_save_csv_get_type(); vips_foreign_load_matrix_get_type(); vips_foreign_save_matrix_get_type(); vips_foreign_print_matrix_get_type(); vips_foreign_load_raw_get_type(); vips_foreign_save_raw_get_type(); vips_foreign_save_raw_fd_get_type(); vips_foreign_load_vips_get_type(); vips_foreign_save_vips_get_type(); #ifdef HAVE_ANALYZE vips_foreign_load_analyze_get_type(); #endif /*HAVE_ANALYZE*/ #ifdef HAVE_PPM vips_foreign_load_ppm_get_type(); vips_foreign_save_ppm_get_type(); #endif /*HAVE_PPM*/ #ifdef HAVE_RADIANCE vips_foreign_load_rad_get_type(); vips_foreign_save_rad_file_get_type(); vips_foreign_save_rad_buffer_get_type(); #endif /*HAVE_RADIANCE*/ #ifdef HAVE_POPPLER vips_foreign_load_pdf_get_type(); vips_foreign_load_pdf_file_get_type(); vips_foreign_load_pdf_buffer_get_type(); #endif /*HAVE_POPPLER*/ #ifdef HAVE_RSVG vips_foreign_load_svg_get_type(); vips_foreign_load_svg_file_get_type(); vips_foreign_load_svg_buffer_get_type(); #endif /*HAVE_RSVG*/ #ifdef HAVE_GIFLIB vips_foreign_load_gif_get_type(); vips_foreign_load_gif_file_get_type(); vips_foreign_load_gif_buffer_get_type(); #endif /*HAVE_GIFLIB*/ #ifdef HAVE_GSF vips_foreign_save_dz_file_get_type(); vips_foreign_save_dz_buffer_get_type(); #endif /*HAVE_GSF*/ #ifdef HAVE_PNG vips_foreign_load_png_get_type(); vips_foreign_load_png_buffer_get_type(); vips_foreign_save_png_file_get_type(); vips_foreign_save_png_buffer_get_type(); #endif /*HAVE_PNG*/ #ifdef HAVE_MATIO vips_foreign_load_mat_get_type(); #endif /*HAVE_MATIO*/ #ifdef HAVE_JPEG vips_foreign_load_jpeg_file_get_type(); vips_foreign_load_jpeg_buffer_get_type(); vips_foreign_save_jpeg_file_get_type(); vips_foreign_save_jpeg_buffer_get_type(); vips_foreign_save_jpeg_mime_get_type(); #endif /*HAVE_JPEG*/ #ifdef HAVE_LIBWEBP vips_foreign_load_webp_file_get_type(); vips_foreign_load_webp_buffer_get_type(); vips_foreign_save_webp_file_get_type(); vips_foreign_save_webp_buffer_get_type(); #endif /*HAVE_LIBWEBP*/ #ifdef HAVE_TIFF vips_foreign_load_tiff_file_get_type(); vips_foreign_load_tiff_buffer_get_type(); vips_foreign_save_tiff_file_get_type(); vips_foreign_save_tiff_buffer_get_type(); #endif /*HAVE_TIFF*/ #ifdef HAVE_OPENSLIDE vips_foreign_load_openslide_get_type(); #endif /*HAVE_OPENSLIDE*/ #ifdef HAVE_MAGICK vips_foreign_load_magick_file_get_type(); vips_foreign_load_magick_buffer_get_type(); #endif /*HAVE_MAGICK*/ #ifdef HAVE_MAGICK7 vips_foreign_load_magick7_file_get_type(); vips_foreign_load_magick7_buffer_get_type(); #endif /*HAVE_MAGICK7*/ #ifdef HAVE_CFITSIO vips_foreign_load_fits_get_type(); vips_foreign_save_fits_get_type(); #endif /*HAVE_CFITSIO*/ #ifdef HAVE_OPENEXR 
vips_foreign_load_openexr_get_type(); #endif /*HAVE_OPENEXR*/ vips__foreign_load_operation = g_quark_from_static_string( "vips-foreign-load-operation" ); }
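/* A minimal usage sketch, not part of the original source: one way
 * application code might walk the registered loaders with
 * vips_foreign_map(). The callback runs once per VipsForeignLoad subclass,
 * in priority order, and returning NULL keeps the walk going. The
 * example_* names are hypothetical.
 */
static void *
example_print_loader( VipsForeignClass *class, void *a, void *b )
{
	printf( "%s, priority=%d\n",
		VIPS_OBJECT_CLASS( class )->nickname, class->priority );

	return( NULL );
}

static void
example_print_all_loaders( void )
{
	(void) vips_foreign_map( "VipsForeignLoad",
		(VipsSListMap2Fn) example_print_loader, NULL, NULL );
}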
/* foreign file formats base class * * 7/2/12 * - add support for sequential reads * 18/6/12 * - flatten alpha with vips_flatten() * 28/5/13 * - auto rshift down to 8 bits during save * 19/1/14 * - pack and unpack rad to scrgb * 18/8/14 * - fix conversion to 16-bit RGB, thanks John * 18/6/15 * - forward progress signals from load * 23/5/16 * - remove max-alpha stuff, this is now automatic * 12/6/17 * - transform cmyk->rgb if there's an embedded profile * 16/6/17 * - add page_height * 5/3/18 * - block _start if one start fails, see #893 */ /* This file is part of VIPS. VIPS is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* These files are distributed with VIPS - http://www.vips.ecs.soton.ac.uk */ /* #define DEBUG */ #ifdef HAVE_CONFIG_H #include <config.h> #endif /*HAVE_CONFIG_H*/ #include <vips/intl.h> #include <stdio.h> #include <stdlib.h> #include <vips/vips.h> #include <vips/internal.h> #include <vips/debug.h> #include "pforeign.h" /** * SECTION: foreign * @short_description: load and save images in a variety of formats * @stability: Stable * @see_also: <link linkend="libvips-image">image</link> * @include: vips/vips.h * * This set of operations load and save images in a variety of formats. * * The operations share a base class that offers a simple way to search for a * subclass of #VipsForeign which can load a certain file (see * vips_foreign_find_load()) or buffer (see vips_foreign_find_load_buffer()), * or which could be used to save an image to a * certain file type (see vips_foreign_find_save() and * vips_foreign_find_save_buffer()). You can then run these * operations using vips_call() and friends to perform the load or save. * * vips_image_write_to_file() and vips_image_new_from_file() and friends use * these functions to automate file load and save. * * You can also invoke the operations directly, for example: * * |[ * vips_tiffsave (my_image, "frank.anything", * "compression", VIPS_FOREIGN_TIFF_COMPRESSION_JPEG, * NULL); * ]| * * To add support for a new file format to vips, simply define a new subclass * of #VipsForeignLoad or #VipsForeignSave. * * If you define a new operation which is a subclass of #VipsForeign, support * for it automatically appears in all VIPS user-interfaces. It will also be * transparently supported by vips_image_new_from_file() and friends. * * VIPS comes with VipsForeign for TIFF, JPEG, PNG, Analyze, PPM, OpenEXR, CSV, * Matlab, Radiance, RAW, FITS, WebP, SVG, PDF, GIF and VIPS. It also includes * import filters which can load with libMagick and with OpenSlide. * * ## Writing a new loader * * Add a new loader to VIPS by subclassing #VipsForeignLoad. Subclasses need to * implement at least @header(). * * @header() must set at least the header fields of @out. @load(), if defined, * must load the pixels to @real. * * The suffix list is used to select a format to save a file in, and to pick a * loader if you don't define is_a(). 
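 *
 * A suffix list is just a NULL-terminated array of strings. For a PNG
 * loader it might look something like (the exact entries are up to the
 * loader):
 *
 * |[
 * static const char *vips__png_suffs[] = { ".png", NULL };
 * ]|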
* * You should also define @nickname and @description in #VipsObject. * * As a complete example, here's code for a PNG loader, minus the actual * calls to libpng. * * |[ * typedef struct _VipsForeignLoadPng { * VipsForeignLoad parent_object; * * char *filename; * } VipsForeignLoadPng; * * typedef VipsForeignLoadClass VipsForeignLoadPngClass; * * G_DEFINE_TYPE( VipsForeignLoadPng, vips_foreign_load_png, * VIPS_TYPE_FOREIGN_LOAD ); * * static VipsForeignFlags * vips_foreign_load_png_get_flags_filename( const char *filename ) * { * VipsForeignFlags flags; * * flags = 0; * if( vips__png_isinterlaced( filename ) ) * flags = VIPS_FOREIGN_PARTIAL; * else * flags = VIPS_FOREIGN_SEQUENTIAL; * * return( flags ); * } * * static VipsForeignFlags * vips_foreign_load_png_get_flags( VipsForeignLoad *load ) * { * VipsForeignLoadPng *png = (VipsForeignLoadPng *) load; * * return( vips_foreign_load_png_get_flags_filename( png->filename ) ); * } * * static int * vips_foreign_load_png_header( VipsForeignLoad *load ) * { * VipsForeignLoadPng *png = (VipsForeignLoadPng *) load; * * if( vips__png_header( png->filename, load->out ) ) * return( -1 ); * * return( 0 ); * } * * static int * vips_foreign_load_png_load( VipsForeignLoad *load ) * { * VipsForeignLoadPng *png = (VipsForeignLoadPng *) load; * * if( vips__png_read( png->filename, load->real ) ) * return( -1 ); * * return( 0 ); * } * * static void * vips_foreign_load_png_class_init( VipsForeignLoadPngClass *class ) * { * GObjectClass *gobject_class = G_OBJECT_CLASS( class ); * VipsObjectClass *object_class = (VipsObjectClass *) class; * VipsForeignClass *foreign_class = (VipsForeignClass *) class; * VipsForeignLoadClass *load_class = (VipsForeignLoadClass *) class; * * gobject_class->set_property = vips_object_set_property; * gobject_class->get_property = vips_object_get_property; * * object_class->nickname = "pngload"; * object_class->description = _( "load png from file" ); * * foreign_class->suffs = vips__png_suffs; * * load_class->is_a = vips__png_ispng; * load_class->get_flags_filename = * vips_foreign_load_png_get_flags_filename; * load_class->get_flags = vips_foreign_load_png_get_flags; * load_class->header = vips_foreign_load_png_header; * load_class->load = vips_foreign_load_png_load; * * VIPS_ARG_STRING( class, "filename", 1, * _( "Filename" ), * _( "Filename to load from" ), * VIPS_ARGUMENT_REQUIRED_INPUT, * G_STRUCT_OFFSET( VipsForeignLoadPng, filename ), * NULL ); * } * * static void * vips_foreign_load_png_init( VipsForeignLoadPng *png ) * { * } * ]| * * ## Writing a new saver * * Call your saver in the class' @build() method after chaining up. The * prepared image should be ready for you to save in @ready. * * As a complete example, here's the code for the CSV saver, minus the calls * to the actual save routines. 
* * |[ * typedef struct _VipsForeignSaveCsv { * VipsForeignSave parent_object; * * char *filename; * const char *separator; * } VipsForeignSaveCsv; * * typedef VipsForeignSaveClass VipsForeignSaveCsvClass; * * G_DEFINE_TYPE( VipsForeignSaveCsv, vips_foreign_save_csv, * VIPS_TYPE_FOREIGN_SAVE ); * * static int * vips_foreign_save_csv_build( VipsObject *object ) * { * VipsForeignSave *save = (VipsForeignSave *) object; * VipsForeignSaveCsv *csv = (VipsForeignSaveCsv *) object; * * if( VIPS_OBJECT_CLASS( vips_foreign_save_csv_parent_class )-> * build( object ) ) * return( -1 ); * * if( vips__csv_write( save->ready, csv->filename, csv->separator ) ) * return( -1 ); * * return( 0 ); * } * * static void * vips_foreign_save_csv_class_init( VipsForeignSaveCsvClass *class ) * { * GObjectClass *gobject_class = G_OBJECT_CLASS( class ); * VipsObjectClass *object_class = (VipsObjectClass *) class; * VipsForeignClass *foreign_class = (VipsForeignClass *) class; * VipsForeignSaveClass *save_class = (VipsForeignSaveClass *) class; * * gobject_class->set_property = vips_object_set_property; * gobject_class->get_property = vips_object_get_property; * * object_class->nickname = "csvsave"; * object_class->description = _( "save image to csv file" ); * object_class->build = vips_foreign_save_csv_build; * * foreign_class->suffs = vips__foreign_csv_suffs; * * save_class->saveable = VIPS_SAVEABLE_MONO; * // no need to define ->format_table, we don't want the input * // cast for us * * VIPS_ARG_STRING( class, "filename", 1, * _( "Filename" ), * _( "Filename to save to" ), * VIPS_ARGUMENT_REQUIRED_INPUT, * G_STRUCT_OFFSET( VipsForeignSaveCsv, filename ), * NULL ); * * VIPS_ARG_STRING( class, "separator", 13, * _( "Separator" ), * _( "Separator characters" ), * VIPS_ARGUMENT_OPTIONAL_INPUT, * G_STRUCT_OFFSET( VipsForeignSaveCsv, separator ), * "\t" ); * } * * static void * vips_foreign_save_csv_init( VipsForeignSaveCsv *csv ) * { * csv->separator = g_strdup( "\t" ); * } * ]| */ /* Use this to link images to the load operation that made them. */ static GQuark vips__foreign_load_operation = 0; /** * VipsForeignFlags: * @VIPS_FOREIGN_NONE: no flags set * @VIPS_FOREIGN_PARTIAL: the image may be read lazilly * @VIPS_FOREIGN_BIGENDIAN: image pixels are most-significant byte first * @VIPS_FOREIGN_SEQUENTIAL: top-to-bottom lazy reading * * Some hints about the image loader. * * #VIPS_FOREIGN_PARTIAL means that the image can be read directly from the * file without needing to be unpacked to a temporary image first. * * #VIPS_FOREIGN_SEQUENTIAL means that the loader supports lazy reading, but * only top-to-bottom (sequential) access. Formats like PNG can read sets of * scanlines, for example, but only in order. * * If neither PARTIAL or SEQUENTIAL is set, the loader only supports whole * image read. Setting both PARTIAL and SEQUENTIAL is an error. * * #VIPS_FOREIGN_BIGENDIAN means that image pixels are most-significant byte * first. Depending on the native byte order of the host machine, you may * need to swap bytes. See vips_copy(). 
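 *
 * For example, assuming vips_copy() takes a "swap" option, an application
 * could byte-swap a big-endian image on a little-endian machine with
 * something like:
 *
 * |[
 * vips_copy( in, &out, "swap", TRUE, NULL );
 * ]|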
*/ G_DEFINE_ABSTRACT_TYPE( VipsForeign, vips_foreign, VIPS_TYPE_OPERATION ); static void vips_foreign_summary_class( VipsObjectClass *object_class, VipsBuf *buf ) { VipsForeignClass *class = VIPS_FOREIGN_CLASS( object_class ); VIPS_OBJECT_CLASS( vips_foreign_parent_class )-> summary_class( object_class, buf ); if( class->suffs ) { const char **p; vips_buf_appends( buf, " (" ); for( p = class->suffs; *p; p++ ) { vips_buf_appendf( buf, "%s", *p ); if( p[1] ) vips_buf_appends( buf, ", " ); } vips_buf_appends( buf, ")" ); } vips_buf_appendf( buf, ", priority=%d", class->priority ); } static void vips_foreign_class_init( VipsForeignClass *class ) { GObjectClass *gobject_class = G_OBJECT_CLASS( class ); VipsObjectClass *object_class = (VipsObjectClass *) class; gobject_class->set_property = vips_object_set_property; gobject_class->get_property = vips_object_get_property; object_class->nickname = "foreign"; object_class->description = _( "load and save image files" ); object_class->summary_class = vips_foreign_summary_class; } static void vips_foreign_init( VipsForeign *object ) { } /* To iterate over supported files we build a temp list of subclasses of * VipsForeign, sort by priority, iterate, and free. */ static void * file_add_class( VipsForeignClass *class, GSList **files ) { /* Append so we don't reverse the list of files. Sort will not reorder * items of equal priority. */ *files = g_slist_append( *files, class ); return( NULL ); } static gint file_compare( VipsForeignClass *a, VipsForeignClass *b ) { return( b->priority - a->priority ); } /** * vips_foreign_map: * @base: base class to search below (eg. "VipsForeignLoad") * @fn: (scope call): function to apply to each #VipsForeignClass * @a: user data * @b: user data * * Apply a function to every #VipsForeignClass that VIPS knows about. Foreigns * are presented to the function in priority order. * * Like all VIPS map functions, if @fn returns %NULL, iteration continues. If * it returns non-%NULL, iteration terminates and that value is returned. The * map function returns %NULL if all calls return %NULL. * * See also: vips_slist_map(). * * Returns: (transfer none): the result of iteration */ void * vips_foreign_map( const char *base, VipsSListMap2Fn fn, void *a, void *b ) { GSList *files; void *result; files = NULL; (void) vips_class_map_all( g_type_from_name( base ), (VipsClassMapFn) file_add_class, (void *) &files ); files = g_slist_sort( files, (GCompareFunc) file_compare ); result = vips_slist_map2( files, fn, a, b ); g_slist_free( files ); return( result ); } /* Abstract base class for image load. 
*/ G_DEFINE_ABSTRACT_TYPE( VipsForeignLoad, vips_foreign_load, VIPS_TYPE_FOREIGN ); static void vips_foreign_load_dispose( GObject *gobject ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( gobject ); VIPS_UNREF( load->real ); G_OBJECT_CLASS( vips_foreign_load_parent_class )->dispose( gobject ); } static void vips_foreign_load_summary_class( VipsObjectClass *object_class, VipsBuf *buf ) { VipsForeignLoadClass *class = VIPS_FOREIGN_LOAD_CLASS( object_class ); VIPS_OBJECT_CLASS( vips_foreign_load_parent_class )-> summary_class( object_class, buf ); if( !G_TYPE_IS_ABSTRACT( G_TYPE_FROM_CLASS( class ) ) ) { if( class->is_a ) vips_buf_appends( buf, ", is_a" ); if( class->is_a_buffer ) vips_buf_appends( buf, ", is_a_buffer" ); if( class->get_flags ) vips_buf_appends( buf, ", get_flags" ); if( class->get_flags_filename ) vips_buf_appends( buf, ", get_flags_filename" ); if( class->header ) vips_buf_appends( buf, ", header" ); if( class->load ) vips_buf_appends( buf, ", load" ); /* You can omit ->load(), you must not omit ->header(). */ g_assert( class->header ); } } /* Can this VipsForeign open this file? */ static void * vips_foreign_find_load_sub( VipsForeignLoadClass *load_class, const char *filename ) { VipsForeignClass *class = VIPS_FOREIGN_CLASS( load_class ); #ifdef DEBUG printf( "vips_foreign_find_load_sub: %s\n", VIPS_OBJECT_CLASS( class )->nickname ); #endif /*DEBUG*/ if( load_class->is_a ) { if( load_class->is_a( filename ) ) return( load_class ); #ifdef DEBUG printf( "vips_foreign_find_load_sub: is_a failed\n" ); #endif /*DEBUG*/ } else if( class->suffs && vips_filename_suffix_match( filename, class->suffs ) ) return( load_class ); else { #ifdef DEBUG printf( "vips_foreign_find_load_sub: suffix match failed\n" ); #endif /*DEBUG*/ } return( NULL ); } /** * vips_foreign_find_load: * @filename: file to find a loader for * * Searches for an operation you could use to load @filename. Any trailing * options on @filename are stripped and ignored. * * See also: vips_foreign_find_load_buffer(), vips_image_new_from_file(). * * Returns: the name of an operation on success, %NULL on error */ const char * vips_foreign_find_load( const char *name ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; VipsForeignLoadClass *load_class; vips__filename_split8( name, filename, option_string ); if( !vips_existsf( "%s", filename ) ) { vips_error( "VipsForeignLoad", _( "file \"%s\" not found" ), name ); return( NULL ); } if( !(load_class = (VipsForeignLoadClass *) vips_foreign_map( "VipsForeignLoad", (VipsSListMap2Fn) vips_foreign_find_load_sub, (void *) filename, NULL )) ) { vips_error( "VipsForeignLoad", _( "\"%s\" is not a known file format" ), name ); return( NULL ); } #ifdef DEBUG printf( "vips_foreign_find_load: selected %s\n", VIPS_OBJECT_CLASS( load_class )->nickname ); #endif /*DEBUG*/ return( G_OBJECT_CLASS_NAME( load_class ) ); } /* Kept for compat with earlier version of the vip8 API. Use * vips_image_new_from_file() now. */ int vips_foreign_load( const char *name, VipsImage **out, ... ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; const char *operation_name; va_list ap; int result; vips__filename_split8( name, filename, option_string ); if( !(operation_name = vips_foreign_find_load( filename )) ) return( -1 ); va_start( ap, out ); result = vips_call_split_option_string( operation_name, option_string, ap, filename, out ); va_end( ap ); return( result ); } /* Can this VipsForeign open this buffer? 
*/ static void * vips_foreign_find_load_buffer_sub( VipsForeignLoadClass *load_class, const void **buf, size_t *len ) { if( load_class->is_a_buffer && load_class->is_a_buffer( *buf, *len ) ) return( load_class ); return( NULL ); } /** * vips_foreign_find_load_buffer: * @data: (array length=size) (element-type guint8) (transfer none): start of * memory buffer * @size: (type gsize): number of bytes in @data * * Searches for an operation you could use to load a memory buffer. To see the * range of buffer loaders supported by your vips, try something like: * * vips -l | grep load_buffer * * See also: vips_image_new_from_buffer(). * * Returns: (transfer none): the name of an operation on success, %NULL on * error. */ const char * vips_foreign_find_load_buffer( const void *data, size_t size ) { VipsForeignLoadClass *load_class; if( !(load_class = (VipsForeignLoadClass *) vips_foreign_map( "VipsForeignLoad", (VipsSListMap2Fn) vips_foreign_find_load_buffer_sub, &data, &size )) ) { vips_error( "VipsForeignLoad", "%s", _( "buffer is not in a known format" ) ); return( NULL ); } return( G_OBJECT_CLASS_NAME( load_class ) ); } /** * vips_foreign_is_a: * @loader: name of loader to use for test * @filename: file to test * * Return %TRUE if @filename can be loaded by @loader. @loader is something * like "tiffload" or "VipsForeignLoadTiff". * * Returns: %TRUE if @filename can be loaded by @loader. */ gboolean vips_foreign_is_a( const char *loader, const char *filename ) { const VipsObjectClass *class; VipsForeignLoadClass *load_class; if( !(class = vips_class_find( "VipsForeignLoad", loader )) ) return( FALSE ); load_class = VIPS_FOREIGN_LOAD_CLASS( class ); if( load_class->is_a && load_class->is_a( filename ) ) return( TRUE ); return( FALSE ); } /** * vips_foreign_is_a_buffer: * @loader: name of loader to use for test * @data: (array length=size) (element-type guint8): pointer to the buffer to test * @size: (type gsize): size of the buffer to test * * Return %TRUE if @data can be loaded by @loader. @loader is something * like "tiffload_buffer" or "VipsForeignLoadTiffBuffer". * * Returns: %TRUE if @data can be loaded by @loader. */ gboolean vips_foreign_is_a_buffer( const char *loader, const void *data, size_t size ) { const VipsObjectClass *class; VipsForeignLoadClass *load_class; if( !(class = vips_class_find( "VipsForeignLoad", loader )) ) return( FALSE ); load_class = VIPS_FOREIGN_LOAD_CLASS( class ); if( load_class->is_a_buffer && load_class->is_a_buffer( data, size ) ) return( TRUE ); return( FALSE ); } /** * vips_foreign_flags: * @loader: name of loader to use for test * @filename: file to test * * Return the flags for @filename using @loader. * @loader is something like "tiffload" or "VipsForeignLoadTiff". * * Returns: the flags for @filename. 
*/ VipsForeignFlags vips_foreign_flags( const char *loader, const char *filename ) { const VipsObjectClass *class; if( (class = vips_class_find( "VipsForeignLoad", loader )) ) { VipsForeignLoadClass *load_class = VIPS_FOREIGN_LOAD_CLASS( class ); if( load_class->get_flags_filename ) return( load_class->get_flags_filename( filename ) ); } return( 0 ); } static VipsObject * vips_foreign_load_new_from_string( const char *string ) { const char *file_op; GType type; VipsForeignLoad *load; if( !(file_op = vips_foreign_find_load( string )) ) return( NULL ); type = g_type_from_name( file_op ); g_assert( type ); load = VIPS_FOREIGN_LOAD( g_object_new( type, NULL ) ); g_object_set( load, "filename", string, NULL ); return( VIPS_OBJECT( load ) ); } static VipsImage * vips_foreign_load_temp( VipsForeignLoad *load ) { const guint64 disc_threshold = vips_get_disc_threshold(); const guint64 image_size = VIPS_IMAGE_SIZEOF_IMAGE( load->out ); /* If this is a partial operation, we can open directly. */ if( load->flags & VIPS_FOREIGN_PARTIAL ) { #ifdef DEBUG printf( "vips_foreign_load_temp: partial temp\n" ); #endif /*DEBUG*/ return( vips_image_new() ); } /* If it can do sequential access and it's been requested, we can open * directly. */ if( (load->flags & VIPS_FOREIGN_SEQUENTIAL) && load->access != VIPS_ACCESS_RANDOM ) { #ifdef DEBUG printf( "vips_foreign_load_temp: partial sequential temp\n" ); #endif /*DEBUG*/ return( vips_image_new() ); } /* ->memory used to be called ->disc and default TRUE. If it's been * forced FALSE, set memory TRUE. */ if( !load->disc ) load->memory = TRUE; /* We open via disc if: * - 'memory' is off * - the uncompressed image will be larger than * vips_get_disc_threshold() */ if( !load->memory && image_size > disc_threshold ) { #ifdef DEBUG printf( "vips_foreign_load_temp: disc temp\n" ); #endif /*DEBUG*/ return( vips_image_new_temp_file( "%s.v" ) ); } #ifdef DEBUG printf( "vips_foreign_load_temp: memory temp\n" ); #endif /*DEBUG*/ /* Otherwise, fall back to a memory buffer. */ return( vips_image_new_memory() ); } /* Check two images for compatibility: their geometries need to match. */ static gboolean vips_foreign_load_iscompat( VipsImage *a, VipsImage *b ) { if( a->Xsize != b->Xsize || a->Ysize != b->Ysize || a->Bands != b->Bands || a->Coding != b->Coding || a->BandFmt != b->BandFmt ) { vips_error( "VipsForeignLoad", "%s", _( "images do not match" ) ); return( FALSE ); } return( TRUE ); } /* Our start function ... do the lazy open, if necessary, and return a region * on the new image. */ static void * vips_foreign_load_start( VipsImage *out, void *a, void *b ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( b ); VipsForeignLoadClass *class = VIPS_FOREIGN_LOAD_GET_CLASS( load ); /* If this start has failed before in another thread, we can fail now. */ if( load->error ) return( NULL ); if( !load->real ) { if( !(load->real = vips_foreign_load_temp( load )) ) return( NULL ); #ifdef DEBUG printf( "vips_foreign_load_start: triggering ->load()\n" ); #endif /*DEBUG*/ /* Read the image in. This may involve a long computation and * will finish with load->real holding the decompressed image. * * We want our caller to be able to see this computation on * @out, so eval signals on ->real need to appear on ->out. */ load->real->progress_signal = load->out; /* Note the load object on the image. Loaders can use * this to signal invalidate if they hit a load error. See * vips_foreign_load_invalidate() below. 
*/ g_object_set_qdata( G_OBJECT( load->real ), vips__foreign_load_operation, load ); /* Load the image and check the result. * * ->header() read the header into @out, load has read the * image into @real. They must match exactly in size, bands, * format and coding for the copy to work. * * Some versions of ImageMagick give different results between * Ping and Load for some formats, for example. * * If the load fails, we need to stop */ if( class->load( load ) || vips_image_pio_input( load->real ) || vips_foreign_load_iscompat( load->real, out ) ) { vips_operation_invalidate( VIPS_OPERATION( load ) ); load->error = TRUE; return( NULL ); } /* We have to tell vips that out depends on real. We've set * the demand hint below, but not given an input there. */ vips_image_pipelinev( load->out, load->out->dhint, load->real, NULL ); } return( vips_region_new( load->real ) ); } /* Just pointer-copy. */ static int vips_foreign_load_generate( VipsRegion *or, void *seq, void *a, void *b, gboolean *stop ) { VipsRegion *ir = (VipsRegion *) seq; VipsRect *r = &or->valid; /* Ask for input we need. */ if( vips_region_prepare( ir, r ) ) return( -1 ); /* Attach output region to that. */ if( vips_region_region( or, ir, r, r->left, r->top ) ) return( -1 ); return( 0 ); } static int vips_foreign_load_build( VipsObject *object ) { VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( object ); VipsForeignLoad *load = VIPS_FOREIGN_LOAD( object ); VipsForeignLoadClass *fclass = VIPS_FOREIGN_LOAD_GET_CLASS( object ); VipsForeignFlags flags; #ifdef DEBUG printf( "vips_foreign_load_build:\n" ); #endif /*DEBUG*/ flags = 0; if( fclass->get_flags ) flags |= fclass->get_flags( load ); if( (flags & VIPS_FOREIGN_PARTIAL) && (flags & VIPS_FOREIGN_SEQUENTIAL) ) { g_warning( "%s", _( "VIPS_FOREIGN_PARTIAL and VIPS_FOREIGN_SEQUENTIAL " "both set -- using SEQUENTIAL" ) ); flags ^= VIPS_FOREIGN_PARTIAL; } g_object_set( load, "flags", flags, NULL ); /* If the loader can do sequential mode and sequential has been * requested, we need to block caching. */ if( (load->flags & VIPS_FOREIGN_SEQUENTIAL) && load->access != VIPS_ACCESS_RANDOM ) load->nocache = TRUE; if( VIPS_OBJECT_CLASS( vips_foreign_load_parent_class )-> build( object ) ) return( -1 ); if( load->sequential ) g_warning( "%s", _( "ignoring deprecated \"sequential\" mode -- " "please use \"access\" instead" ) ); g_object_set( object, "out", vips_image_new(), NULL ); vips_image_set_string( load->out, VIPS_META_LOADER, class->nickname ); #ifdef DEBUG printf( "vips_foreign_load_build: triggering ->header()\n" ); #endif /*DEBUG*/ /* Read the header into @out. */ if( fclass->header && fclass->header( load ) ) return( -1 ); /* If there's no ->load() method then the header read has done * everything. Otherwise, it's just set fields and we must also * load pixels. * * Delay the load until the first pixel is requested by doing the work * in the start function of the copy. */ if( fclass->load ) { #ifdef DEBUG printf( "vips_foreign_load_build: delaying read ...\n" ); #endif /*DEBUG*/ /* ->header() should set the dhint. It'll default to the safe * SMALLTILE if header() did not set it. */ vips_image_pipelinev( load->out, load->out->dhint, NULL ); /* Then 'start' creates the real image and 'gen' fetches * pixels for @out from @real on demand. */ if( vips_image_generate( load->out, vips_foreign_load_start, vips_foreign_load_generate, vips_stop_one, NULL, load ) ) return( -1 ); } /* If random access has been requested, make sure that we don't have a * SEQ tag left from a sequential loader. 
*/ if( load->access == VIPS_ACCESS_RANDOM ) (void) vips_image_remove( load->out, VIPS_META_SEQUENTIAL ); return( 0 ); } static VipsOperationFlags vips_foreign_load_operation_get_flags( VipsOperation *operation ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( operation ); VipsOperationFlags flags; flags = VIPS_OPERATION_CLASS( vips_foreign_load_parent_class )-> get_flags( operation ); if( load->nocache ) flags |= VIPS_OPERATION_NOCACHE; return( flags ); } static void vips_foreign_load_class_init( VipsForeignLoadClass *class ) { GObjectClass *gobject_class = G_OBJECT_CLASS( class ); VipsObjectClass *object_class = (VipsObjectClass *) class; VipsOperationClass *operation_class = (VipsOperationClass *) class; gobject_class->dispose = vips_foreign_load_dispose; gobject_class->set_property = vips_object_set_property; gobject_class->get_property = vips_object_get_property; object_class->build = vips_foreign_load_build; object_class->summary_class = vips_foreign_load_summary_class; object_class->new_from_string = vips_foreign_load_new_from_string; object_class->nickname = "fileload"; object_class->description = _( "file loaders" ); operation_class->get_flags = vips_foreign_load_operation_get_flags; VIPS_ARG_IMAGE( class, "out", 2, _( "Output" ), _( "Output image" ), VIPS_ARGUMENT_REQUIRED_OUTPUT, G_STRUCT_OFFSET( VipsForeignLoad, out ) ); VIPS_ARG_FLAGS( class, "flags", 6, _( "Flags" ), _( "Flags for this file" ), VIPS_ARGUMENT_OPTIONAL_OUTPUT, G_STRUCT_OFFSET( VipsForeignLoad, flags ), VIPS_TYPE_FOREIGN_FLAGS, VIPS_FOREIGN_NONE ); VIPS_ARG_BOOL( class, "memory", 7, _( "Memory" ), _( "Force open via memory" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignLoad, memory ), FALSE ); VIPS_ARG_ENUM( class, "access", 8, _( "Access" ), _( "Required access pattern for this file" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignLoad, access ), VIPS_TYPE_ACCESS, VIPS_ACCESS_RANDOM ); VIPS_ARG_BOOL( class, "sequential", 10, _( "Sequential" ), _( "Sequential read only" ), VIPS_ARGUMENT_OPTIONAL_INPUT | VIPS_ARGUMENT_DEPRECATED, G_STRUCT_OFFSET( VipsForeignLoad, sequential ), FALSE ); VIPS_ARG_BOOL( class, "fail", 11, _( "Fail" ), _( "Fail on first error" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignLoad, fail ), FALSE ); VIPS_ARG_BOOL( class, "disc", 12, _( "Disc" ), _( "Open to disc" ), VIPS_ARGUMENT_OPTIONAL_INPUT | VIPS_ARGUMENT_DEPRECATED, G_STRUCT_OFFSET( VipsForeignLoad, disc ), TRUE ); } static void vips_foreign_load_init( VipsForeignLoad *load ) { load->disc = TRUE; load->access = VIPS_ACCESS_RANDOM; } /* * Loaders can call this */ /** * vips_foreign_load_invalidate: (method) * @image: image to invalidate * * Loaders can call this on the image they are making if they see a read error * from the load library. It signals "invalidate" on the load operation and * will cause it to be dropped from cache. * * If we know a file will cause a read error, we don't want to cache the * failing operation, we want to make sure the image will really be opened * again if our caller tries again. For example, a broken file might be * replaced by a working one. */ void vips_foreign_load_invalidate( VipsImage *image ) { VipsOperation *operation; #ifdef DEBUG printf( "vips_foreign_load_invalidate: %p\n", image ); #endif /*DEBUG*/ if( (operation = g_object_get_qdata( G_OBJECT( image ), vips__foreign_load_operation )) ) { vips_operation_invalidate( operation ); } } /* Abstract base class for image savers. 
*/ G_DEFINE_ABSTRACT_TYPE( VipsForeignSave, vips_foreign_save, VIPS_TYPE_FOREIGN ); static void vips_foreign_save_dispose( GObject *gobject ) { VipsForeignSave *save = VIPS_FOREIGN_SAVE( gobject ); VIPS_UNREF( save->ready ); G_OBJECT_CLASS( vips_foreign_save_parent_class )->dispose( gobject ); } static void vips_foreign_save_summary_class( VipsObjectClass *object_class, VipsBuf *buf ) { VipsForeignSaveClass *class = VIPS_FOREIGN_SAVE_CLASS( object_class ); VIPS_OBJECT_CLASS( vips_foreign_save_parent_class )-> summary_class( object_class, buf ); vips_buf_appendf( buf, ", %s", vips_enum_nick( VIPS_TYPE_SAVEABLE, class->saveable ) ); } static VipsObject * vips_foreign_save_new_from_string( const char *string ) { const char *file_op; GType type; VipsForeignSave *save; if( !(file_op = vips_foreign_find_save( string )) ) return( NULL ); type = g_type_from_name( file_op ); g_assert( type ); save = VIPS_FOREIGN_SAVE( g_object_new( type, NULL ) ); g_object_set( save, "filename", string, NULL ); return( VIPS_OBJECT( save ) ); } /* Convert an image for saving. */ int vips__foreign_convert_saveable( VipsImage *in, VipsImage **ready, VipsSaveable saveable, VipsBandFormat *format, VipsCoding *coding, VipsArrayDouble *background ) { /* in holds a reference to the output of our chain as we build it. */ g_object_ref( in ); /* For coded images, can this class save the coding we are in now? * Nothing to do. */ if( in->Coding != VIPS_CODING_NONE && coding[in->Coding] ) { *ready = in; return( 0 ); } /* For uncoded images, if this saver supports ANY bands and this * format we have nothing to do. */ if( in->Coding == VIPS_CODING_NONE && saveable == VIPS_SAVEABLE_ANY && format[in->BandFmt] == in->BandFmt ) { *ready = in; return( 0 ); } /* Otherwise ... we need to decode and then (possibly) recode at the * end. */ /* If this is an VIPS_CODING_LABQ, we can go straight to RGB. */ if( in->Coding == VIPS_CODING_LABQ ) { VipsImage *out; if( vips_LabQ2sRGB( in, &out, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* If this is an VIPS_CODING_RAD, we unpack to float. This could be * scRGB or XYZ. */ if( in->Coding == VIPS_CODING_RAD ) { VipsImage *out; if( vips_rad2float( in, &out, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* If the saver supports RAD, we need to go to scRGB or XYZ. */ if( coding[VIPS_CODING_RAD] ) { if( in->Type != VIPS_INTERPRETATION_scRGB && in->Type != VIPS_INTERPRETATION_XYZ ) { VipsImage *out; if( vips_colourspace( in, &out, VIPS_INTERPRETATION_scRGB, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } } /* If this image is CMYK and the saver is RGB-only, use lcms to try to * import to XYZ. This will only work if the image has an embedded * profile. */ if( in->Type == VIPS_INTERPRETATION_CMYK && in->Bands >= 4 && (saveable == VIPS_SAVEABLE_RGB || saveable == VIPS_SAVEABLE_RGBA || saveable == VIPS_SAVEABLE_RGBA_ONLY) ) { VipsImage *out; if( vips_icc_import( in, &out, "pcs", VIPS_PCS_XYZ, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; /* We've imported to PCS, we must remove the embedded profile, * since it no longer matches the image. * * For example, when converting CMYK JPG to RGB PNG, we need * to remove the CMYK profile on import, or the png writer will * try to attach it when we write the image as RGB. */ vips_image_remove( in, VIPS_META_ICC_NAME ); } /* If this is something other than CMYK or RAD, eg. maybe a LAB image, * we need to transform to RGB. 
*/ if( !coding[VIPS_CODING_RAD] && in->Bands >= 3 && in->Type != VIPS_INTERPRETATION_CMYK && vips_colourspace_issupported( in ) && (saveable == VIPS_SAVEABLE_RGB || saveable == VIPS_SAVEABLE_RGBA || saveable == VIPS_SAVEABLE_RGBA_ONLY || saveable == VIPS_SAVEABLE_RGB_CMYK) ) { VipsImage *out; VipsInterpretation interpretation; /* Do we make RGB or RGB16? We don't want to squash a 16-bit * RGB down to 8 bits if the saver supports 16. */ if( vips_band_format_is8bit( format[in->BandFmt] ) ) interpretation = VIPS_INTERPRETATION_sRGB; else interpretation = VIPS_INTERPRETATION_RGB16; if( vips_colourspace( in, &out, interpretation, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* VIPS_SAVEABLE_RGBA_ONLY does not support 1 or 2 bands ... convert * to sRGB. */ if( !coding[VIPS_CODING_RAD] && in->Bands < 3 && vips_colourspace_issupported( in ) && saveable == VIPS_SAVEABLE_RGBA_ONLY ) { VipsImage *out; VipsInterpretation interpretation; /* Do we make RGB or RGB16? We don't want to squash a 16-bit * RGB down to 8 bits if the saver supports 16. */ if( vips_band_format_is8bit( format[in->BandFmt] ) ) interpretation = VIPS_INTERPRETATION_sRGB; else interpretation = VIPS_INTERPRETATION_RGB16; if( vips_colourspace( in, &out, interpretation, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* Get the bands right. We must do this after all colourspace * transforms, since they can change the number of bands. */ if( in->Coding == VIPS_CODING_NONE ) { /* Do we need to flatten out an alpha channel? There needs to * be an alpha there now, and this writer needs to not support * alpha. */ if( (in->Bands == 2 || (in->Bands == 4 && in->Type != VIPS_INTERPRETATION_CMYK)) && (saveable == VIPS_SAVEABLE_MONO || saveable == VIPS_SAVEABLE_RGB || saveable == VIPS_SAVEABLE_RGB_CMYK) ) { VipsImage *out; if( vips_flatten( in, &out, "background", background, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* Other alpha removal strategies ... just drop the extra * bands. */ else if( in->Bands > 3 && (saveable == VIPS_SAVEABLE_RGB || (saveable == VIPS_SAVEABLE_RGB_CMYK && in->Type != VIPS_INTERPRETATION_CMYK)) ) { VipsImage *out; /* Don't let 4 bands though unless the image really is * a CMYK. * * Consider a RGBA png being saved as JPG. We can * write CMYK jpg, but we mustn't do that for RGBA * images. */ if( vips_extract_band( in, &out, 0, "n", 3, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } else if( in->Bands > 4 && ((saveable == VIPS_SAVEABLE_RGB_CMYK && in->Type == VIPS_INTERPRETATION_CMYK) || saveable == VIPS_SAVEABLE_RGBA || saveable == VIPS_SAVEABLE_RGBA_ONLY) ) { VipsImage *out; if( vips_extract_band( in, &out, 0, "n", 4, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } else if( in->Bands > 1 && saveable == VIPS_SAVEABLE_MONO ) { VipsImage *out; if( vips_extract_band( in, &out, 0, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* Else we have VIPS_SAVEABLE_ANY and we don't chop bands down. */ } /* Handle the ushort interpretations. * * RGB16 and GREY16 use 0-65535 for black-white. If we have an image * tagged like this, and it has more than 8 bits (we leave crazy uchar * images tagged as RGB16 alone), we'll need to get it ready for the * saver. 
*/ if( (in->Type == VIPS_INTERPRETATION_RGB16 || in->Type == VIPS_INTERPRETATION_GREY16) && !vips_band_format_is8bit( in->BandFmt ) ) { /* If the saver supports ushort, cast to ushort. It may be * float at the moment, for example. * * If the saver does not support ushort, automatically shift * it down. This is the behaviour we want for saving an RGB16 * image as JPG, for example. */ if( format[VIPS_FORMAT_USHORT] == VIPS_FORMAT_USHORT ) { VipsImage *out; if( vips_cast( in, &out, VIPS_FORMAT_USHORT, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } else { VipsImage *out; if( vips_rshift_const1( in, &out, 8, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; /* That could have produced an int image ... make sure * we are now uchar. */ if( vips_cast( in, &out, VIPS_FORMAT_UCHAR, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } } /* Cast to the output format. */ { VipsImage *out; if( vips_cast( in, &out, format[in->BandFmt], NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } /* Does this class want a coded image? Search the coding table for the * first one. */ if( coding[VIPS_CODING_NONE] ) { /* Already NONE, nothing to do. */ } else if( coding[VIPS_CODING_LABQ] ) { VipsImage *out; if( vips_Lab2LabQ( in, &out, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } else if( coding[VIPS_CODING_RAD] ) { VipsImage *out; if( vips_float2rad( in, &out, NULL ) ) { g_object_unref( in ); return( -1 ); } g_object_unref( in ); in = out; } *ready = in; return( 0 ); } static int vips_foreign_save_build( VipsObject *object ) { VipsForeignSave *save = VIPS_FOREIGN_SAVE( object ); if( save->in ) { VipsForeignSaveClass *class = VIPS_FOREIGN_SAVE_GET_CLASS( save ); VipsImage *ready; if( vips__foreign_convert_saveable( save->in, &ready, class->saveable, class->format_table, class->coding, save->background ) ) return( -1 ); if( save->page_height ) vips_image_set_int( ready, VIPS_META_PAGE_HEIGHT, save->page_height ); VIPS_UNREF( save->ready ); save->ready = ready; } if( VIPS_OBJECT_CLASS( vips_foreign_save_parent_class )-> build( object ) ) return( -1 ); return( 0 ); } #define UC VIPS_FORMAT_UCHAR #define C VIPS_FORMAT_CHAR #define US VIPS_FORMAT_USHORT #define S VIPS_FORMAT_SHORT #define UI VIPS_FORMAT_UINT #define I VIPS_FORMAT_INT #define F VIPS_FORMAT_FLOAT #define X VIPS_FORMAT_COMPLEX #define D VIPS_FORMAT_DOUBLE #define DX VIPS_FORMAT_DPCOMPLEX static int vips_foreign_save_format_table[10] = { // UC C US S UI I F X D DX UC, C, US, S, UI, I, F, X, D, DX }; static void vips_foreign_save_class_init( VipsForeignSaveClass *class ) { GObjectClass *gobject_class = G_OBJECT_CLASS( class ); VipsObjectClass *object_class = (VipsObjectClass *) class; VipsOperationClass *operation_class = (VipsOperationClass *) class; int i; gobject_class->dispose = vips_foreign_save_dispose; gobject_class->set_property = vips_object_set_property; gobject_class->get_property = vips_object_get_property; object_class->build = vips_foreign_save_build; object_class->summary_class = vips_foreign_save_summary_class; object_class->new_from_string = vips_foreign_save_new_from_string; object_class->nickname = "filesave"; object_class->description = _( "file savers" ); /* All savers are sequential by definition. Things like tiled tiff * write and interlaced png write, which are not, add extra caches * on their input. 
*/ operation_class->flags |= VIPS_OPERATION_SEQUENTIAL; /* Must not cache savers. */ operation_class->flags |= VIPS_OPERATION_NOCACHE; /* Default to no coding allowed. */ for( i = 0; i < VIPS_CODING_LAST; i++ ) class->coding[i] = FALSE; class->coding[VIPS_CODING_NONE] = TRUE; /* Default to no cast on save. */ class->format_table = vips_foreign_save_format_table; VIPS_ARG_IMAGE( class, "in", 0, _( "Input" ), _( "Image to save" ), VIPS_ARGUMENT_REQUIRED_INPUT, G_STRUCT_OFFSET( VipsForeignSave, in ) ); VIPS_ARG_BOOL( class, "strip", 100, _( "Strip" ), _( "Strip all metadata from image" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignSave, strip ), FALSE ); VIPS_ARG_BOXED( class, "background", 101, _( "Background" ), _( "Background value" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignSave, background ), VIPS_TYPE_ARRAY_DOUBLE ); VIPS_ARG_INT( class, "page_height", 8, _( "Page height" ), _( "Set page height for multipage save" ), VIPS_ARGUMENT_OPTIONAL_INPUT, G_STRUCT_OFFSET( VipsForeignSave, page_height ), 0, VIPS_MAX_COORD, 0 ); } static void vips_foreign_save_init( VipsForeignSave *save ) { save->background = vips_array_double_newv( 1, 0.0 ); } /* Can we write this filename with this file? */ static void * vips_foreign_find_save_sub( VipsForeignSaveClass *save_class, const char *filename ) { VipsForeignClass *class = VIPS_FOREIGN_CLASS( save_class ); /* The suffs might be defined on an abstract base class, make sure we * don't pick that. */ if( !G_TYPE_IS_ABSTRACT( G_TYPE_FROM_CLASS( class ) ) && class->suffs && vips_filename_suffix_match( filename, class->suffs ) ) return( save_class ); return( NULL ); } /** * vips_foreign_find_save: * @filename: name to find a saver for * * Searches for an operation you could use to write to @filename. * Any trailing options on @filename are stripped and ignored. * * See also: vips_foreign_find_save_buffer(), vips_image_write_to_file(). * * Returns: the name of an operation on success, %NULL on error */ const char * vips_foreign_find_save( const char *name ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; VipsForeignSaveClass *save_class; vips__filename_split8( name, filename, option_string ); if( !(save_class = (VipsForeignSaveClass *) vips_foreign_map( "VipsForeignSave", (VipsSListMap2Fn) vips_foreign_find_save_sub, (void *) filename, NULL )) ) { vips_error( "VipsForeignSave", _( "\"%s\" is not a known file format" ), name ); return( NULL ); } return( G_OBJECT_CLASS_NAME( save_class ) ); } /* Kept for early vips8 API compat. */ int vips_foreign_save( VipsImage *in, const char *name, ... ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; const char *operation_name; va_list ap; int result; vips__filename_split8( name, filename, option_string ); if( !(operation_name = vips_foreign_find_save( filename )) ) return( -1 ); va_start( ap, name ); result = vips_call_split_option_string( operation_name, option_string, ap, in, filename ); va_end( ap ); return( result ); } /* Can we write this buffer with this file type? 
*/ static void * vips_foreign_find_save_buffer_sub( VipsForeignSaveClass *save_class, const char *suffix ) { VipsObjectClass *object_class = VIPS_OBJECT_CLASS( save_class ); VipsForeignClass *class = VIPS_FOREIGN_CLASS( save_class ); if( class->suffs && vips_ispostfix( object_class->nickname, "_buffer" ) && vips_filename_suffix_match( suffix, class->suffs ) ) return( save_class ); return( NULL ); } /** * vips_foreign_find_save_buffer: * @suffix: name to find a saver for * * Searches for an operation you could use to write to a buffer in @suffix * format. * * See also: vips_image_write_to_buffer(). * * Returns: the name of an operation on success, %NULL on error */ const char * vips_foreign_find_save_buffer( const char *name ) { char suffix[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; VipsForeignSaveClass *save_class; vips__filename_split8( name, suffix, option_string ); if( !(save_class = (VipsForeignSaveClass *) vips_foreign_map( "VipsForeignSave", (VipsSListMap2Fn) vips_foreign_find_save_buffer_sub, (void *) suffix, NULL )) ) { vips_error( "VipsForeignSave", _( "\"%s\" is not a known buffer format" ), name ); return( NULL ); } return( G_OBJECT_CLASS_NAME( save_class ) ); } /* Called from iofuncs to init all operations in this dir. Use a plugin system * instead? */ void vips_foreign_operation_init( void ) { extern GType vips_foreign_load_rad_get_type( void ); extern GType vips_foreign_save_rad_file_get_type( void ); extern GType vips_foreign_save_rad_buffer_get_type( void ); extern GType vips_foreign_load_mat_get_type( void ); extern GType vips_foreign_load_ppm_get_type( void ); extern GType vips_foreign_save_ppm_get_type( void ); extern GType vips_foreign_load_png_get_type( void ); extern GType vips_foreign_load_png_buffer_get_type( void ); extern GType vips_foreign_save_png_file_get_type( void ); extern GType vips_foreign_save_png_buffer_get_type( void ); extern GType vips_foreign_load_csv_get_type( void ); extern GType vips_foreign_save_csv_get_type( void ); extern GType vips_foreign_load_matrix_get_type( void ); extern GType vips_foreign_save_matrix_get_type( void ); extern GType vips_foreign_print_matrix_get_type( void ); extern GType vips_foreign_load_fits_get_type( void ); extern GType vips_foreign_save_fits_get_type( void ); extern GType vips_foreign_load_analyze_get_type( void ); extern GType vips_foreign_load_openexr_get_type( void ); extern GType vips_foreign_load_openslide_get_type( void ); extern GType vips_foreign_load_jpeg_file_get_type( void ); extern GType vips_foreign_load_jpeg_buffer_get_type( void ); extern GType vips_foreign_save_jpeg_file_get_type( void ); extern GType vips_foreign_save_jpeg_buffer_get_type( void ); extern GType vips_foreign_save_jpeg_mime_get_type( void ); extern GType vips_foreign_load_tiff_file_get_type( void ); extern GType vips_foreign_load_tiff_buffer_get_type( void ); extern GType vips_foreign_save_tiff_file_get_type( void ); extern GType vips_foreign_save_tiff_buffer_get_type( void ); extern GType vips_foreign_load_vips_get_type( void ); extern GType vips_foreign_save_vips_get_type( void ); extern GType vips_foreign_load_raw_get_type( void ); extern GType vips_foreign_save_raw_get_type( void ); extern GType vips_foreign_save_raw_fd_get_type( void ); extern GType vips_foreign_load_magick_file_get_type( void ); extern GType vips_foreign_load_magick_buffer_get_type( void ); extern GType vips_foreign_load_magick7_file_get_type( void ); extern GType vips_foreign_load_magick7_buffer_get_type( void ); extern GType 
vips_foreign_save_dz_file_get_type( void ); extern GType vips_foreign_save_dz_buffer_get_type( void ); extern GType vips_foreign_load_webp_file_get_type( void ); extern GType vips_foreign_load_webp_buffer_get_type( void ); extern GType vips_foreign_save_webp_file_get_type( void ); extern GType vips_foreign_save_webp_buffer_get_type( void ); extern GType vips_foreign_load_pdf_get_type( void ); extern GType vips_foreign_load_pdf_file_get_type( void ); extern GType vips_foreign_load_pdf_buffer_get_type( void ); extern GType vips_foreign_load_svg_get_type( void ); extern GType vips_foreign_load_svg_file_get_type( void ); extern GType vips_foreign_load_svg_buffer_get_type( void ); extern GType vips_foreign_load_gif_get_type( void ); extern GType vips_foreign_load_gif_file_get_type( void ); extern GType vips_foreign_load_gif_buffer_get_type( void ); vips_foreign_load_csv_get_type(); vips_foreign_save_csv_get_type(); vips_foreign_load_matrix_get_type(); vips_foreign_save_matrix_get_type(); vips_foreign_print_matrix_get_type(); vips_foreign_load_raw_get_type(); vips_foreign_save_raw_get_type(); vips_foreign_save_raw_fd_get_type(); vips_foreign_load_vips_get_type(); vips_foreign_save_vips_get_type(); #ifdef HAVE_ANALYZE vips_foreign_load_analyze_get_type(); #endif /*HAVE_ANALYZE*/ #ifdef HAVE_PPM vips_foreign_load_ppm_get_type(); vips_foreign_save_ppm_get_type(); #endif /*HAVE_PPM*/ #ifdef HAVE_RADIANCE vips_foreign_load_rad_get_type(); vips_foreign_save_rad_file_get_type(); vips_foreign_save_rad_buffer_get_type(); #endif /*HAVE_RADIANCE*/ #ifdef HAVE_POPPLER vips_foreign_load_pdf_get_type(); vips_foreign_load_pdf_file_get_type(); vips_foreign_load_pdf_buffer_get_type(); #endif /*HAVE_POPPLER*/ #ifdef HAVE_RSVG vips_foreign_load_svg_get_type(); vips_foreign_load_svg_file_get_type(); vips_foreign_load_svg_buffer_get_type(); #endif /*HAVE_RSVG*/ #ifdef HAVE_GIFLIB vips_foreign_load_gif_get_type(); vips_foreign_load_gif_file_get_type(); vips_foreign_load_gif_buffer_get_type(); #endif /*HAVE_GIFLIB*/ #ifdef HAVE_GSF vips_foreign_save_dz_file_get_type(); vips_foreign_save_dz_buffer_get_type(); #endif /*HAVE_GSF*/ #ifdef HAVE_PNG vips_foreign_load_png_get_type(); vips_foreign_load_png_buffer_get_type(); vips_foreign_save_png_file_get_type(); vips_foreign_save_png_buffer_get_type(); #endif /*HAVE_PNG*/ #ifdef HAVE_MATIO vips_foreign_load_mat_get_type(); #endif /*HAVE_MATIO*/ #ifdef HAVE_JPEG vips_foreign_load_jpeg_file_get_type(); vips_foreign_load_jpeg_buffer_get_type(); vips_foreign_save_jpeg_file_get_type(); vips_foreign_save_jpeg_buffer_get_type(); vips_foreign_save_jpeg_mime_get_type(); #endif /*HAVE_JPEG*/ #ifdef HAVE_LIBWEBP vips_foreign_load_webp_file_get_type(); vips_foreign_load_webp_buffer_get_type(); vips_foreign_save_webp_file_get_type(); vips_foreign_save_webp_buffer_get_type(); #endif /*HAVE_LIBWEBP*/ #ifdef HAVE_TIFF vips_foreign_load_tiff_file_get_type(); vips_foreign_load_tiff_buffer_get_type(); vips_foreign_save_tiff_file_get_type(); vips_foreign_save_tiff_buffer_get_type(); #endif /*HAVE_TIFF*/ #ifdef HAVE_OPENSLIDE vips_foreign_load_openslide_get_type(); #endif /*HAVE_OPENSLIDE*/ #ifdef HAVE_MAGICK vips_foreign_load_magick_file_get_type(); vips_foreign_load_magick_buffer_get_type(); #endif /*HAVE_MAGICK*/ #ifdef HAVE_MAGICK7 vips_foreign_load_magick7_file_get_type(); vips_foreign_load_magick7_buffer_get_type(); #endif /*HAVE_MAGICK7*/ #ifdef HAVE_CFITSIO vips_foreign_load_fits_get_type(); vips_foreign_save_fits_get_type(); #endif /*HAVE_CFITSIO*/ #ifdef HAVE_OPENEXR 
vips_foreign_load_openexr_get_type(); #endif /*HAVE_OPENEXR*/ vips__foreign_load_operation = g_quark_from_static_string( "vips-foreign-load-operation" ); }
vips_foreign_load_start( VipsImage *out, void *a, void *b ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( b ); VipsForeignLoadClass *class = VIPS_FOREIGN_LOAD_GET_CLASS( load ); if( !load->real ) { if( !(load->real = vips_foreign_load_temp( load )) ) return( NULL ); #ifdef DEBUG printf( "vips_foreign_load_start: triggering ->load()\n" ); #endif /*DEBUG*/ /* Read the image in. This may involve a long computation and * will finish with load->real holding the decompressed image. * * We want our caller to be able to see this computation on * @out, so eval signals on ->real need to appear on ->out. */ load->real->progress_signal = load->out; /* Note the load object on the image. Loaders can use * this to signal invalidate if they hit a load error. See * vips_foreign_load_invalidate() below. */ g_object_set_qdata( G_OBJECT( load->real ), vips__foreign_load_operation, load ); if( class->load( load ) || vips_image_pio_input( load->real ) ) return( NULL ); /* ->header() read the header into @out, load has read the * image into @real. They must match exactly in size, bands, * format and coding for the copy to work. * * Some versions of ImageMagick give different results between * Ping and Load for some formats, for example. */ if( !vips_foreign_load_iscompat( load->real, out ) ) return( NULL ); /* We have to tell vips that out depends on real. We've set * the demand hint below, but not given an input there. */ vips_image_pipelinev( load->out, load->out->dhint, load->real, NULL ); } return( vips_region_new( load->real ) ); }
vips_foreign_load_start( VipsImage *out, void *a, void *b ) { VipsForeignLoad *load = VIPS_FOREIGN_LOAD( b ); VipsForeignLoadClass *class = VIPS_FOREIGN_LOAD_GET_CLASS( load ); /* If this start has failed before in another thread, we can fail now. */ if( load->error ) return( NULL ); if( !load->real ) { if( !(load->real = vips_foreign_load_temp( load )) ) return( NULL ); #ifdef DEBUG printf( "vips_foreign_load_start: triggering ->load()\n" ); #endif /*DEBUG*/ /* Read the image in. This may involve a long computation and * will finish with load->real holding the decompressed image. * * We want our caller to be able to see this computation on * @out, so eval signals on ->real need to appear on ->out. */ load->real->progress_signal = load->out; /* Note the load object on the image. Loaders can use * this to signal invalidate if they hit a load error. See * vips_foreign_load_invalidate() below. */ g_object_set_qdata( G_OBJECT( load->real ), vips__foreign_load_operation, load ); /* Load the image and check the result. * * ->header() read the header into @out, load has read the * image into @real. They must match exactly in size, bands, * format and coding for the copy to work. * * Some versions of ImageMagick give different results between * Ping and Load for some formats, for example. * * If the load fails, we need to stop */ if( class->load( load ) || vips_image_pio_input( load->real ) || vips_foreign_load_iscompat( load->real, out ) ) { vips_operation_invalidate( VIPS_OPERATION( load ) ); load->error = TRUE; return( NULL ); } /* We have to tell vips that out depends on real. We've set * the demand hint below, but not given an input there. */ vips_image_pipelinev( load->out, load->out->dhint, load->real, NULL ); } return( vips_region_new( load->real ) ); }
{'added': [(21, ' * 5/3/18'), (22, ' * \t- block _start if one start fails, see #893'), (801, '\t/* If this start has failed before in another thread, we can fail now.'), (802, '\t */'), (803, '\tif( load->error )'), (804, '\t\treturn( NULL );'), (805, ''), (829, '\t\t/* Load the image and check the result.'), (830, '\t\t *'), (831, '\t\t * ->header() read the header into @out, load has read the'), (837, '\t\t *'), (838, '\t\t * If the load fails, we need to stop'), (840, '\t\tif( class->load( load ) ||'), (841, '\t\t\tvips_image_pio_input( load->real ) ||'), (842, '\t\t\tvips_foreign_load_iscompat( load->real, out ) ) {'), (843, '\t\t\tvips_operation_invalidate( VIPS_OPERATION( load ) );'), (844, '\t\t\tload->error = TRUE;'), (845, ''), (847, '\t\t}')], 'deleted': [(822, '\t\tif( class->load( load ) ||'), (823, '\t\t\tvips_image_pio_input( load->real ) )'), (824, '\t\t\treturn( NULL );'), (825, ''), (826, '\t\t/* ->header() read the header into @out, load has read the'), (833, '\t\tif( !vips_foreign_load_iscompat( load->real, out ) )')]}
19
6
959
5271
https://github.com/jcupitt/libvips
CVE-2018-7998
['CWE-476', 'CWE-362']
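The diff above (CVE-2018-7998, libvips) guards vips_foreign_load_start() with a latched error flag: once one thread's ->load() fails, every later start call returns immediately instead of racing onto a half-initialised image, which is the CWE-476/CWE-362 pair listed for this record. Below is a minimal, self-contained sketch of that error-latching pattern; the names (Load, start, fail_this_time) are hypothetical and this is not libvips code.

/* Sketch of the "latch the first failure" pattern from the diff above. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	int *real;   /* lazily created resource (hypothetical) */
	bool error;  /* latched once any start attempt fails   */
} Load;

static int *start(Load *load, bool fail_this_time)
{
	/* A previous start failed in some caller: give up immediately
	 * rather than re-entering the half-initialised path. */
	if (load->error)
		return NULL;

	if (!load->real) {
		if (fail_this_time) {       /* simulate a failed ->load() */
			load->error = true; /* latch the failure for all later calls */
			return NULL;
		}
		load->real = malloc(sizeof *load->real);
		if (!load->real) {
			load->error = true;
			return NULL;
		}
		*load->real = 42;
	}
	return load->real;
}

int main(void)
{
	Load load = { NULL, false };

	if (!start(&load, true))
		puts("first start failed, error latched");
	if (!start(&load, false))
		puts("second start fails fast instead of using a bad image");

	free(load.real);
	return 0;
}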
ne.c
r_bin_ne_get_relocs
/* radare - LGPL - Copyright 2019-2022 - GustavoLCR */ #include "ne.h" #define NE_BUG 0 static char *__get_target_os(r_bin_ne_obj_t *bin) { switch (bin->ne_header->targOS) { case 1: return "OS/2"; case 2: return "Windows"; case 3: return "European MS-DOS 4.x"; case 4: return "Windows 386"; case 5: return "BOSS (Borland Operating System Services)"; default: return "Unknown"; } } static int __translate_perms(int flags) { int perms = 0; if (flags & IS_RX) { if (flags & IS_DATA) { perms = R_PERM_R; } else { perms = R_PERM_X; } } if (!perms) { perms = R_PERM_RWX; } return perms; } static char *__read_nonnull_str_at(RBuffer *buf, ut64 offset) { ut8 sz = r_buf_read8_at (buf, offset); if (!sz) { return NULL; } char *str = malloc ((ut64)sz + 1); if (!str) { return NULL; } r_buf_read_at (buf, offset + 1, (ut8 *)str, sz); str[sz] = '\0'; return str; } static char *__func_name_from_ord(const char *module, ut16 ordinal) { if (!module) { return NULL; } char *lower_module = strdup (module); r_str_case (lower_module, false); char *path = r_str_newf (R_JOIN_4_PATHS ("%s", R2_SDB_FORMAT, "dll", "%s.sdb"), r_sys_prefix (NULL), lower_module); free (lower_module); char *ord = r_str_newf ("%d", ordinal); char *name; if (r_file_exists (path)) { Sdb *sdb = sdb_new (NULL, path, 0); name = sdb_get (sdb, ord, NULL); if (!name) { name = ord; } else { free (ord); } sdb_close (sdb); free (sdb); } else { name = ord; } free (path); return name; } RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? 
"MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; } static int __find_symbol_by_paddr(const void *paddr, const void *sym) { return (int)!(*(ut64 *)paddr == ((RBinSymbol *)sym)->paddr); } RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; } static char *__resource_type_str(int type) { char *typeName; switch (type) { case 1: typeName = "CURSOR"; break; case 2: typeName = "BITMAP"; break; case 3: typeName = "ICON"; break; case 4: typeName = "MENU"; break; case 5: typeName = "DIALOG"; break; case 6: typeName = "STRING"; break; case 7: typeName = "FONTDIR"; break; case 8: typeName = "FONT"; break; case 9: typeName = "ACCELERATOR"; break; case 10: typeName = "RCDATA"; break; case 11: typeName = "MESSAGETABLE"; break; case 12: typeName = "GROUP_CURSOR"; break; case 14: typeName = "GROUP_ICON"; break; case 15: typeName = "NAMETABLE"; break; case 16: typeName = "VERSION"; break; case 17: typeName = "DLGINCLUDE"; break; case 19: typeName = "PLUGPLAY"; break; case 20: typeName = "VXD"; break; case 21: typeName = "ANICURSOR"; break; case 22: typeName = "ANIICON"; break; case 23: typeName = "HTML"; break; case 24: typeName = "MANIFEST"; break; default: return r_str_newf ("UNKNOWN (%d)", type); } return strdup (typeName); } static void __free_resource_entry(void *entry) { r_ne_resource_entry *en = (r_ne_resource_entry *)entry; free (en->name); free (en); } static void __free_resource(void *resource) { r_ne_resource *res = (r_ne_resource *)resource; free (res->name); r_list_free (res->entry); free (res); } static bool __ne_get_resources(r_bin_ne_obj_t *bin) { if (!bin->resources) { bin->resources = r_list_newf (__free_resource); } ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset; ut16 alignment = r_buf_read_le16_at (bin->buf, resoff); ut32 off = resoff + 2; while (true) { NE_image_typeinfo_entry ti = {0}; r_ne_resource *res = R_NEW0 (r_ne_resource); if (!res) { break; } res->entry = r_list_newf (__free_resource_entry); if (!res->entry) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti)); if (!ti.rtTypeID) { break; } else if (ti.rtTypeID & 0x8000) { 
res->name = __resource_type_str (ti.rtTypeID & ~0x8000); } else { // Offset to resident name table res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID); } off += sizeof (NE_image_typeinfo_entry); int i; for (i = 0; i < ti.rtResourceCount; i++) { NE_image_nameinfo_entry ni; r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry); if (!ren) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry)); ren->offset = ni.rnOffset << alignment; ren->size = ni.rnLength; if (ni.rnID & 0x8000) { ren->name = r_str_newf ("%d", ni.rnID & ~0x8000); } else { // Offset to resident name table ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID); } r_list_append (res->entry, ren); off += sizeof (NE_image_nameinfo_entry); } r_list_append (bin->resources, res); } return true; } RList *r_bin_ne_get_imports(r_bin_ne_obj_t *bin) { RList *imports = r_list_newf ((RListFree)r_bin_import_free); if (!imports) { return NULL; } ut16 off = bin->ne_header->ImportNameTable + bin->header_offset + 1; int i; for (i = 0; i < bin->ne_header->ModRefs; i++) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { break; } ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { r_bin_import_free (imp); break; } off++; char *name = malloc ((ut64)sz + 1); if (!name) { break; } r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; imp->name = name; imp->ordinal = i + 1; r_list_append (imports, imp); off += sz; } bin->imports = imports; return imports; } RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? 
s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; ut16 segoff = *(ut16 *)(bin->entry_table + off); if (segnum > 0) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + *(ut16 *)(bin->entry_table + off); } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; } RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name; #if NE_BUG if (rel.index > 0 && rel.index < bin->ne_header->ModRefs) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } else { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? } #else if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? 
} else { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } #endif if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; imp->name = r_str_newf ("%s.%s", name, __func_name_from_ord(name, rel.func_ord)); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; } void __init(RBuffer *buf, r_bin_ne_obj_t *bin) { bin->header_offset = r_buf_read_le16_at (buf, 0x3c); bin->ne_header = R_NEW0 (NE_image_header); if (!bin->ne_header) { return; } bin->buf = buf; // XXX this is endian unsafe if (r_buf_read_at (buf, bin->header_offset, (ut8 *)bin->ne_header, sizeof (NE_image_header)) < 1) { R_FREE (bin->ne_header); return; } if (bin->ne_header->FileAlnSzShftCnt > 15) { bin->ne_header->FileAlnSzShftCnt = 15; } ut64 from = bin->ne_header->ModRefTable + bin->header_offset; ut64 left = r_buf_size (bin->buf) - from; if (from + bin->ne_header->ModRefs * sizeof (ut16) >= left) { bin->ne_header->ModRefs = left / sizeof (ut16); } bin->alignment = 1 << bin->ne_header->FileAlnSzShftCnt; if (!bin->alignment) { bin->alignment = 1 << 9; } bin->os = __get_target_os (bin); ut16 offset = bin->ne_header->SegTableOffset + bin->header_offset; size_t size = bin->ne_header->SegCount * sizeof (NE_image_segment_entry); if (offset >= r_buf_size (bin->buf)) { return; } size_t remaining = r_buf_size (bin->buf) - offset; size = R_MIN (remaining, size); bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); // * sizeof (NE_image_segment_entry); bin->segment_entries = calloc (1, size); if (size >= remaining) { bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); } if (!bin->segment_entries) { return; } r_buf_read_at (buf, offset, (ut8 *)bin->segment_entries, size); bin->entry_table = calloc (4, bin->ne_header->EntryTableLength); if (!bin->entry_table) { R_FREE (bin->segment_entries); return; } r_buf_read_at (buf, (ut64)bin->header_offset + bin->ne_header->EntryTableOffset, bin->entry_table, bin->ne_header->EntryTableLength); bin->imports = r_bin_ne_get_imports (bin); __ne_get_resources (bin); } void r_bin_ne_free(r_bin_ne_obj_t *bin) { // r_list_free (bin->imports); // double free r_list_free (bin->resources); free (bin->entry_table); free (bin->ne_header); free (bin->resident_name_table); free (bin->segment_entries); free (bin); } r_bin_ne_obj_t 
*r_bin_ne_new_buf(RBuffer *buf, bool verbose) { r_bin_ne_obj_t *bin = R_NEW0 (r_bin_ne_obj_t); if (!bin) { return NULL; } __init(buf, bin); return bin; }
/* radare - LGPL - Copyright 2019-2022 - GustavoLCR */ #include "ne.h" static char *__get_target_os(r_bin_ne_obj_t *bin) { switch (bin->ne_header->targOS) { case 1: return "OS/2"; case 2: return "Windows"; case 3: return "European MS-DOS 4.x"; case 4: return "Windows 386"; case 5: return "BOSS (Borland Operating System Services)"; default: return "Unknown"; } } static int __translate_perms(int flags) { int perms = 0; if (flags & IS_RX) { if (flags & IS_DATA) { perms = R_PERM_R; } else { perms = R_PERM_X; } } if (!perms) { perms = R_PERM_RWX; } return perms; } static char *__read_nonnull_str_at(RBuffer *buf, ut64 offset) { ut8 sz = r_buf_read8_at (buf, offset); if (!sz) { return NULL; } char *str = malloc ((ut64)sz + 1); if (!str) { return NULL; } r_buf_read_at (buf, offset + 1, (ut8 *)str, sz); str[sz] = '\0'; return str; } static char *__func_name_from_ord(const char *module, ut16 ordinal) { if (!module) { return NULL; } char *lower_module = strdup (module); r_str_case (lower_module, false); char *path = r_str_newf (R_JOIN_4_PATHS ("%s", R2_SDB_FORMAT, "dll", "%s.sdb"), r_sys_prefix (NULL), lower_module); free (lower_module); char *ord = r_str_newf ("%d", ordinal); char *name; if (r_file_exists (path)) { Sdb *sdb = sdb_new (NULL, path, 0); name = sdb_get (sdb, ord, NULL); if (!name) { name = ord; } else { free (ord); } sdb_close (sdb); free (sdb); } else { name = ord; } free (path); return name; } RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? 
"MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; } static int __find_symbol_by_paddr(const void *paddr, const void *sym) { return (int)!(*(ut64 *)paddr == ((RBinSymbol *)sym)->paddr); } RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; } static char *__resource_type_str(int type) { char *typeName; switch (type) { case 1: typeName = "CURSOR"; break; case 2: typeName = "BITMAP"; break; case 3: typeName = "ICON"; break; case 4: typeName = "MENU"; break; case 5: typeName = "DIALOG"; break; case 6: typeName = "STRING"; break; case 7: typeName = "FONTDIR"; break; case 8: typeName = "FONT"; break; case 9: typeName = "ACCELERATOR"; break; case 10: typeName = "RCDATA"; break; case 11: typeName = "MESSAGETABLE"; break; case 12: typeName = "GROUP_CURSOR"; break; case 14: typeName = "GROUP_ICON"; break; case 15: typeName = "NAMETABLE"; break; case 16: typeName = "VERSION"; break; case 17: typeName = "DLGINCLUDE"; break; case 19: typeName = "PLUGPLAY"; break; case 20: typeName = "VXD"; break; case 21: typeName = "ANICURSOR"; break; case 22: typeName = "ANIICON"; break; case 23: typeName = "HTML"; break; case 24: typeName = "MANIFEST"; break; default: return r_str_newf ("UNKNOWN (%d)", type); } return strdup (typeName); } static void __free_resource_entry(void *entry) { r_ne_resource_entry *en = (r_ne_resource_entry *)entry; free (en->name); free (en); } static void __free_resource(void *resource) { r_ne_resource *res = (r_ne_resource *)resource; free (res->name); r_list_free (res->entry); free (res); } static bool __ne_get_resources(r_bin_ne_obj_t *bin) { if (!bin->resources) { bin->resources = r_list_newf (__free_resource); } ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset; ut16 alignment = r_buf_read_le16_at (bin->buf, resoff); ut32 off = resoff + 2; while (true) { NE_image_typeinfo_entry ti = {0}; r_ne_resource *res = R_NEW0 (r_ne_resource); if (!res) { break; } res->entry = r_list_newf (__free_resource_entry); if (!res->entry) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti)); if (!ti.rtTypeID) { break; } else if (ti.rtTypeID & 0x8000) { 
res->name = __resource_type_str (ti.rtTypeID & ~0x8000); } else { // Offset to resident name table res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID); } off += sizeof (NE_image_typeinfo_entry); int i; for (i = 0; i < ti.rtResourceCount; i++) { NE_image_nameinfo_entry ni; r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry); if (!ren) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry)); ren->offset = ni.rnOffset << alignment; ren->size = ni.rnLength; if (ni.rnID & 0x8000) { ren->name = r_str_newf ("%d", ni.rnID & ~0x8000); } else { // Offset to resident name table ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID); } r_list_append (res->entry, ren); off += sizeof (NE_image_nameinfo_entry); } r_list_append (bin->resources, res); } return true; } RList *r_bin_ne_get_imports(r_bin_ne_obj_t *bin) { RList *imports = r_list_newf ((RListFree)r_bin_import_free); if (!imports) { return NULL; } ut16 off = bin->ne_header->ImportNameTable + bin->header_offset + 1; int i; for (i = 0; i < bin->ne_header->ModRefs; i++) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { break; } ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { r_bin_import_free (imp); break; } off++; char *name = malloc ((ut64)sz + 1); if (!name) { break; } r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; imp->name = name; imp->ordinal = i + 1; r_list_append (imports, imp); off += sz; } bin->imports = imports; return imports; } RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? 
s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; ut16 segoff = *(ut16 *)(bin->entry_table + off); if (segnum > 0) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + *(ut16 *)(bin->entry_table + off); } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; } RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name = NULL; if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? 
} else if (rel.index > 0) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; char *fname = __func_name_from_ord (name, rel.func_ord); imp->name = r_str_newf ("%s.%s", name, fname); free (fname); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #define NE_BUG 0 #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; } void __init(RBuffer *buf, r_bin_ne_obj_t *bin) { bin->header_offset = r_buf_read_le16_at (buf, 0x3c); bin->ne_header = R_NEW0 (NE_image_header); if (!bin->ne_header) { return; } bin->buf = buf; // XXX this is endian unsafe if (r_buf_read_at (buf, bin->header_offset, (ut8 *)bin->ne_header, sizeof (NE_image_header)) < 1) { R_FREE (bin->ne_header); return; } if (bin->ne_header->FileAlnSzShftCnt > 15) { bin->ne_header->FileAlnSzShftCnt = 15; } ut64 from = bin->ne_header->ModRefTable + bin->header_offset; ut64 left = r_buf_size (bin->buf) - from; if (from + bin->ne_header->ModRefs * sizeof (ut16) >= left) { bin->ne_header->ModRefs = left / sizeof (ut16); } bin->alignment = 1 << bin->ne_header->FileAlnSzShftCnt; if (!bin->alignment) { bin->alignment = 1 << 9; } bin->os = __get_target_os (bin); ut16 offset = bin->ne_header->SegTableOffset + bin->header_offset; size_t size = bin->ne_header->SegCount * sizeof (NE_image_segment_entry); if (offset >= r_buf_size (bin->buf)) { return; } size_t remaining = r_buf_size (bin->buf) - offset; size = R_MIN (remaining, size); bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); // * sizeof (NE_image_segment_entry); bin->segment_entries = calloc (1, size); if (size >= remaining) { bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); } if (!bin->segment_entries) { return; } r_buf_read_at (buf, offset, (ut8 *)bin->segment_entries, size); bin->entry_table = calloc (4, bin->ne_header->EntryTableLength); if (!bin->entry_table) { R_FREE (bin->segment_entries); return; } r_buf_read_at (buf, (ut64)bin->header_offset + bin->ne_header->EntryTableOffset, bin->entry_table, bin->ne_header->EntryTableLength); bin->imports = r_bin_ne_get_imports (bin); __ne_get_resources (bin); } void r_bin_ne_free(r_bin_ne_obj_t *bin) { // r_list_free (bin->imports); // double free r_list_free (bin->resources); free (bin->entry_table); free (bin->ne_header); free 
(bin->resident_name_table); free (bin->segment_entries); free (bin); } r_bin_ne_obj_t *r_bin_ne_new_buf(RBuffer *buf, bool verbose) { r_bin_ne_obj_t *bin = R_NEW0 (r_bin_ne_obj_t); if (!bin) { return NULL; } __init(buf, bin); return bin; }
RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name; #if NE_BUG if (rel.index > 0 && rel.index < bin->ne_header->ModRefs) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } else { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? } #else if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? 
} else { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } #endif if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; imp->name = r_str_newf ("%s.%s", name, __func_name_from_ord(name, rel.func_ord)); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; }
RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name = NULL; if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? } else if (rel.index > 0) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; char *fname = __func_name_from_ord (name, rel.func_ord); imp->name = r_str_newf ("%s.%s", name, fname); free (fname); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #define NE_BUG 0 #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; }
{'added': [(507, '\t\t\t\tchar *name = NULL;'), (510, '\t\t\t\t} else if (rel.index > 0) {'), (516, '\t\t\t\t\tchar *fname = __func_name_from_ord (name, rel.func_ord);'), (517, '\t\t\t\t\timp->name = r_str_newf ("%s.%s", name, fname);'), (518, '\t\t\t\t\tfree (fname);'), (561, '#define NE_BUG 0')], 'deleted': [(4, '#define NE_BUG 0'), (508, '\t\t\t\tchar *name;'), (509, '#if NE_BUG'), (510, '\t\t\t\tif (rel.index > 0 && rel.index < bin->ne_header->ModRefs) {'), (511, '\t\t\t\t\toffset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable;'), (512, '\t\t\t\t\tname = __read_nonnull_str_at (bin->buf, offset);'), (513, '\t\t\t\t} else {'), (514, '\t\t\t\t\tname = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ????'), (515, '\t\t\t\t}'), (516, '#else'), (519, '\t\t\t\t} else {'), (523, '#endif'), (526, '\t\t\t\t\timp->name = r_str_newf ("%s.%s", name, __func_name_from_ord(name, rel.func_ord));')]}
6
13
619
3,727
https://github.com/radareorg/radare2
CVE-2022-1296
['CWE-125']
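Editor's note on the record above (radare2, CVE-2022-1296, CWE-125): the diff tightens the module-reference lookup in r_bin_ne_get_relocs so that a relocation whose index is 0 or larger than ModRefs no longer reaches modref[rel.index - 1] outside the table, and it also frees the string returned by __func_name_from_ord to close a leak. The standalone C sketch below illustrates only the bounds-check pattern; the names are invented for the example, and it folds both the zero and the too-large index into a single placeholder branch, which the upstream patch handles slightly differently (there, name stays NULL when the index is 0).

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: resolve a 1-based module-reference index into a
 * printable name without ever reading past the modref table. */
static void lookup_module_name(const uint16_t *modref, uint16_t nmodrefs,
		uint16_t index, char *out, size_t outsz) {
	if (index == 0 || index > nmodrefs) {
		/* out-of-range index: synthesize a placeholder instead of
		 * dereferencing modref[index - 1] out of bounds */
		snprintf (out, outsz, "UnknownModule%u", (unsigned)index);
	} else {
		/* in-range: the table entry is an offset into the import
		 * name table; here we just echo the offset */
		snprintf (out, outsz, "module_at_offset_0x%x", (unsigned)modref[index - 1]);
	}
}

int main(void) {
	uint16_t modref[] = { 0x10, 0x24, 0x3c };
	const uint16_t n = (uint16_t)(sizeof (modref) / sizeof (modref[0]));
	const uint16_t probes[] = { 0, 2, 7 }; /* 0 and 7 are out of range */
	char name[64];
	int i;
	for (i = 0; i < 3; i++) {
		lookup_module_name (modref, n, probes[i], name, sizeof (name));
		printf ("index %u -> %s\n", (unsigned)probes[i], name);
	}
	return 0;
}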
jpc_t2cod.c
jpc_pi_nextcprl
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ /* * Tier-2 Coding Library * * $Id$ */ #include "jasper/jas_math.h" #include "jasper/jas_malloc.h" #include "jpc_cs.h" #include "jpc_t2cod.h" #include "jpc_math.h" static int jpc_pi_nextlrcp(jpc_pi_t *pi); static int jpc_pi_nextrlcp(jpc_pi_t *pi); static int jpc_pi_nextrpcl(jpc_pi_t *pi); static int jpc_pi_nextpcrl(jpc_pi_t *pi); static int jpc_pi_nextcprl(jpc_pi_t *pi); int jpc_pi_next(jpc_pi_t *pi) { jpc_pchg_t *pchg; int ret; for (;;) { pi->valid = false; if (!pi->pchg) { ++pi->pchgno; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->prgvolfirst = true; if (pi->pchgno < jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = jpc_pchglist_get(pi->pchglist, pi->pchgno); } else if (pi->pchgno == jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = &pi->defaultpchg; } else { return 1; } } pchg = pi->pchg; switch (pchg->prgord) { case JPC_COD_LRCPPRG: ret = jpc_pi_nextlrcp(pi); break; case JPC_COD_RLCPPRG: ret = jpc_pi_nextrlcp(pi); break; case JPC_COD_RPCLPRG: ret = jpc_pi_nextrpcl(pi); break; case JPC_COD_PCRLPRG: ret = jpc_pi_nextpcrl(pi); break; case JPC_COD_CPRLPRG: ret = jpc_pi_nextcprl(pi); break; default: ret = -1; break; } if (!ret) { pi->valid = true; ++pi->pktno; return 0; } pi->pchg = 0; } } static int jpc_pi_nextlrcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = false; } for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; 
pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (1 << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (1 << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? 
ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || 
!(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static void pirlvl_destroy(jpc_pirlvl_t *rlvl) { if (rlvl->prclyrnos) { jas_free(rlvl->prclyrnos); } } static void jpc_picomp_destroy(jpc_picomp_t *picomp) { int rlvlno; jpc_pirlvl_t *pirlvl; if (picomp->pirlvls) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl_destroy(pirlvl); } jas_free(picomp->pirlvls); } } void jpc_pi_destroy(jpc_pi_t *pi) { jpc_picomp_t *picomp; int compno; if (pi->picomps) { for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { jpc_picomp_destroy(picomp); } jas_free(pi->picomps); } if (pi->pchglist) { jpc_pchglist_destroy(pi->pchglist); } jas_free(pi); } jpc_pi_t *jpc_pi_create0() { jpc_pi_t *pi; if (!(pi = jas_malloc(sizeof(jpc_pi_t)))) { return 0; } pi->picomps = 0; pi->pchgno = 0; if (!(pi->pchglist = jpc_pchglist_create())) { jas_free(pi); return 0; } return pi; } int jpc_pi_addpchg(jpc_pi_t *pi, jpc_pocpchg_t *pchg) { return jpc_pchglist_insert(pi->pchglist, -1, pchg); } jpc_pchglist_t *jpc_pchglist_create() { jpc_pchglist_t *pchglist; if (!(pchglist = jas_malloc(sizeof(jpc_pchglist_t)))) { return 0; } pchglist->numpchgs = 0; pchglist->maxpchgs = 0; pchglist->pchgs = 0; return pchglist; } int jpc_pchglist_insert(jpc_pchglist_t *pchglist, int pchgno, jpc_pchg_t *pchg) { int i; int newmaxpchgs; jpc_pchg_t **newpchgs; if (pchgno < 0) { pchgno = pchglist->numpchgs; } if (pchglist->numpchgs >= pchglist->maxpchgs) { newmaxpchgs = pchglist->maxpchgs + 128; if (!(newpchgs = jas_realloc2(pchglist->pchgs, newmaxpchgs, sizeof(jpc_pchg_t *)))) { return -1; } pchglist->maxpchgs = newmaxpchgs; pchglist->pchgs = newpchgs; } for (i = pchglist->numpchgs; i > pchgno; --i) { pchglist->pchgs[i] = pchglist->pchgs[i - 1]; } pchglist->pchgs[pchgno] = pchg; ++pchglist->numpchgs; return 0; } jpc_pchg_t *jpc_pchglist_remove(jpc_pchglist_t *pchglist, int pchgno) { int i; jpc_pchg_t *pchg; assert(pchgno < pchglist->numpchgs); pchg = pchglist->pchgs[pchgno]; for (i = pchgno + 1; i < pchglist->numpchgs; ++i) { pchglist->pchgs[i - 1] = pchglist->pchgs[i]; } --pchglist->numpchgs; return pchg; } jpc_pchg_t *jpc_pchg_copy(jpc_pchg_t *pchg) { jpc_pchg_t *newpchg; if (!(newpchg = jas_malloc(sizeof(jpc_pchg_t)))) { return 0; } *newpchg = *pchg; return newpchg; } jpc_pchglist_t *jpc_pchglist_copy(jpc_pchglist_t *pchglist) { jpc_pchglist_t *newpchglist; jpc_pchg_t *newpchg; int pchgno; if (!(newpchglist = jpc_pchglist_create())) { return 0; } for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { if (!(newpchg = jpc_pchg_copy(pchglist->pchgs[pchgno])) || jpc_pchglist_insert(newpchglist, -1, newpchg)) { jpc_pchglist_destroy(newpchglist); return 0; } } return newpchglist; } void jpc_pchglist_destroy(jpc_pchglist_t *pchglist) { int pchgno; if (pchglist->pchgs) 
{ for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { jpc_pchg_destroy(pchglist->pchgs[pchgno]); } jas_free(pchglist->pchgs); } jas_free(pchglist); } void jpc_pchg_destroy(jpc_pchg_t *pchg) { jas_free(pchg); } jpc_pchg_t *jpc_pchglist_get(jpc_pchglist_t *pchglist, int pchgno) { return pchglist->pchgs[pchgno]; } int jpc_pchglist_numpchgs(jpc_pchglist_t *pchglist) { return pchglist->numpchgs; } int jpc_pi_init(jpc_pi_t *pi) { int compno; int rlvlno; int prcno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; int *prclyrno; pi->prgvolfirst = 0; pi->valid = 0; pi->pktno = -1; pi->pchgno = -1; pi->pchg = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } } } return 0; }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ /* * Tier-2 Coding Library * * $Id$ */ #include "jasper/jas_math.h" #include "jasper/jas_malloc.h" #include "jpc_cs.h" #include "jpc_t2cod.h" #include "jpc_math.h" static int jpc_pi_nextlrcp(jpc_pi_t *pi); static int jpc_pi_nextrlcp(jpc_pi_t *pi); static int jpc_pi_nextrpcl(jpc_pi_t *pi); static int jpc_pi_nextpcrl(jpc_pi_t *pi); static int jpc_pi_nextcprl(jpc_pi_t *pi); int jpc_pi_next(jpc_pi_t *pi) { jpc_pchg_t *pchg; int ret; for (;;) { pi->valid = false; if (!pi->pchg) { ++pi->pchgno; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->prgvolfirst = true; if (pi->pchgno < jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = jpc_pchglist_get(pi->pchglist, pi->pchgno); } else if (pi->pchgno == jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = &pi->defaultpchg; } else { return 1; } } pchg = pi->pchg; switch (pchg->prgord) { case JPC_COD_LRCPPRG: ret = jpc_pi_nextlrcp(pi); break; case JPC_COD_RLCPPRG: ret = jpc_pi_nextrlcp(pi); break; case JPC_COD_RPCLPRG: ret = jpc_pi_nextrpcl(pi); break; case JPC_COD_PCRLPRG: ret = jpc_pi_nextpcrl(pi); break; case JPC_COD_CPRLPRG: ret = jpc_pi_nextcprl(pi); break; default: ret = -1; break; } if (!ret) { pi->valid = true; ++pi->pktno; return 0; } pi->pchg = 0; } } static int jpc_pi_nextlrcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = false; } for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; 
pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. 
if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; // Check for the potential for overflow problems. 
if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static void pirlvl_destroy(jpc_pirlvl_t *rlvl) { if (rlvl->prclyrnos) { jas_free(rlvl->prclyrnos); } } static void jpc_picomp_destroy(jpc_picomp_t *picomp) { int rlvlno; jpc_pirlvl_t *pirlvl; if (picomp->pirlvls) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl_destroy(pirlvl); } jas_free(picomp->pirlvls); } } void jpc_pi_destroy(jpc_pi_t *pi) { jpc_picomp_t *picomp; int compno; if (pi->picomps) { for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { jpc_picomp_destroy(picomp); } jas_free(pi->picomps); } if (pi->pchglist) { jpc_pchglist_destroy(pi->pchglist); } jas_free(pi); } jpc_pi_t *jpc_pi_create0() { jpc_pi_t *pi; if (!(pi = jas_malloc(sizeof(jpc_pi_t)))) { return 0; } pi->picomps = 0; pi->pchgno = 0; if (!(pi->pchglist = jpc_pchglist_create())) { jas_free(pi); return 0; } return pi; } int jpc_pi_addpchg(jpc_pi_t *pi, jpc_pocpchg_t *pchg) { return jpc_pchglist_insert(pi->pchglist, -1, pchg); } jpc_pchglist_t *jpc_pchglist_create() { jpc_pchglist_t *pchglist; if (!(pchglist = jas_malloc(sizeof(jpc_pchglist_t)))) { return 0; } pchglist->numpchgs = 0; pchglist->maxpchgs = 0; pchglist->pchgs = 0; return pchglist; } int 
jpc_pchglist_insert(jpc_pchglist_t *pchglist, int pchgno, jpc_pchg_t *pchg) { int i; int newmaxpchgs; jpc_pchg_t **newpchgs; if (pchgno < 0) { pchgno = pchglist->numpchgs; } if (pchglist->numpchgs >= pchglist->maxpchgs) { newmaxpchgs = pchglist->maxpchgs + 128; if (!(newpchgs = jas_realloc2(pchglist->pchgs, newmaxpchgs, sizeof(jpc_pchg_t *)))) { return -1; } pchglist->maxpchgs = newmaxpchgs; pchglist->pchgs = newpchgs; } for (i = pchglist->numpchgs; i > pchgno; --i) { pchglist->pchgs[i] = pchglist->pchgs[i - 1]; } pchglist->pchgs[pchgno] = pchg; ++pchglist->numpchgs; return 0; } jpc_pchg_t *jpc_pchglist_remove(jpc_pchglist_t *pchglist, int pchgno) { int i; jpc_pchg_t *pchg; assert(pchgno < pchglist->numpchgs); pchg = pchglist->pchgs[pchgno]; for (i = pchgno + 1; i < pchglist->numpchgs; ++i) { pchglist->pchgs[i - 1] = pchglist->pchgs[i]; } --pchglist->numpchgs; return pchg; } jpc_pchg_t *jpc_pchg_copy(jpc_pchg_t *pchg) { jpc_pchg_t *newpchg; if (!(newpchg = jas_malloc(sizeof(jpc_pchg_t)))) { return 0; } *newpchg = *pchg; return newpchg; } jpc_pchglist_t *jpc_pchglist_copy(jpc_pchglist_t *pchglist) { jpc_pchglist_t *newpchglist; jpc_pchg_t *newpchg; int pchgno; if (!(newpchglist = jpc_pchglist_create())) { return 0; } for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { if (!(newpchg = jpc_pchg_copy(pchglist->pchgs[pchgno])) || jpc_pchglist_insert(newpchglist, -1, newpchg)) { jpc_pchglist_destroy(newpchglist); return 0; } } return newpchglist; } void jpc_pchglist_destroy(jpc_pchglist_t *pchglist) { int pchgno; if (pchglist->pchgs) { for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { jpc_pchg_destroy(pchglist->pchgs[pchgno]); } jas_free(pchglist->pchgs); } jas_free(pchglist); } void jpc_pchg_destroy(jpc_pchg_t *pchg) { jas_free(pchg); } jpc_pchg_t *jpc_pchglist_get(jpc_pchglist_t *pchglist, int pchgno) { return pchglist->pchgs[pchgno]; } int jpc_pchglist_numpchgs(jpc_pchglist_t *pchglist) { return pchglist->numpchgs; } int jpc_pi_init(jpc_pi_t *pi) { int compno; int rlvlno; int prcno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; int *prclyrno; pi->prgvolfirst = 0; pi->valid = 0; pi->pktno = -1; pi->pchgno = -1; pi->pchg = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } } } return 0; }
static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; }
static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; }
{'added': [(201, '\t\t\t pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno,'), (202, '\t\t\t ++pi->picomp) {'), (251, '\t\t\t\t// Check for the potential for overflow problems.'), (252, '\t\t\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (253, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (254, '\t\t\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (255, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (256, '\t\t\t\t\treturn -1;'), (257, '\t\t\t\t}'), (258, '\t\t\t\txstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (259, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1));'), (260, '\t\t\t\tystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (261, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1));'), (291, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (292, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx)))'), (293, '\t\t\t\t\t || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) &&'), (294, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (295, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy)))'), (296, '\t\t\t\t\t || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) {'), (297, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (298, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (299, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (300, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (301, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (302, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (307, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int,'), (308, '\t\t\t\t\t\t pchg->lyrnoend); ++pi->lyrno) {'), (353, '\t\t\t\t// Check for the potential for overflow problems.'), (354, '\t\t\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (355, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (356, '\t\t\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (357, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (358, '\t\t\t\t\treturn -1;'), (359, '\t\t\t\t}'), (360, '\t\t\t\txstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (361, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1));'), (362, '\t\t\t\tystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (363, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1));'), (364, '\t\t\t\tpi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep);'), (365, '\t\t\t\tpi->ystep = (!pi->ystep) ? 
ystep : JAS_MIN(pi->ystep, ystep);'), (392, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (393, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) ||'), (395, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (396, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) ||'), (398, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (399, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (400, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (401, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (402, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (403, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (407, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend);'), (408, '\t\t\t\t\t\t ++pi->lyrno) {'), (446, '\tfor (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno];'), (447, '\t pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps;'), (448, '\t ++pi->compno, ++pi->picomp) {'), (450, '\t\t// Check for the potential for overflow problems.'), (451, '\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (452, '\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (453, '\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (454, '\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (455, '\t\t\treturn -1;'), (456, '\t\t}'), (486, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (487, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) ||'), (489, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (490, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) ||'), (492, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (493, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (494, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (495, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (496, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (497, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (498, '\t\t\t\t\t\tpi->prcno = prcvind * pi->pirlvl->numhprcs + prchind;'), (499, '\t\t\t\t\t\tassert(pi->prcno < pi->pirlvl->numprcs);'), (500, '\t\t\t\t\t\tfor (pi->lyrno = 0; pi->lyrno < pi->numlyrs &&'), (501, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend);'), (502, '\t\t\t\t\t\t ++pi->lyrno) {')], 'deleted': [(201, '\t\t\t pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) {'), (250, '\t\t\t\txstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn +'), (251, '\t\t\t\t picomp->numrlvls - rlvlno - 1));'), (252, '\t\t\t\tystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn +'), (253, '\t\t\t\t picomp->numrlvls - rlvlno - 1));'), (283, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx)))'), (284, '\t\t\t\t\t || !(pi->x % (1 << rpx))) &&'), (285, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy)))'), (286, '\t\t\t\t\t || !(pi->y % (1 << rpy)))) {'), (287, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (288, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (289, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (290, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (291, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (292, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (297, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {'), (342, '\t\t\t\txstep = picomp->hsamp * (1 <<'), 
(343, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls -'), (344, '\t\t\t\t rlvlno - 1));'), (345, '\t\t\t\tystep = picomp->vsamp * (1 <<'), (346, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls -'), (347, '\t\t\t\t rlvlno - 1));'), (348, '\t\t\t\tpi->xstep = (!pi->xstep) ? xstep :'), (349, '\t\t\t\t JAS_MIN(pi->xstep, xstep);'), (350, '\t\t\t\tpi->ystep = (!pi->ystep) ? ystep :'), (351, '\t\t\t\t JAS_MIN(pi->ystep, ystep);'), (378, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) ||'), (380, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) ||'), (382, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (383, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (384, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (385, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (386, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (387, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (391, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {'), (429, '\tfor (pi->compno = pchg->compnostart, pi->picomp ='), (430, '\t &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno,'), (431, '\t ++pi->picomp) {'), (462, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) ||'), (464, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) ||'), (466, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (467, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (468, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (469, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (470, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (471, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (472, '\t\t\t\t\t\tpi->prcno = prcvind *'), (473, '\t\t\t\t\t\t pi->pirlvl->numhprcs +'), (474, '\t\t\t\t\t\t prchind;'), (475, '\t\t\t\t\t\tassert(pi->prcno <'), (476, '\t\t\t\t\t\t pi->pirlvl->numprcs);'), (477, '\t\t\t\t\t\tfor (pi->lyrno = 0; pi->lyrno <'), (478, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {')]}
77
53
595
4,400
https://github.com/mdadams/jasper
CVE-2016-9583
['CWE-125']
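The diff for CVE-2016-9583 above boils down to two changes in the jpc_t2cod.c precinct iterators: the step computation now rejects resolution/precinct exponents large enough to shift past the width of uint_fast32_t (returning -1), and the shifted constant is widened with JAS_CAST(uint_fast32_t, 1) instead of a plain int 1. Below is a minimal, self-contained sketch of that guard pattern. The bound (width minus 2) and the cast mirror the diff; the function name, the UINTFAST32_NUMBITS stand-in, and the sample inputs are illustrative only and are not JasPer code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for JAS_UINTFAST32_NUMBITS: bit width of uint_fast32_t. */
#define UINTFAST32_NUMBITS (sizeof(uint_fast32_t) * 8)

/* Compute one precinct step, refusing exponents that would shift past the
 * width of uint_fast32_t (the check mirrors the one added by the fix). */
static int compute_step(unsigned hsamp, unsigned prcwidthexpn, unsigned numrlvls,
    unsigned rlvlno, uint_fast32_t *step)
{
	if (prcwidthexpn + numrlvls > UINTFAST32_NUMBITS - 2) {
		return -1;
	}
	/* Shift a value that is already uint_fast32_t, never a plain int. */
	*step = hsamp * ((uint_fast32_t)1 << (prcwidthexpn + numrlvls - rlvlno - 1));
	return 0;
}

int main(void)
{
	uint_fast32_t step;
	if (compute_step(2, 15, 6, 0, &step) == 0) {
		printf("step = %lu\n", (unsigned long)step);
	}
	if (compute_step(2, 40, 30, 0, &step) != 0) {
		printf("rejected: exponent too large for uint_fast32_t\n");
	}
	return 0;
}

The same bound appears verbatim in each iterator touched by the diff, so a malformed codestream with oversized precinct exponents is rejected before any shift is evaluated.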
jpc_t2cod.c
jpc_pi_nextpcrl
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ /* * Tier-2 Coding Library * * $Id$ */ #include "jasper/jas_math.h" #include "jasper/jas_malloc.h" #include "jpc_cs.h" #include "jpc_t2cod.h" #include "jpc_math.h" static int jpc_pi_nextlrcp(jpc_pi_t *pi); static int jpc_pi_nextrlcp(jpc_pi_t *pi); static int jpc_pi_nextrpcl(jpc_pi_t *pi); static int jpc_pi_nextpcrl(jpc_pi_t *pi); static int jpc_pi_nextcprl(jpc_pi_t *pi); int jpc_pi_next(jpc_pi_t *pi) { jpc_pchg_t *pchg; int ret; for (;;) { pi->valid = false; if (!pi->pchg) { ++pi->pchgno; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->prgvolfirst = true; if (pi->pchgno < jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = jpc_pchglist_get(pi->pchglist, pi->pchgno); } else if (pi->pchgno == jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = &pi->defaultpchg; } else { return 1; } } pchg = pi->pchg; switch (pchg->prgord) { case JPC_COD_LRCPPRG: ret = jpc_pi_nextlrcp(pi); break; case JPC_COD_RLCPPRG: ret = jpc_pi_nextrlcp(pi); break; case JPC_COD_RPCLPRG: ret = jpc_pi_nextrpcl(pi); break; case JPC_COD_PCRLPRG: ret = jpc_pi_nextpcrl(pi); break; case JPC_COD_CPRLPRG: ret = jpc_pi_nextcprl(pi); break; default: ret = -1; break; } if (!ret) { pi->valid = true; ++pi->pktno; return 0; } pi->pchg = 0; } } static int jpc_pi_nextlrcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = false; } for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; 
pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (1 << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (1 << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? 
ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || 
!(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static void pirlvl_destroy(jpc_pirlvl_t *rlvl) { if (rlvl->prclyrnos) { jas_free(rlvl->prclyrnos); } } static void jpc_picomp_destroy(jpc_picomp_t *picomp) { int rlvlno; jpc_pirlvl_t *pirlvl; if (picomp->pirlvls) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl_destroy(pirlvl); } jas_free(picomp->pirlvls); } } void jpc_pi_destroy(jpc_pi_t *pi) { jpc_picomp_t *picomp; int compno; if (pi->picomps) { for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { jpc_picomp_destroy(picomp); } jas_free(pi->picomps); } if (pi->pchglist) { jpc_pchglist_destroy(pi->pchglist); } jas_free(pi); } jpc_pi_t *jpc_pi_create0() { jpc_pi_t *pi; if (!(pi = jas_malloc(sizeof(jpc_pi_t)))) { return 0; } pi->picomps = 0; pi->pchgno = 0; if (!(pi->pchglist = jpc_pchglist_create())) { jas_free(pi); return 0; } return pi; } int jpc_pi_addpchg(jpc_pi_t *pi, jpc_pocpchg_t *pchg) { return jpc_pchglist_insert(pi->pchglist, -1, pchg); } jpc_pchglist_t *jpc_pchglist_create() { jpc_pchglist_t *pchglist; if (!(pchglist = jas_malloc(sizeof(jpc_pchglist_t)))) { return 0; } pchglist->numpchgs = 0; pchglist->maxpchgs = 0; pchglist->pchgs = 0; return pchglist; } int jpc_pchglist_insert(jpc_pchglist_t *pchglist, int pchgno, jpc_pchg_t *pchg) { int i; int newmaxpchgs; jpc_pchg_t **newpchgs; if (pchgno < 0) { pchgno = pchglist->numpchgs; } if (pchglist->numpchgs >= pchglist->maxpchgs) { newmaxpchgs = pchglist->maxpchgs + 128; if (!(newpchgs = jas_realloc2(pchglist->pchgs, newmaxpchgs, sizeof(jpc_pchg_t *)))) { return -1; } pchglist->maxpchgs = newmaxpchgs; pchglist->pchgs = newpchgs; } for (i = pchglist->numpchgs; i > pchgno; --i) { pchglist->pchgs[i] = pchglist->pchgs[i - 1]; } pchglist->pchgs[pchgno] = pchg; ++pchglist->numpchgs; return 0; } jpc_pchg_t *jpc_pchglist_remove(jpc_pchglist_t *pchglist, int pchgno) { int i; jpc_pchg_t *pchg; assert(pchgno < pchglist->numpchgs); pchg = pchglist->pchgs[pchgno]; for (i = pchgno + 1; i < pchglist->numpchgs; ++i) { pchglist->pchgs[i - 1] = pchglist->pchgs[i]; } --pchglist->numpchgs; return pchg; } jpc_pchg_t *jpc_pchg_copy(jpc_pchg_t *pchg) { jpc_pchg_t *newpchg; if (!(newpchg = jas_malloc(sizeof(jpc_pchg_t)))) { return 0; } *newpchg = *pchg; return newpchg; } jpc_pchglist_t *jpc_pchglist_copy(jpc_pchglist_t *pchglist) { jpc_pchglist_t *newpchglist; jpc_pchg_t *newpchg; int pchgno; if (!(newpchglist = jpc_pchglist_create())) { return 0; } for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { if (!(newpchg = jpc_pchg_copy(pchglist->pchgs[pchgno])) || jpc_pchglist_insert(newpchglist, -1, newpchg)) { jpc_pchglist_destroy(newpchglist); return 0; } } return newpchglist; } void jpc_pchglist_destroy(jpc_pchglist_t *pchglist) { int pchgno; if (pchglist->pchgs) 
{ for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { jpc_pchg_destroy(pchglist->pchgs[pchgno]); } jas_free(pchglist->pchgs); } jas_free(pchglist); } void jpc_pchg_destroy(jpc_pchg_t *pchg) { jas_free(pchg); } jpc_pchg_t *jpc_pchglist_get(jpc_pchglist_t *pchglist, int pchgno) { return pchglist->pchgs[pchgno]; } int jpc_pchglist_numpchgs(jpc_pchglist_t *pchglist) { return pchglist->numpchgs; } int jpc_pi_init(jpc_pi_t *pi) { int compno; int rlvlno; int prcno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; int *prclyrno; pi->prgvolfirst = 0; pi->valid = 0; pi->pktno = -1; pi->pchgno = -1; pi->pchg = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } } } return 0; }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ /* * Tier-2 Coding Library * * $Id$ */ #include "jasper/jas_math.h" #include "jasper/jas_malloc.h" #include "jpc_cs.h" #include "jpc_t2cod.h" #include "jpc_math.h" static int jpc_pi_nextlrcp(jpc_pi_t *pi); static int jpc_pi_nextrlcp(jpc_pi_t *pi); static int jpc_pi_nextrpcl(jpc_pi_t *pi); static int jpc_pi_nextpcrl(jpc_pi_t *pi); static int jpc_pi_nextcprl(jpc_pi_t *pi); int jpc_pi_next(jpc_pi_t *pi) { jpc_pchg_t *pchg; int ret; for (;;) { pi->valid = false; if (!pi->pchg) { ++pi->pchgno; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->prgvolfirst = true; if (pi->pchgno < jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = jpc_pchglist_get(pi->pchglist, pi->pchgno); } else if (pi->pchgno == jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = &pi->defaultpchg; } else { return 1; } } pchg = pi->pchg; switch (pchg->prgord) { case JPC_COD_LRCPPRG: ret = jpc_pi_nextlrcp(pi); break; case JPC_COD_RLCPPRG: ret = jpc_pi_nextrlcp(pi); break; case JPC_COD_RPCLPRG: ret = jpc_pi_nextrpcl(pi); break; case JPC_COD_PCRLPRG: ret = jpc_pi_nextpcrl(pi); break; case JPC_COD_CPRLPRG: ret = jpc_pi_nextcprl(pi); break; default: ret = -1; break; } if (!ret) { pi->valid = true; ++pi->pktno; return 0; } pi->pchg = 0; } } static int jpc_pi_nextlrcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = false; } for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; 
pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. 
if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; // Check for the potential for overflow problems. 
if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static void pirlvl_destroy(jpc_pirlvl_t *rlvl) { if (rlvl->prclyrnos) { jas_free(rlvl->prclyrnos); } } static void jpc_picomp_destroy(jpc_picomp_t *picomp) { int rlvlno; jpc_pirlvl_t *pirlvl; if (picomp->pirlvls) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl_destroy(pirlvl); } jas_free(picomp->pirlvls); } } void jpc_pi_destroy(jpc_pi_t *pi) { jpc_picomp_t *picomp; int compno; if (pi->picomps) { for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { jpc_picomp_destroy(picomp); } jas_free(pi->picomps); } if (pi->pchglist) { jpc_pchglist_destroy(pi->pchglist); } jas_free(pi); } jpc_pi_t *jpc_pi_create0() { jpc_pi_t *pi; if (!(pi = jas_malloc(sizeof(jpc_pi_t)))) { return 0; } pi->picomps = 0; pi->pchgno = 0; if (!(pi->pchglist = jpc_pchglist_create())) { jas_free(pi); return 0; } return pi; } int jpc_pi_addpchg(jpc_pi_t *pi, jpc_pocpchg_t *pchg) { return jpc_pchglist_insert(pi->pchglist, -1, pchg); } jpc_pchglist_t *jpc_pchglist_create() { jpc_pchglist_t *pchglist; if (!(pchglist = jas_malloc(sizeof(jpc_pchglist_t)))) { return 0; } pchglist->numpchgs = 0; pchglist->maxpchgs = 0; pchglist->pchgs = 0; return pchglist; } int 
jpc_pchglist_insert(jpc_pchglist_t *pchglist, int pchgno, jpc_pchg_t *pchg) { int i; int newmaxpchgs; jpc_pchg_t **newpchgs; if (pchgno < 0) { pchgno = pchglist->numpchgs; } if (pchglist->numpchgs >= pchglist->maxpchgs) { newmaxpchgs = pchglist->maxpchgs + 128; if (!(newpchgs = jas_realloc2(pchglist->pchgs, newmaxpchgs, sizeof(jpc_pchg_t *)))) { return -1; } pchglist->maxpchgs = newmaxpchgs; pchglist->pchgs = newpchgs; } for (i = pchglist->numpchgs; i > pchgno; --i) { pchglist->pchgs[i] = pchglist->pchgs[i - 1]; } pchglist->pchgs[pchgno] = pchg; ++pchglist->numpchgs; return 0; } jpc_pchg_t *jpc_pchglist_remove(jpc_pchglist_t *pchglist, int pchgno) { int i; jpc_pchg_t *pchg; assert(pchgno < pchglist->numpchgs); pchg = pchglist->pchgs[pchgno]; for (i = pchgno + 1; i < pchglist->numpchgs; ++i) { pchglist->pchgs[i - 1] = pchglist->pchgs[i]; } --pchglist->numpchgs; return pchg; } jpc_pchg_t *jpc_pchg_copy(jpc_pchg_t *pchg) { jpc_pchg_t *newpchg; if (!(newpchg = jas_malloc(sizeof(jpc_pchg_t)))) { return 0; } *newpchg = *pchg; return newpchg; } jpc_pchglist_t *jpc_pchglist_copy(jpc_pchglist_t *pchglist) { jpc_pchglist_t *newpchglist; jpc_pchg_t *newpchg; int pchgno; if (!(newpchglist = jpc_pchglist_create())) { return 0; } for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { if (!(newpchg = jpc_pchg_copy(pchglist->pchgs[pchgno])) || jpc_pchglist_insert(newpchglist, -1, newpchg)) { jpc_pchglist_destroy(newpchglist); return 0; } } return newpchglist; } void jpc_pchglist_destroy(jpc_pchglist_t *pchglist) { int pchgno; if (pchglist->pchgs) { for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { jpc_pchg_destroy(pchglist->pchgs[pchgno]); } jas_free(pchglist->pchgs); } jas_free(pchglist); } void jpc_pchg_destroy(jpc_pchg_t *pchg) { jas_free(pchg); } jpc_pchg_t *jpc_pchglist_get(jpc_pchglist_t *pchglist, int pchgno) { return pchglist->pchgs[pchgno]; } int jpc_pchglist_numpchgs(jpc_pchglist_t *pchglist) { return pchglist->numpchgs; } int jpc_pi_init(jpc_pi_t *pi) { int compno; int rlvlno; int prcno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; int *prclyrno; pi->prgvolfirst = 0; pi->valid = 0; pi->pktno = -1; pi->pchgno = -1; pi->pchg = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } } } return 0; }
static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; }
static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; }
{'added': [(201, '\t\t\t pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno,'), (202, '\t\t\t ++pi->picomp) {'), (251, '\t\t\t\t// Check for the potential for overflow problems.'), (252, '\t\t\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (253, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (254, '\t\t\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (255, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (256, '\t\t\t\t\treturn -1;'), (257, '\t\t\t\t}'), (258, '\t\t\t\txstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (259, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1));'), (260, '\t\t\t\tystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (261, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1));'), (291, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (292, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx)))'), (293, '\t\t\t\t\t || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) &&'), (294, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (295, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy)))'), (296, '\t\t\t\t\t || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) {'), (297, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (298, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (299, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (300, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (301, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (302, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (307, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int,'), (308, '\t\t\t\t\t\t pchg->lyrnoend); ++pi->lyrno) {'), (353, '\t\t\t\t// Check for the potential for overflow problems.'), (354, '\t\t\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (355, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (356, '\t\t\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (357, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (358, '\t\t\t\t\treturn -1;'), (359, '\t\t\t\t}'), (360, '\t\t\t\txstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (361, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1));'), (362, '\t\t\t\tystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (363, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1));'), (364, '\t\t\t\tpi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep);'), (365, '\t\t\t\tpi->ystep = (!pi->ystep) ? 
ystep : JAS_MIN(pi->ystep, ystep);'), (392, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (393, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) ||'), (395, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (396, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) ||'), (398, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (399, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (400, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (401, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (402, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (403, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (407, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend);'), (408, '\t\t\t\t\t\t ++pi->lyrno) {'), (446, '\tfor (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno];'), (447, '\t pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps;'), (448, '\t ++pi->compno, ++pi->picomp) {'), (450, '\t\t// Check for the potential for overflow problems.'), (451, '\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (452, '\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (453, '\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (454, '\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (455, '\t\t\treturn -1;'), (456, '\t\t}'), (486, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (487, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) ||'), (489, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (490, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) ||'), (492, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (493, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (494, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (495, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (496, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (497, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (498, '\t\t\t\t\t\tpi->prcno = prcvind * pi->pirlvl->numhprcs + prchind;'), (499, '\t\t\t\t\t\tassert(pi->prcno < pi->pirlvl->numprcs);'), (500, '\t\t\t\t\t\tfor (pi->lyrno = 0; pi->lyrno < pi->numlyrs &&'), (501, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend);'), (502, '\t\t\t\t\t\t ++pi->lyrno) {')], 'deleted': [(201, '\t\t\t pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) {'), (250, '\t\t\t\txstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn +'), (251, '\t\t\t\t picomp->numrlvls - rlvlno - 1));'), (252, '\t\t\t\tystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn +'), (253, '\t\t\t\t picomp->numrlvls - rlvlno - 1));'), (283, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx)))'), (284, '\t\t\t\t\t || !(pi->x % (1 << rpx))) &&'), (285, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy)))'), (286, '\t\t\t\t\t || !(pi->y % (1 << rpy)))) {'), (287, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (288, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (289, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (290, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (291, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (292, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (297, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {'), (342, '\t\t\t\txstep = picomp->hsamp * (1 <<'), 
(343, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls -'), (344, '\t\t\t\t rlvlno - 1));'), (345, '\t\t\t\tystep = picomp->vsamp * (1 <<'), (346, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls -'), (347, '\t\t\t\t rlvlno - 1));'), (348, '\t\t\t\tpi->xstep = (!pi->xstep) ? xstep :'), (349, '\t\t\t\t JAS_MIN(pi->xstep, xstep);'), (350, '\t\t\t\tpi->ystep = (!pi->ystep) ? ystep :'), (351, '\t\t\t\t JAS_MIN(pi->ystep, ystep);'), (378, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) ||'), (380, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) ||'), (382, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (383, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (384, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (385, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (386, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (387, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (391, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {'), (429, '\tfor (pi->compno = pchg->compnostart, pi->picomp ='), (430, '\t &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno,'), (431, '\t ++pi->picomp) {'), (462, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) ||'), (464, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) ||'), (466, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (467, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (468, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (469, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (470, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (471, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (472, '\t\t\t\t\t\tpi->prcno = prcvind *'), (473, '\t\t\t\t\t\t pi->pirlvl->numhprcs +'), (474, '\t\t\t\t\t\t prchind;'), (475, '\t\t\t\t\t\tassert(pi->prcno <'), (476, '\t\t\t\t\t\t pi->pirlvl->numprcs);'), (477, '\t\t\t\t\t\tfor (pi->lyrno = 0; pi->lyrno <'), (478, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {')]}
77
53
595
4,400
https://github.com/mdadams/jasper
CVE-2016-9583
['CWE-125']
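The jpc_pi_nextpcrl record above shows the same hardening applied to the PCRL iterator: besides the early return -1 when prcwidthexpn + numrlvls exceeds JAS_UINTFAST32_NUMBITS - 2, every "1 << rpx" / "1 << rpy" in the precinct-alignment test becomes "JAS_CAST(uint_fast32_t, 1) << ...". The short sketch below illustrates why the width of the shifted operand matters; the helper names and the sample exponent are illustrative, not taken from JasPer, and the fixed code additionally bounds the exponent up front as shown in the previous sketch.

#include <stdint.h>
#include <stdio.h>

static uint_fast32_t pow2_int(unsigned e)
{
	/* Pre-fix form: the literal 1 is an int, so the shift is performed in
	 * int and is undefined behaviour once e reaches the width of int. */
	return 1 << e;
}

static uint_fast32_t pow2_fast32(unsigned e)
{
	/* Post-fix form: shift a uint_fast32_t, as JAS_CAST(uint_fast32_t, 1)
	 * does in the diff, so exponents up to the type width minus one are
	 * well defined. */
	return (uint_fast32_t)1 << e;
}

int main(void)
{
	unsigned e = 20; /* safe for both forms; rpx/rpy in the iterator can grow larger */
	printf("%lu %lu\n", (unsigned long)pow2_int(e), (unsigned long)pow2_fast32(e));
	return 0;
}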
jpc_t2cod.c
jpc_pi_nextrlcp
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ /* * Tier-2 Coding Library * * $Id$ */ #include "jasper/jas_math.h" #include "jasper/jas_malloc.h" #include "jpc_cs.h" #include "jpc_t2cod.h" #include "jpc_math.h" static int jpc_pi_nextlrcp(jpc_pi_t *pi); static int jpc_pi_nextrlcp(jpc_pi_t *pi); static int jpc_pi_nextrpcl(jpc_pi_t *pi); static int jpc_pi_nextpcrl(jpc_pi_t *pi); static int jpc_pi_nextcprl(jpc_pi_t *pi); int jpc_pi_next(jpc_pi_t *pi) { jpc_pchg_t *pchg; int ret; for (;;) { pi->valid = false; if (!pi->pchg) { ++pi->pchgno; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->prgvolfirst = true; if (pi->pchgno < jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = jpc_pchglist_get(pi->pchglist, pi->pchgno); } else if (pi->pchgno == jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = &pi->defaultpchg; } else { return 1; } } pchg = pi->pchg; switch (pchg->prgord) { case JPC_COD_LRCPPRG: ret = jpc_pi_nextlrcp(pi); break; case JPC_COD_RLCPPRG: ret = jpc_pi_nextrlcp(pi); break; case JPC_COD_RPCLPRG: ret = jpc_pi_nextrpcl(pi); break; case JPC_COD_PCRLPRG: ret = jpc_pi_nextpcrl(pi); break; case JPC_COD_CPRLPRG: ret = jpc_pi_nextcprl(pi); break; default: ret = -1; break; } if (!ret) { pi->valid = true; ++pi->pktno; return 0; } pi->pchg = 0; } } static int jpc_pi_nextlrcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = false; } for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; 
pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (1 << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (1 << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? 
ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || 
!(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static void pirlvl_destroy(jpc_pirlvl_t *rlvl) { if (rlvl->prclyrnos) { jas_free(rlvl->prclyrnos); } } static void jpc_picomp_destroy(jpc_picomp_t *picomp) { int rlvlno; jpc_pirlvl_t *pirlvl; if (picomp->pirlvls) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl_destroy(pirlvl); } jas_free(picomp->pirlvls); } } void jpc_pi_destroy(jpc_pi_t *pi) { jpc_picomp_t *picomp; int compno; if (pi->picomps) { for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { jpc_picomp_destroy(picomp); } jas_free(pi->picomps); } if (pi->pchglist) { jpc_pchglist_destroy(pi->pchglist); } jas_free(pi); } jpc_pi_t *jpc_pi_create0() { jpc_pi_t *pi; if (!(pi = jas_malloc(sizeof(jpc_pi_t)))) { return 0; } pi->picomps = 0; pi->pchgno = 0; if (!(pi->pchglist = jpc_pchglist_create())) { jas_free(pi); return 0; } return pi; } int jpc_pi_addpchg(jpc_pi_t *pi, jpc_pocpchg_t *pchg) { return jpc_pchglist_insert(pi->pchglist, -1, pchg); } jpc_pchglist_t *jpc_pchglist_create() { jpc_pchglist_t *pchglist; if (!(pchglist = jas_malloc(sizeof(jpc_pchglist_t)))) { return 0; } pchglist->numpchgs = 0; pchglist->maxpchgs = 0; pchglist->pchgs = 0; return pchglist; } int jpc_pchglist_insert(jpc_pchglist_t *pchglist, int pchgno, jpc_pchg_t *pchg) { int i; int newmaxpchgs; jpc_pchg_t **newpchgs; if (pchgno < 0) { pchgno = pchglist->numpchgs; } if (pchglist->numpchgs >= pchglist->maxpchgs) { newmaxpchgs = pchglist->maxpchgs + 128; if (!(newpchgs = jas_realloc2(pchglist->pchgs, newmaxpchgs, sizeof(jpc_pchg_t *)))) { return -1; } pchglist->maxpchgs = newmaxpchgs; pchglist->pchgs = newpchgs; } for (i = pchglist->numpchgs; i > pchgno; --i) { pchglist->pchgs[i] = pchglist->pchgs[i - 1]; } pchglist->pchgs[pchgno] = pchg; ++pchglist->numpchgs; return 0; } jpc_pchg_t *jpc_pchglist_remove(jpc_pchglist_t *pchglist, int pchgno) { int i; jpc_pchg_t *pchg; assert(pchgno < pchglist->numpchgs); pchg = pchglist->pchgs[pchgno]; for (i = pchgno + 1; i < pchglist->numpchgs; ++i) { pchglist->pchgs[i - 1] = pchglist->pchgs[i]; } --pchglist->numpchgs; return pchg; } jpc_pchg_t *jpc_pchg_copy(jpc_pchg_t *pchg) { jpc_pchg_t *newpchg; if (!(newpchg = jas_malloc(sizeof(jpc_pchg_t)))) { return 0; } *newpchg = *pchg; return newpchg; } jpc_pchglist_t *jpc_pchglist_copy(jpc_pchglist_t *pchglist) { jpc_pchglist_t *newpchglist; jpc_pchg_t *newpchg; int pchgno; if (!(newpchglist = jpc_pchglist_create())) { return 0; } for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { if (!(newpchg = jpc_pchg_copy(pchglist->pchgs[pchgno])) || jpc_pchglist_insert(newpchglist, -1, newpchg)) { jpc_pchglist_destroy(newpchglist); return 0; } } return newpchglist; } void jpc_pchglist_destroy(jpc_pchglist_t *pchglist) { int pchgno; if (pchglist->pchgs) 
{ for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { jpc_pchg_destroy(pchglist->pchgs[pchgno]); } jas_free(pchglist->pchgs); } jas_free(pchglist); } void jpc_pchg_destroy(jpc_pchg_t *pchg) { jas_free(pchg); } jpc_pchg_t *jpc_pchglist_get(jpc_pchglist_t *pchglist, int pchgno) { return pchglist->pchgs[pchgno]; } int jpc_pchglist_numpchgs(jpc_pchglist_t *pchglist) { return pchglist->numpchgs; } int jpc_pi_init(jpc_pi_t *pi) { int compno; int rlvlno; int prcno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; int *prclyrno; pi->prgvolfirst = 0; pi->valid = 0; pi->pktno = -1; pi->pchgno = -1; pi->pchg = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } } } return 0; }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ /* * Tier-2 Coding Library * * $Id$ */ #include "jasper/jas_math.h" #include "jasper/jas_malloc.h" #include "jpc_cs.h" #include "jpc_t2cod.h" #include "jpc_math.h" static int jpc_pi_nextlrcp(jpc_pi_t *pi); static int jpc_pi_nextrlcp(jpc_pi_t *pi); static int jpc_pi_nextrpcl(jpc_pi_t *pi); static int jpc_pi_nextpcrl(jpc_pi_t *pi); static int jpc_pi_nextcprl(jpc_pi_t *pi); int jpc_pi_next(jpc_pi_t *pi) { jpc_pchg_t *pchg; int ret; for (;;) { pi->valid = false; if (!pi->pchg) { ++pi->pchgno; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->prgvolfirst = true; if (pi->pchgno < jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = jpc_pchglist_get(pi->pchglist, pi->pchgno); } else if (pi->pchgno == jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = &pi->defaultpchg; } else { return 1; } } pchg = pi->pchg; switch (pchg->prgord) { case JPC_COD_LRCPPRG: ret = jpc_pi_nextlrcp(pi); break; case JPC_COD_RLCPPRG: ret = jpc_pi_nextrlcp(pi); break; case JPC_COD_RPCLPRG: ret = jpc_pi_nextrpcl(pi); break; case JPC_COD_PCRLPRG: ret = jpc_pi_nextpcrl(pi); break; case JPC_COD_CPRLPRG: ret = jpc_pi_nextcprl(pi); break; default: ret = -1; break; } if (!ret) { pi->valid = true; ++pi->pktno; return 0; } pi->pchg = 0; } } static int jpc_pi_nextlrcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = false; } for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; 
pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. 
if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; // Check for the potential for overflow problems. 
if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static void pirlvl_destroy(jpc_pirlvl_t *rlvl) { if (rlvl->prclyrnos) { jas_free(rlvl->prclyrnos); } } static void jpc_picomp_destroy(jpc_picomp_t *picomp) { int rlvlno; jpc_pirlvl_t *pirlvl; if (picomp->pirlvls) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl_destroy(pirlvl); } jas_free(picomp->pirlvls); } } void jpc_pi_destroy(jpc_pi_t *pi) { jpc_picomp_t *picomp; int compno; if (pi->picomps) { for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { jpc_picomp_destroy(picomp); } jas_free(pi->picomps); } if (pi->pchglist) { jpc_pchglist_destroy(pi->pchglist); } jas_free(pi); } jpc_pi_t *jpc_pi_create0() { jpc_pi_t *pi; if (!(pi = jas_malloc(sizeof(jpc_pi_t)))) { return 0; } pi->picomps = 0; pi->pchgno = 0; if (!(pi->pchglist = jpc_pchglist_create())) { jas_free(pi); return 0; } return pi; } int jpc_pi_addpchg(jpc_pi_t *pi, jpc_pocpchg_t *pchg) { return jpc_pchglist_insert(pi->pchglist, -1, pchg); } jpc_pchglist_t *jpc_pchglist_create() { jpc_pchglist_t *pchglist; if (!(pchglist = jas_malloc(sizeof(jpc_pchglist_t)))) { return 0; } pchglist->numpchgs = 0; pchglist->maxpchgs = 0; pchglist->pchgs = 0; return pchglist; } int 
jpc_pchglist_insert(jpc_pchglist_t *pchglist, int pchgno, jpc_pchg_t *pchg) { int i; int newmaxpchgs; jpc_pchg_t **newpchgs; if (pchgno < 0) { pchgno = pchglist->numpchgs; } if (pchglist->numpchgs >= pchglist->maxpchgs) { newmaxpchgs = pchglist->maxpchgs + 128; if (!(newpchgs = jas_realloc2(pchglist->pchgs, newmaxpchgs, sizeof(jpc_pchg_t *)))) { return -1; } pchglist->maxpchgs = newmaxpchgs; pchglist->pchgs = newpchgs; } for (i = pchglist->numpchgs; i > pchgno; --i) { pchglist->pchgs[i] = pchglist->pchgs[i - 1]; } pchglist->pchgs[pchgno] = pchg; ++pchglist->numpchgs; return 0; } jpc_pchg_t *jpc_pchglist_remove(jpc_pchglist_t *pchglist, int pchgno) { int i; jpc_pchg_t *pchg; assert(pchgno < pchglist->numpchgs); pchg = pchglist->pchgs[pchgno]; for (i = pchgno + 1; i < pchglist->numpchgs; ++i) { pchglist->pchgs[i - 1] = pchglist->pchgs[i]; } --pchglist->numpchgs; return pchg; } jpc_pchg_t *jpc_pchg_copy(jpc_pchg_t *pchg) { jpc_pchg_t *newpchg; if (!(newpchg = jas_malloc(sizeof(jpc_pchg_t)))) { return 0; } *newpchg = *pchg; return newpchg; } jpc_pchglist_t *jpc_pchglist_copy(jpc_pchglist_t *pchglist) { jpc_pchglist_t *newpchglist; jpc_pchg_t *newpchg; int pchgno; if (!(newpchglist = jpc_pchglist_create())) { return 0; } for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { if (!(newpchg = jpc_pchg_copy(pchglist->pchgs[pchgno])) || jpc_pchglist_insert(newpchglist, -1, newpchg)) { jpc_pchglist_destroy(newpchglist); return 0; } } return newpchglist; } void jpc_pchglist_destroy(jpc_pchglist_t *pchglist) { int pchgno; if (pchglist->pchgs) { for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { jpc_pchg_destroy(pchglist->pchgs[pchgno]); } jas_free(pchglist->pchgs); } jas_free(pchglist); } void jpc_pchg_destroy(jpc_pchg_t *pchg) { jas_free(pchg); } jpc_pchg_t *jpc_pchglist_get(jpc_pchglist_t *pchglist, int pchgno) { return pchglist->pchgs[pchgno]; } int jpc_pchglist_numpchgs(jpc_pchglist_t *pchglist) { return pchglist->numpchgs; } int jpc_pi_init(jpc_pi_t *pi) { int compno; int rlvlno; int prcno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; int *prclyrno; pi->prgvolfirst = 0; pi->valid = 0; pi->pktno = -1; pi->pchgno = -1; pi->pchg = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } } } return 0; }
static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; }
static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; }
{'added': [(201, '\t\t\t pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno,'), (202, '\t\t\t ++pi->picomp) {'), (251, '\t\t\t\t// Check for the potential for overflow problems.'), (252, '\t\t\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (253, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (254, '\t\t\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (255, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (256, '\t\t\t\t\treturn -1;'), (257, '\t\t\t\t}'), (258, '\t\t\t\txstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (259, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1));'), (260, '\t\t\t\tystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (261, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1));'), (291, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (292, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx)))'), (293, '\t\t\t\t\t || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) &&'), (294, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (295, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy)))'), (296, '\t\t\t\t\t || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) {'), (297, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (298, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (299, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (300, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (301, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (302, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (307, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int,'), (308, '\t\t\t\t\t\t pchg->lyrnoend); ++pi->lyrno) {'), (353, '\t\t\t\t// Check for the potential for overflow problems.'), (354, '\t\t\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (355, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (356, '\t\t\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (357, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (358, '\t\t\t\t\treturn -1;'), (359, '\t\t\t\t}'), (360, '\t\t\t\txstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (361, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1));'), (362, '\t\t\t\tystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (363, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1));'), (364, '\t\t\t\tpi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep);'), (365, '\t\t\t\tpi->ystep = (!pi->ystep) ? 
ystep : JAS_MIN(pi->ystep, ystep);'), (392, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (393, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) ||'), (395, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (396, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) ||'), (398, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (399, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (400, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (401, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (402, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (403, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (407, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend);'), (408, '\t\t\t\t\t\t ++pi->lyrno) {'), (446, '\tfor (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno];'), (447, '\t pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps;'), (448, '\t ++pi->compno, ++pi->picomp) {'), (450, '\t\t// Check for the potential for overflow problems.'), (451, '\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (452, '\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (453, '\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (454, '\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (455, '\t\t\treturn -1;'), (456, '\t\t}'), (486, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (487, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) ||'), (489, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (490, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) ||'), (492, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (493, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (494, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (495, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (496, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (497, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (498, '\t\t\t\t\t\tpi->prcno = prcvind * pi->pirlvl->numhprcs + prchind;'), (499, '\t\t\t\t\t\tassert(pi->prcno < pi->pirlvl->numprcs);'), (500, '\t\t\t\t\t\tfor (pi->lyrno = 0; pi->lyrno < pi->numlyrs &&'), (501, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend);'), (502, '\t\t\t\t\t\t ++pi->lyrno) {')], 'deleted': [(201, '\t\t\t pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) {'), (250, '\t\t\t\txstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn +'), (251, '\t\t\t\t picomp->numrlvls - rlvlno - 1));'), (252, '\t\t\t\tystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn +'), (253, '\t\t\t\t picomp->numrlvls - rlvlno - 1));'), (283, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx)))'), (284, '\t\t\t\t\t || !(pi->x % (1 << rpx))) &&'), (285, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy)))'), (286, '\t\t\t\t\t || !(pi->y % (1 << rpy)))) {'), (287, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (288, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (289, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (290, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (291, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (292, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (297, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {'), (342, '\t\t\t\txstep = picomp->hsamp * (1 <<'), 
(343, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls -'), (344, '\t\t\t\t rlvlno - 1));'), (345, '\t\t\t\tystep = picomp->vsamp * (1 <<'), (346, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls -'), (347, '\t\t\t\t rlvlno - 1));'), (348, '\t\t\t\tpi->xstep = (!pi->xstep) ? xstep :'), (349, '\t\t\t\t JAS_MIN(pi->xstep, xstep);'), (350, '\t\t\t\tpi->ystep = (!pi->ystep) ? ystep :'), (351, '\t\t\t\t JAS_MIN(pi->ystep, ystep);'), (378, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) ||'), (380, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) ||'), (382, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (383, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (384, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (385, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (386, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (387, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (391, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {'), (429, '\tfor (pi->compno = pchg->compnostart, pi->picomp ='), (430, '\t &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno,'), (431, '\t ++pi->picomp) {'), (462, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) ||'), (464, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) ||'), (466, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (467, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (468, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (469, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (470, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (471, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (472, '\t\t\t\t\t\tpi->prcno = prcvind *'), (473, '\t\t\t\t\t\t pi->pirlvl->numhprcs +'), (474, '\t\t\t\t\t\t prchind;'), (475, '\t\t\t\t\t\tassert(pi->prcno <'), (476, '\t\t\t\t\t\t pi->pirlvl->numprcs);'), (477, '\t\t\t\t\t\tfor (pi->lyrno = 0; pi->lyrno <'), (478, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {')]}
77
53
595
4400
https://github.com/mdadams/jasper
CVE-2016-9583
['CWE-125']
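Editor's note on the record above: the diff for CVE-2016-9583 (CWE-125) replaces the plain "1 << (prcwidthexpn + numrlvls - rlvlno - 1)" precinct step computation with a width-checked, explicitly typed shift, returning -1 from the iterator when the exponent sum would exceed the usable bit width. The stand-alone C sketch below only illustrates that guard pattern; safe_step, UINTFAST32_NUMBITS, and the sample values are invented for this example and are not JasPer names (the patch itself uses JAS_CAST(uint_fast32_t, 1) and JAS_UINTFAST32_NUMBITS).

/*
 * Illustrative sketch of the shift-overflow guard added by the patch.
 * Hypothetical helper; only the check/shift shape mirrors the diff above.
 */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for JasPer's JAS_UINTFAST32_NUMBITS. */
#define UINTFAST32_NUMBITS (sizeof(uint_fast32_t) * 8)

/* Returns -1 (as the patched iterators do) when the shift amount would be
 * too large for uint_fast32_t; otherwise writes the step through *step. */
static int safe_step(unsigned prcwidthexpn, unsigned numrlvls, unsigned rlvlno,
    uint_fast32_t hsamp, uint_fast32_t *step)
{
	if (prcwidthexpn + numrlvls > UINTFAST32_NUMBITS - 2) {
		/* Refuse the code stream instead of shifting past the type width. */
		return -1;
	}
	/* Shift an explicitly typed 1, not the int constant 1. */
	*step = hsamp *
	    ((uint_fast32_t)1 << (prcwidthexpn + numrlvls - rlvlno - 1));
	return 0;
}

int main(void)
{
	uint_fast32_t step;
	/* Hypothetical field values; a real code stream supplies these. */
	if (safe_step(15, 6, 0, 2, &step) == 0) {
		printf("xstep = %lu\n", (unsigned long)step);
	}
	return 0;
}

The "- 2" in the bound appears to leave a little headroom below the type width before the multiplication by the sampling factor; the same check is inserted by the patch at the start of jpc_pi_nextrpcl, jpc_pi_nextpcrl, and jpc_pi_nextcprl.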
jpc_t2cod.c
jpc_pi_nextrpcl
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ /* * Tier-2 Coding Library * * $Id$ */ #include "jasper/jas_math.h" #include "jasper/jas_malloc.h" #include "jpc_cs.h" #include "jpc_t2cod.h" #include "jpc_math.h" static int jpc_pi_nextlrcp(jpc_pi_t *pi); static int jpc_pi_nextrlcp(jpc_pi_t *pi); static int jpc_pi_nextrpcl(jpc_pi_t *pi); static int jpc_pi_nextpcrl(jpc_pi_t *pi); static int jpc_pi_nextcprl(jpc_pi_t *pi); int jpc_pi_next(jpc_pi_t *pi) { jpc_pchg_t *pchg; int ret; for (;;) { pi->valid = false; if (!pi->pchg) { ++pi->pchgno; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->prgvolfirst = true; if (pi->pchgno < jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = jpc_pchglist_get(pi->pchglist, pi->pchgno); } else if (pi->pchgno == jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = &pi->defaultpchg; } else { return 1; } } pchg = pi->pchg; switch (pchg->prgord) { case JPC_COD_LRCPPRG: ret = jpc_pi_nextlrcp(pi); break; case JPC_COD_RLCPPRG: ret = jpc_pi_nextrlcp(pi); break; case JPC_COD_RPCLPRG: ret = jpc_pi_nextrpcl(pi); break; case JPC_COD_PCRLPRG: ret = jpc_pi_nextpcrl(pi); break; case JPC_COD_CPRLPRG: ret = jpc_pi_nextcprl(pi); break; default: ret = -1; break; } if (!ret) { pi->valid = true; ++pi->pktno; return 0; } pi->pchg = 0; } } static int jpc_pi_nextlrcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = false; } for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; 
pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (1 << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (1 << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? 
ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || 
!(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static void pirlvl_destroy(jpc_pirlvl_t *rlvl) { if (rlvl->prclyrnos) { jas_free(rlvl->prclyrnos); } } static void jpc_picomp_destroy(jpc_picomp_t *picomp) { int rlvlno; jpc_pirlvl_t *pirlvl; if (picomp->pirlvls) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl_destroy(pirlvl); } jas_free(picomp->pirlvls); } } void jpc_pi_destroy(jpc_pi_t *pi) { jpc_picomp_t *picomp; int compno; if (pi->picomps) { for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { jpc_picomp_destroy(picomp); } jas_free(pi->picomps); } if (pi->pchglist) { jpc_pchglist_destroy(pi->pchglist); } jas_free(pi); } jpc_pi_t *jpc_pi_create0() { jpc_pi_t *pi; if (!(pi = jas_malloc(sizeof(jpc_pi_t)))) { return 0; } pi->picomps = 0; pi->pchgno = 0; if (!(pi->pchglist = jpc_pchglist_create())) { jas_free(pi); return 0; } return pi; } int jpc_pi_addpchg(jpc_pi_t *pi, jpc_pocpchg_t *pchg) { return jpc_pchglist_insert(pi->pchglist, -1, pchg); } jpc_pchglist_t *jpc_pchglist_create() { jpc_pchglist_t *pchglist; if (!(pchglist = jas_malloc(sizeof(jpc_pchglist_t)))) { return 0; } pchglist->numpchgs = 0; pchglist->maxpchgs = 0; pchglist->pchgs = 0; return pchglist; } int jpc_pchglist_insert(jpc_pchglist_t *pchglist, int pchgno, jpc_pchg_t *pchg) { int i; int newmaxpchgs; jpc_pchg_t **newpchgs; if (pchgno < 0) { pchgno = pchglist->numpchgs; } if (pchglist->numpchgs >= pchglist->maxpchgs) { newmaxpchgs = pchglist->maxpchgs + 128; if (!(newpchgs = jas_realloc2(pchglist->pchgs, newmaxpchgs, sizeof(jpc_pchg_t *)))) { return -1; } pchglist->maxpchgs = newmaxpchgs; pchglist->pchgs = newpchgs; } for (i = pchglist->numpchgs; i > pchgno; --i) { pchglist->pchgs[i] = pchglist->pchgs[i - 1]; } pchglist->pchgs[pchgno] = pchg; ++pchglist->numpchgs; return 0; } jpc_pchg_t *jpc_pchglist_remove(jpc_pchglist_t *pchglist, int pchgno) { int i; jpc_pchg_t *pchg; assert(pchgno < pchglist->numpchgs); pchg = pchglist->pchgs[pchgno]; for (i = pchgno + 1; i < pchglist->numpchgs; ++i) { pchglist->pchgs[i - 1] = pchglist->pchgs[i]; } --pchglist->numpchgs; return pchg; } jpc_pchg_t *jpc_pchg_copy(jpc_pchg_t *pchg) { jpc_pchg_t *newpchg; if (!(newpchg = jas_malloc(sizeof(jpc_pchg_t)))) { return 0; } *newpchg = *pchg; return newpchg; } jpc_pchglist_t *jpc_pchglist_copy(jpc_pchglist_t *pchglist) { jpc_pchglist_t *newpchglist; jpc_pchg_t *newpchg; int pchgno; if (!(newpchglist = jpc_pchglist_create())) { return 0; } for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { if (!(newpchg = jpc_pchg_copy(pchglist->pchgs[pchgno])) || jpc_pchglist_insert(newpchglist, -1, newpchg)) { jpc_pchglist_destroy(newpchglist); return 0; } } return newpchglist; } void jpc_pchglist_destroy(jpc_pchglist_t *pchglist) { int pchgno; if (pchglist->pchgs) 
{ for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { jpc_pchg_destroy(pchglist->pchgs[pchgno]); } jas_free(pchglist->pchgs); } jas_free(pchglist); } void jpc_pchg_destroy(jpc_pchg_t *pchg) { jas_free(pchg); } jpc_pchg_t *jpc_pchglist_get(jpc_pchglist_t *pchglist, int pchgno) { return pchglist->pchgs[pchgno]; } int jpc_pchglist_numpchgs(jpc_pchglist_t *pchglist) { return pchglist->numpchgs; } int jpc_pi_init(jpc_pi_t *pi) { int compno; int rlvlno; int prcno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; int *prclyrno; pi->prgvolfirst = 0; pi->valid = 0; pi->pktno = -1; pi->pchgno = -1; pi->pchg = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } } } return 0; }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ /* * Tier-2 Coding Library * * $Id$ */ #include "jasper/jas_math.h" #include "jasper/jas_malloc.h" #include "jpc_cs.h" #include "jpc_t2cod.h" #include "jpc_math.h" static int jpc_pi_nextlrcp(jpc_pi_t *pi); static int jpc_pi_nextrlcp(jpc_pi_t *pi); static int jpc_pi_nextrpcl(jpc_pi_t *pi); static int jpc_pi_nextpcrl(jpc_pi_t *pi); static int jpc_pi_nextcprl(jpc_pi_t *pi); int jpc_pi_next(jpc_pi_t *pi) { jpc_pchg_t *pchg; int ret; for (;;) { pi->valid = false; if (!pi->pchg) { ++pi->pchgno; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->prgvolfirst = true; if (pi->pchgno < jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = jpc_pchglist_get(pi->pchglist, pi->pchgno); } else if (pi->pchgno == jpc_pchglist_numpchgs(pi->pchglist)) { pi->pchg = &pi->defaultpchg; } else { return 1; } } pchg = pi->pchg; switch (pchg->prgord) { case JPC_COD_LRCPPRG: ret = jpc_pi_nextlrcp(pi); break; case JPC_COD_RLCPPRG: ret = jpc_pi_nextrlcp(pi); break; case JPC_COD_RPCLPRG: ret = jpc_pi_nextrpcl(pi); break; case JPC_COD_PCRLPRG: ret = jpc_pi_nextpcrl(pi); break; case JPC_COD_CPRLPRG: ret = jpc_pi_nextcprl(pi); break; default: ret = -1; break; } if (!ret) { pi->valid = true; ++pi->pktno; return 0; } pi->pchg = 0; } } static int jpc_pi_nextlrcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = false; } for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrlcp(register jpc_pi_t *pi) { jpc_pchg_t *pchg; int *prclyrno; pchg = pi->pchg; if (!pi->prgvolfirst) { assert(pi->prcno < pi->pirlvl->numprcs); prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; goto skip; } else { pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pi->maxrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno) { for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; for (pi->prcno = 0, prclyrno = pi->pirlvl->prclyrnos; pi->prcno < pi->pirlvl->numprcs; ++pi->prcno, ++prclyrno) { if (pi->lyrno >= *prclyrno) { *prclyrno = pi->lyrno; ++(*prclyrno); return 0; } skip: ; } } } } return 1; } static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; 
pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. 
if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static int jpc_pi_nextcprl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->prgvolfirst = 0; } for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { pirlvl = pi->picomp->pirlvls; // Check for the potential for overflow problems. 
if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1)); pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1)); for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1]; rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) { pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + pi->picomp->numrlvls - rlvlno - 1))); pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + pi->picomp->numrlvls - rlvlno - 1))); } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } static void pirlvl_destroy(jpc_pirlvl_t *rlvl) { if (rlvl->prclyrnos) { jas_free(rlvl->prclyrnos); } } static void jpc_picomp_destroy(jpc_picomp_t *picomp) { int rlvlno; jpc_pirlvl_t *pirlvl; if (picomp->pirlvls) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl_destroy(pirlvl); } jas_free(picomp->pirlvls); } } void jpc_pi_destroy(jpc_pi_t *pi) { jpc_picomp_t *picomp; int compno; if (pi->picomps) { for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { jpc_picomp_destroy(picomp); } jas_free(pi->picomps); } if (pi->pchglist) { jpc_pchglist_destroy(pi->pchglist); } jas_free(pi); } jpc_pi_t *jpc_pi_create0() { jpc_pi_t *pi; if (!(pi = jas_malloc(sizeof(jpc_pi_t)))) { return 0; } pi->picomps = 0; pi->pchgno = 0; if (!(pi->pchglist = jpc_pchglist_create())) { jas_free(pi); return 0; } return pi; } int jpc_pi_addpchg(jpc_pi_t *pi, jpc_pocpchg_t *pchg) { return jpc_pchglist_insert(pi->pchglist, -1, pchg); } jpc_pchglist_t *jpc_pchglist_create() { jpc_pchglist_t *pchglist; if (!(pchglist = jas_malloc(sizeof(jpc_pchglist_t)))) { return 0; } pchglist->numpchgs = 0; pchglist->maxpchgs = 0; pchglist->pchgs = 0; return pchglist; } int 
jpc_pchglist_insert(jpc_pchglist_t *pchglist, int pchgno, jpc_pchg_t *pchg) { int i; int newmaxpchgs; jpc_pchg_t **newpchgs; if (pchgno < 0) { pchgno = pchglist->numpchgs; } if (pchglist->numpchgs >= pchglist->maxpchgs) { newmaxpchgs = pchglist->maxpchgs + 128; if (!(newpchgs = jas_realloc2(pchglist->pchgs, newmaxpchgs, sizeof(jpc_pchg_t *)))) { return -1; } pchglist->maxpchgs = newmaxpchgs; pchglist->pchgs = newpchgs; } for (i = pchglist->numpchgs; i > pchgno; --i) { pchglist->pchgs[i] = pchglist->pchgs[i - 1]; } pchglist->pchgs[pchgno] = pchg; ++pchglist->numpchgs; return 0; } jpc_pchg_t *jpc_pchglist_remove(jpc_pchglist_t *pchglist, int pchgno) { int i; jpc_pchg_t *pchg; assert(pchgno < pchglist->numpchgs); pchg = pchglist->pchgs[pchgno]; for (i = pchgno + 1; i < pchglist->numpchgs; ++i) { pchglist->pchgs[i - 1] = pchglist->pchgs[i]; } --pchglist->numpchgs; return pchg; } jpc_pchg_t *jpc_pchg_copy(jpc_pchg_t *pchg) { jpc_pchg_t *newpchg; if (!(newpchg = jas_malloc(sizeof(jpc_pchg_t)))) { return 0; } *newpchg = *pchg; return newpchg; } jpc_pchglist_t *jpc_pchglist_copy(jpc_pchglist_t *pchglist) { jpc_pchglist_t *newpchglist; jpc_pchg_t *newpchg; int pchgno; if (!(newpchglist = jpc_pchglist_create())) { return 0; } for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { if (!(newpchg = jpc_pchg_copy(pchglist->pchgs[pchgno])) || jpc_pchglist_insert(newpchglist, -1, newpchg)) { jpc_pchglist_destroy(newpchglist); return 0; } } return newpchglist; } void jpc_pchglist_destroy(jpc_pchglist_t *pchglist) { int pchgno; if (pchglist->pchgs) { for (pchgno = 0; pchgno < pchglist->numpchgs; ++pchgno) { jpc_pchg_destroy(pchglist->pchgs[pchgno]); } jas_free(pchglist->pchgs); } jas_free(pchglist); } void jpc_pchg_destroy(jpc_pchg_t *pchg) { jas_free(pchg); } jpc_pchg_t *jpc_pchglist_get(jpc_pchglist_t *pchglist, int pchgno) { return pchglist->pchgs[pchgno]; } int jpc_pchglist_numpchgs(jpc_pchglist_t *pchglist) { return pchglist->numpchgs; } int jpc_pi_init(jpc_pi_t *pi) { int compno; int rlvlno; int prcno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; int *prclyrno; pi->prgvolfirst = 0; pi->valid = 0; pi->pktno = -1; pi->pchgno = -1; pi->pchg = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } } } return 0; }
static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (1 << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (1 << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; }
static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; }
{'added': [(201, '\t\t\t pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno,'), (202, '\t\t\t ++pi->picomp) {'), (251, '\t\t\t\t// Check for the potential for overflow problems.'), (252, '\t\t\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (253, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (254, '\t\t\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (255, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (256, '\t\t\t\t\treturn -1;'), (257, '\t\t\t\t}'), (258, '\t\t\t\txstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (259, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1));'), (260, '\t\t\t\tystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (261, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1));'), (291, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (292, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx)))'), (293, '\t\t\t\t\t || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) &&'), (294, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (295, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy)))'), (296, '\t\t\t\t\t || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) {'), (297, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (298, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (299, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (300, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (301, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (302, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (307, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int,'), (308, '\t\t\t\t\t\t pchg->lyrnoend); ++pi->lyrno) {'), (353, '\t\t\t\t// Check for the potential for overflow problems.'), (354, '\t\t\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (355, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (356, '\t\t\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (357, '\t\t\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (358, '\t\t\t\t\treturn -1;'), (359, '\t\t\t\t}'), (360, '\t\t\t\txstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (361, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1));'), (362, '\t\t\t\tystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<'), (363, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1));'), (364, '\t\t\t\tpi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep);'), (365, '\t\t\t\tpi->ystep = (!pi->ystep) ? 
ystep : JAS_MIN(pi->ystep, ystep);'), (392, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (393, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) ||'), (395, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (396, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) ||'), (398, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (399, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (400, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (401, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (402, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (403, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (407, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend);'), (408, '\t\t\t\t\t\t ++pi->lyrno) {'), (446, '\tfor (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno];'), (447, '\t pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps;'), (448, '\t ++pi->compno, ++pi->picomp) {'), (450, '\t\t// Check for the potential for overflow problems.'), (451, '\t\tif (pirlvl->prcwidthexpn + pi->picomp->numrlvls >'), (452, '\t\t JAS_UINTFAST32_NUMBITS - 2 ||'), (453, '\t\t pirlvl->prcheightexpn + pi->picomp->numrlvls >'), (454, '\t\t JAS_UINTFAST32_NUMBITS - 2) {'), (455, '\t\t\treturn -1;'), (456, '\t\t}'), (486, '\t\t\t\t\tif (((pi->x == pi->xstart &&'), (487, '\t\t\t\t\t ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) ||'), (489, '\t\t\t\t\t ((pi->y == pi->ystart &&'), (490, '\t\t\t\t\t ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) ||'), (492, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,'), (493, '\t\t\t\t\t\t pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -'), (494, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);'), (495, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,'), (496, '\t\t\t\t\t\t pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -'), (497, '\t\t\t\t\t\t JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);'), (498, '\t\t\t\t\t\tpi->prcno = prcvind * pi->pirlvl->numhprcs + prchind;'), (499, '\t\t\t\t\t\tassert(pi->prcno < pi->pirlvl->numprcs);'), (500, '\t\t\t\t\t\tfor (pi->lyrno = 0; pi->lyrno < pi->numlyrs &&'), (501, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend);'), (502, '\t\t\t\t\t\t ++pi->lyrno) {')], 'deleted': [(201, '\t\t\t pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) {'), (250, '\t\t\t\txstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn +'), (251, '\t\t\t\t picomp->numrlvls - rlvlno - 1));'), (252, '\t\t\t\tystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn +'), (253, '\t\t\t\t picomp->numrlvls - rlvlno - 1));'), (283, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx)))'), (284, '\t\t\t\t\t || !(pi->x % (1 << rpx))) &&'), (285, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy)))'), (286, '\t\t\t\t\t || !(pi->y % (1 << rpy)))) {'), (287, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (288, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (289, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (290, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (291, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (292, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (297, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {'), (342, '\t\t\t\txstep = picomp->hsamp * (1 <<'), 
(343, '\t\t\t\t (pirlvl->prcwidthexpn + picomp->numrlvls -'), (344, '\t\t\t\t rlvlno - 1));'), (345, '\t\t\t\tystep = picomp->vsamp * (1 <<'), (346, '\t\t\t\t (pirlvl->prcheightexpn + picomp->numrlvls -'), (347, '\t\t\t\t rlvlno - 1));'), (348, '\t\t\t\tpi->xstep = (!pi->xstep) ? xstep :'), (349, '\t\t\t\t JAS_MIN(pi->xstep, xstep);'), (350, '\t\t\t\tpi->ystep = (!pi->ystep) ? ystep :'), (351, '\t\t\t\t JAS_MIN(pi->ystep, ystep);'), (378, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) ||'), (380, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) ||'), (382, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (383, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (384, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (385, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (386, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (387, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (391, '\t\t\t\t\t\t pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {'), (429, '\tfor (pi->compno = pchg->compnostart, pi->picomp ='), (430, '\t &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno,'), (431, '\t ++pi->picomp) {'), (462, '\t\t\t\t\tif (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) ||'), (464, '\t\t\t\t\t ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) ||'), (466, '\t\t\t\t\t\tprchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp'), (467, '\t\t\t\t\t\t << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0,'), (468, '\t\t\t\t\t\t pi->pirlvl->prcwidthexpn);'), (469, '\t\t\t\t\t\tprcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp'), (470, '\t\t\t\t\t\t << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0,'), (471, '\t\t\t\t\t\t pi->pirlvl->prcheightexpn);'), (472, '\t\t\t\t\t\tpi->prcno = prcvind *'), (473, '\t\t\t\t\t\t pi->pirlvl->numhprcs +'), (474, '\t\t\t\t\t\t prchind;'), (475, '\t\t\t\t\t\tassert(pi->prcno <'), (476, '\t\t\t\t\t\t pi->pirlvl->numprcs);'), (477, '\t\t\t\t\t\tfor (pi->lyrno = 0; pi->lyrno <'), (478, '\t\t\t\t\t\t pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) {')]}
77
53
595
4400
https://github.com/mdadams/jasper
CVE-2016-9583
['CWE-125']
pgtable.h
pmd_none_or_trans_huge_or_clear_bad
#ifndef _ASM_GENERIC_PGTABLE_H #define _ASM_GENERIC_PGTABLE_H #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU #include <linux/mm_types.h> #include <linux/bug.h> #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty); #endif #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; int r = 1; if (!pte_young(pte)) r = 0; else set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); return r; } #endif #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; int r = 1; if (!pmd_young(pmd)) r = 0; else set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); return r; } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; pte_clear(mm, address, ptep); return pte; } #endif #ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; pmd_clear(mm, address, pmdp); return pmd; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, int full) { pte_t pte; pte = ptep_get_and_clear(mm, address, ptep); return pte; } #endif /* * Some architectures may be able to avoid expensive synchronization * primitives when modifications are made to PTE's which are already * not present, or in the process of an address space destruction. 
*/ #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL static inline void pte_clear_not_present_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, int full) { pte_clear(mm, address, ptep); } #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH extern pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte = *ptep; set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); } #endif #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t old_pmd = *pmdp; set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { BUG(); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH extern void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTE_SAME static inline int pte_same(pte_t pte_a, pte_t pte_b) { return pte_val(pte_a) == pte_val(pte_b); } #endif #ifndef __HAVE_ARCH_PMD_SAME #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY #define page_test_and_clear_dirty(pfn, mapped) (0) #endif #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY #define pte_maybe_dirty(pte) pte_dirty(pte) #else #define pte_maybe_dirty(pte) (1) #endif #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG #define page_test_and_clear_young(pfn) (0) #endif #ifndef __HAVE_ARCH_PGD_OFFSET_GATE #define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) #endif #ifndef __HAVE_ARCH_MOVE_PTE #define move_pte(pte, prot, old_addr, new_addr) (pte) #endif #ifndef flush_tlb_fix_spurious_fault #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) #endif #ifndef pgprot_noncached #define pgprot_noncached(prot) (prot) #endif #ifndef pgprot_writecombine #define pgprot_writecombine pgprot_noncached #endif /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. Although no * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. */ #define pgd_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #ifndef pud_addr_end #define pud_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif #ifndef pmd_addr_end #define pmd_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif /* * When walking page tables, we usually want to skip any p?d_none entries; * and any p?d_bad entries - reporting the error before resetting to none. * Do the tests inline, but report and clear the bad entry in mm/memory.c. 
*/ void pgd_clear_bad(pgd_t *); void pud_clear_bad(pud_t *); void pmd_clear_bad(pmd_t *); static inline int pgd_none_or_clear_bad(pgd_t *pgd) { if (pgd_none(*pgd)) return 1; if (unlikely(pgd_bad(*pgd))) { pgd_clear_bad(pgd); return 1; } return 0; } static inline int pud_none_or_clear_bad(pud_t *pud) { if (pud_none(*pud)) return 1; if (unlikely(pud_bad(*pud))) { pud_clear_bad(pud); return 1; } return 0; } static inline int pmd_none_or_clear_bad(pmd_t *pmd) { if (pmd_none(*pmd)) return 1; if (unlikely(pmd_bad(*pmd))) { pmd_clear_bad(pmd); return 1; } return 0; } static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { /* * Get the current pte state, but zero it out to make it * non-present, preventing the hardware from asynchronously * updating it. */ return ptep_get_and_clear(mm, addr, ptep); } static inline void __ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { /* * The pte is non-present, so there's no hardware state to * preserve. */ set_pte_at(mm, addr, ptep, pte); } #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION /* * Start a pte protection read-modify-write transaction, which * protects against asynchronous hardware modifications to the pte. * The intention is not to prevent the hardware from making pte * updates, but to prevent any updates it may make from being lost. * * This does not protect against other software modifications of the * pte; the appropriate pte lock must be held over the transation. * * Note that this interface is intended to be batchable, meaning that * ptep_modify_prot_commit may not actually update the pte, but merely * queue the update to be done at some later time. The update must be * actually committed before the pte lock is released, however. */ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { return __ptep_modify_prot_start(mm, addr, ptep); } /* * Commit an update to a pte, leaving any hardware-controlled bits in * the PTE unmodified. */ static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { __ptep_modify_prot_commit(mm, addr, ptep, pte); } #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ #endif /* CONFIG_MMU */ /* * A facility to provide lazy MMU batching. This allows PTE updates and * page invalidations to be delayed until a call to leave lazy MMU mode * is issued. Some architectures may benefit from doing this, and it is * beneficial for both shadow and direct mode hypervisors, which may batch * the PTE updates which happen during this window. Note that using this * interface requires that read hazards be removed from the code. A read * hazard could result in the direct mode hypervisor case, since the actual * write to the page tables may not yet have taken place, so reads though * a raw PTE pointer after it has been modified are not guaranteed to be * up to date. This mode can only be entered and left under the protection of * the page table locks for all page tables which may be modified. In the UP * case, this is required so that preemption is disabled, and in the SMP case, * it must synchronize the delayed page table writes properly on other CPUs. 
*/ #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE #define arch_enter_lazy_mmu_mode() do {} while (0) #define arch_leave_lazy_mmu_mode() do {} while (0) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif /* * A facility to provide batching of the reload of page tables and * other process state with the actual context switch code for * paravirtualized guests. By convention, only one of the batched * update (lazy) modes (CPU, MMU) should be active at any given time, * entry should never be nested, and entry and exits should always be * paired. This is for sanity of maintaining and reasoning about the * kernel code. In this case, the exit (end of the context switch) is * in architecture-specific code, and so doesn't need a generic * definition. */ #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH #define arch_start_context_switch(prev) do {} while (0) #endif #ifndef __HAVE_PFNMAP_TRACKING /* * Interface that can be used by architecture code to keep track of * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) * * track_pfn_vma_new is called when a _new_ pfn mapping is being established * for physical range indicated by pfn and size. */ static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size) { return 0; } /* * Interface that can be used by architecture code to keep track of * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) * * track_pfn_vma_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). */ static inline int track_pfn_vma_copy(struct vm_area_struct *vma) { return 0; } /* * Interface that can be used by architecture code to keep track of * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) * * untrack_pfn_vma is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case size can be zero). */ static inline void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { } #else extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size); extern int track_pfn_vma_copy(struct vm_area_struct *vma); extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, unsigned long size); #endif #ifdef CONFIG_MMU #ifndef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_trans_huge(pmd_t pmd) { return 0; } static inline int pmd_trans_splitting(pmd_t pmd) { return 0; } #ifndef __HAVE_ARCH_PMD_WRITE static inline int pmd_write(pmd_t pmd) { BUG(); return 0; } #endif /* __HAVE_ARCH_PMD_WRITE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* * This function is meant to be used by sites walking pagetables with * the mmap_sem hold in read mode to protect against MADV_DONTNEED and * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd * into a null pmd and the transhuge page fault can convert a null pmd * into an hugepmd or into a regular pmd (if the hugepage allocation * fails). While holding the mmap_sem in read mode the pmd becomes * stable and stops changing under us only if it's not null and not a * transhuge pmd. When those races occurs and this function makes a * difference vs the standard pmd_none_or_clear_bad, the result is * undefined so behaving like if the pmd was none is safe (because it * can return none anyway). The compiler level barrier() is critically * important to compute the two checks atomically on the same pmdval. 
*/ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) { /* depend on compiler for an atomic pmd read */ pmd_t pmdval = *pmd; /* * The barrier will stabilize the pmdval in a register or on * the stack so that it will stop changing under the code. */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif if (pmd_none(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { if (!pmd_trans_huge(pmdval)) pmd_clear_bad(pmd); return 1; } return 0; } /* * This is a noop if Transparent Hugepage Support is not built into * the kernel. Otherwise it is equivalent to * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in * places that already verified the pmd is not none and they want to * walk ptes while holding the mmap sem in read mode (write mode don't * need this). If THP is not enabled, the pmd can't go away under the * code even if MADV_DONTNEED runs, but if THP is enabled we need to * run a pmd_trans_unstable before walking the ptes after * split_huge_page_pmd returns (because it may have run when the pmd * become null, but then a page fault can map in a THP and not a * regular page). */ static inline int pmd_trans_unstable(pmd_t *pmd) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE return pmd_none_or_trans_huge_or_clear_bad(pmd); #else return 0; #endif } #endif /* CONFIG_MMU */ #endif /* !__ASSEMBLY__ */ #endif /* _ASM_GENERIC_PGTABLE_H */
#ifndef _ASM_GENERIC_PGTABLE_H #define _ASM_GENERIC_PGTABLE_H #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU #include <linux/mm_types.h> #include <linux/bug.h> #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty); #endif #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; int r = 1; if (!pte_young(pte)) r = 0; else set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); return r; } #endif #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; int r = 1; if (!pmd_young(pmd)) r = 0; else set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); return r; } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; pte_clear(mm, address, ptep); return pte; } #endif #ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; pmd_clear(mm, address, pmdp); return pmd; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, int full) { pte_t pte; pte = ptep_get_and_clear(mm, address, ptep); return pte; } #endif /* * Some architectures may be able to avoid expensive synchronization * primitives when modifications are made to PTE's which are already * not present, or in the process of an address space destruction. 
*/ #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL static inline void pte_clear_not_present_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, int full) { pte_clear(mm, address, ptep); } #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH extern pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte = *ptep; set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); } #endif #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t old_pmd = *pmdp; set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { BUG(); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH extern void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTE_SAME static inline int pte_same(pte_t pte_a, pte_t pte_b) { return pte_val(pte_a) == pte_val(pte_b); } #endif #ifndef __HAVE_ARCH_PMD_SAME #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY #define page_test_and_clear_dirty(pfn, mapped) (0) #endif #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY #define pte_maybe_dirty(pte) pte_dirty(pte) #else #define pte_maybe_dirty(pte) (1) #endif #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG #define page_test_and_clear_young(pfn) (0) #endif #ifndef __HAVE_ARCH_PGD_OFFSET_GATE #define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) #endif #ifndef __HAVE_ARCH_MOVE_PTE #define move_pte(pte, prot, old_addr, new_addr) (pte) #endif #ifndef flush_tlb_fix_spurious_fault #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) #endif #ifndef pgprot_noncached #define pgprot_noncached(prot) (prot) #endif #ifndef pgprot_writecombine #define pgprot_writecombine pgprot_noncached #endif /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. Although no * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. */ #define pgd_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #ifndef pud_addr_end #define pud_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif #ifndef pmd_addr_end #define pmd_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif /* * When walking page tables, we usually want to skip any p?d_none entries; * and any p?d_bad entries - reporting the error before resetting to none. * Do the tests inline, but report and clear the bad entry in mm/memory.c. 
*/ void pgd_clear_bad(pgd_t *); void pud_clear_bad(pud_t *); void pmd_clear_bad(pmd_t *); static inline int pgd_none_or_clear_bad(pgd_t *pgd) { if (pgd_none(*pgd)) return 1; if (unlikely(pgd_bad(*pgd))) { pgd_clear_bad(pgd); return 1; } return 0; } static inline int pud_none_or_clear_bad(pud_t *pud) { if (pud_none(*pud)) return 1; if (unlikely(pud_bad(*pud))) { pud_clear_bad(pud); return 1; } return 0; } static inline int pmd_none_or_clear_bad(pmd_t *pmd) { if (pmd_none(*pmd)) return 1; if (unlikely(pmd_bad(*pmd))) { pmd_clear_bad(pmd); return 1; } return 0; } static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { /* * Get the current pte state, but zero it out to make it * non-present, preventing the hardware from asynchronously * updating it. */ return ptep_get_and_clear(mm, addr, ptep); } static inline void __ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { /* * The pte is non-present, so there's no hardware state to * preserve. */ set_pte_at(mm, addr, ptep, pte); } #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION /* * Start a pte protection read-modify-write transaction, which * protects against asynchronous hardware modifications to the pte. * The intention is not to prevent the hardware from making pte * updates, but to prevent any updates it may make from being lost. * * This does not protect against other software modifications of the * pte; the appropriate pte lock must be held over the transation. * * Note that this interface is intended to be batchable, meaning that * ptep_modify_prot_commit may not actually update the pte, but merely * queue the update to be done at some later time. The update must be * actually committed before the pte lock is released, however. */ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { return __ptep_modify_prot_start(mm, addr, ptep); } /* * Commit an update to a pte, leaving any hardware-controlled bits in * the PTE unmodified. */ static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { __ptep_modify_prot_commit(mm, addr, ptep, pte); } #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ #endif /* CONFIG_MMU */ /* * A facility to provide lazy MMU batching. This allows PTE updates and * page invalidations to be delayed until a call to leave lazy MMU mode * is issued. Some architectures may benefit from doing this, and it is * beneficial for both shadow and direct mode hypervisors, which may batch * the PTE updates which happen during this window. Note that using this * interface requires that read hazards be removed from the code. A read * hazard could result in the direct mode hypervisor case, since the actual * write to the page tables may not yet have taken place, so reads though * a raw PTE pointer after it has been modified are not guaranteed to be * up to date. This mode can only be entered and left under the protection of * the page table locks for all page tables which may be modified. In the UP * case, this is required so that preemption is disabled, and in the SMP case, * it must synchronize the delayed page table writes properly on other CPUs. 
*/ #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE #define arch_enter_lazy_mmu_mode() do {} while (0) #define arch_leave_lazy_mmu_mode() do {} while (0) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif /* * A facility to provide batching of the reload of page tables and * other process state with the actual context switch code for * paravirtualized guests. By convention, only one of the batched * update (lazy) modes (CPU, MMU) should be active at any given time, * entry should never be nested, and entry and exits should always be * paired. This is for sanity of maintaining and reasoning about the * kernel code. In this case, the exit (end of the context switch) is * in architecture-specific code, and so doesn't need a generic * definition. */ #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH #define arch_start_context_switch(prev) do {} while (0) #endif #ifndef __HAVE_PFNMAP_TRACKING /* * Interface that can be used by architecture code to keep track of * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) * * track_pfn_vma_new is called when a _new_ pfn mapping is being established * for physical range indicated by pfn and size. */ static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size) { return 0; } /* * Interface that can be used by architecture code to keep track of * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) * * track_pfn_vma_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). */ static inline int track_pfn_vma_copy(struct vm_area_struct *vma) { return 0; } /* * Interface that can be used by architecture code to keep track of * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) * * untrack_pfn_vma is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case size can be zero). */ static inline void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { } #else extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size); extern int track_pfn_vma_copy(struct vm_area_struct *vma); extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, unsigned long size); #endif #ifdef CONFIG_MMU #ifndef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_trans_huge(pmd_t pmd) { return 0; } static inline int pmd_trans_splitting(pmd_t pmd) { return 0; } #ifndef __HAVE_ARCH_PMD_WRITE static inline int pmd_write(pmd_t pmd) { BUG(); return 0; } #endif /* __HAVE_ARCH_PMD_WRITE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifndef pmd_read_atomic static inline pmd_t pmd_read_atomic(pmd_t *pmdp) { /* * Depend on compiler for an atomic pmd read. NOTE: this is * only going to work, if the pmdval_t isn't larger than * an unsigned long. */ return *pmdp; } #endif /* * This function is meant to be used by sites walking pagetables with * the mmap_sem hold in read mode to protect against MADV_DONTNEED and * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd * into a null pmd and the transhuge page fault can convert a null pmd * into an hugepmd or into a regular pmd (if the hugepage allocation * fails). While holding the mmap_sem in read mode the pmd becomes * stable and stops changing under us only if it's not null and not a * transhuge pmd. 
When those races occurs and this function makes a * difference vs the standard pmd_none_or_clear_bad, the result is * undefined so behaving like if the pmd was none is safe (because it * can return none anyway). The compiler level barrier() is critically * important to compute the two checks atomically on the same pmdval. * * For 32bit kernels with a 64bit large pmd_t this automatically takes * care of reading the pmd atomically to avoid SMP race conditions * against pmd_populate() when the mmap_sem is hold for reading by the * caller (a special atomic read not done by "gcc" as in the generic * version above, is also needed when THP is disabled because the page * fault can populate the pmd from under us). */ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) { pmd_t pmdval = pmd_read_atomic(pmd); /* * The barrier will stabilize the pmdval in a register or on * the stack so that it will stop changing under the code. */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif if (pmd_none(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { if (!pmd_trans_huge(pmdval)) pmd_clear_bad(pmd); return 1; } return 0; } /* * This is a noop if Transparent Hugepage Support is not built into * the kernel. Otherwise it is equivalent to * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in * places that already verified the pmd is not none and they want to * walk ptes while holding the mmap sem in read mode (write mode don't * need this). If THP is not enabled, the pmd can't go away under the * code even if MADV_DONTNEED runs, but if THP is enabled we need to * run a pmd_trans_unstable before walking the ptes after * split_huge_page_pmd returns (because it may have run when the pmd * become null, but then a page fault can map in a THP and not a * regular page). */ static inline int pmd_trans_unstable(pmd_t *pmd) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE return pmd_none_or_trans_huge_or_clear_bad(pmd); #else return 0; #endif } #endif /* CONFIG_MMU */ #endif /* !__ASSEMBLY__ */ #endif /* _ASM_GENERIC_PGTABLE_H */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) { /* depend on compiler for an atomic pmd read */ pmd_t pmdval = *pmd; /* * The barrier will stabilize the pmdval in a register or on * the stack so that it will stop changing under the code. */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif if (pmd_none(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { if (!pmd_trans_huge(pmdval)) pmd_clear_bad(pmd); return 1; } return 0; }
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) { pmd_t pmdval = pmd_read_atomic(pmd); /* * The barrier will stabilize the pmdval in a register or on * the stack so that it will stop changing under the code. */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif if (pmd_none(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { if (!pmd_trans_huge(pmdval)) pmd_clear_bad(pmd); return 1; } return 0; }
{'added': [(448, '#ifndef pmd_read_atomic'), (449, 'static inline pmd_t pmd_read_atomic(pmd_t *pmdp)'), (450, '{'), (451, '\t/*'), (452, '\t * Depend on compiler for an atomic pmd read. NOTE: this is'), (453, "\t * only going to work, if the pmdval_t isn't larger than"), (454, '\t * an unsigned long.'), (455, '\t */'), (456, '\treturn *pmdp;'), (457, '}'), (458, '#endif'), (459, ''), (473, ' *'), (474, ' * For 32bit kernels with a 64bit large pmd_t this automatically takes'), (475, ' * care of reading the pmd atomically to avoid SMP race conditions'), (476, ' * against pmd_populate() when the mmap_sem is hold for reading by the'), (477, ' * caller (a special atomic read not done by "gcc" as in the generic'), (478, ' * version above, is also needed when THP is disabled because the page'), (479, ' * fault can populate the pmd from under us).'), (483, '\tpmd_t pmdval = pmd_read_atomic(pmd);')], 'deleted': [(464, '\t/* depend on compiler for an atomic pmd read */'), (465, '\tpmd_t pmdval = *pmd;')]}
20
2
222
1,157
https://github.com/torvalds/linux
CVE-2012-2373
['CWE-362']
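The CVE-2012-2373 record above changes pmd_none_or_trans_huge_or_clear_bad() to take a single snapshot of the pmd (pmd_read_atomic() followed by barrier()) and to run both the pmd_none() and pmd_bad() checks against that snapshot, instead of letting the compiler re-read *pmd between checks while a concurrent MADV_DONTNEED can change it. A minimal, self-contained sketch of that single-snapshot pattern follows; it is not kernel code, and the names (entry, racy_check, snapshot_check) are hypothetical stand-ins for *pmd and the two check paths.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t entry;              /* stands in for the shared pmd */

/* Racy shape (pre-fix): each test re-reads the shared word, so the two
 * checks can observe two different values written by another CPU. */
int racy_check(void)
{
    if (atomic_load(&entry) == 0)           /* "pmd_none" on read #1 */
        return 1;
    if ((atomic_load(&entry) & 1) == 0)     /* "pmd_bad" on read #2 */
        return -1;                          /* may fire on a value read #1 never saw */
    return 0;
}

/* Patched shape: snapshot once (cf. pmd_read_atomic() + barrier()), then run
 * every check against the same local copy. */
int snapshot_check(void)
{
    uint64_t snap = atomic_load(&entry);
    if (snap == 0)
        return 1;
    if ((snap & 1) == 0)
        return -1;
    return 0;
}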
cmd_info.c
cmd_info
/* radare - LGPL - Copyright 2009-2017 - pancake */ #include <string.h> #include "r_bin.h" #include "r_config.h" #include "r_cons.h" #include "r_core.h" #define PAIR_WIDTH 9 // TODO: reuse implementation in core/bin.c static void pair(const char *a, const char *b) { char ws[16]; int al = strlen (a); if (!b) { return; } memset (ws, ' ', sizeof (ws)); al = PAIR_WIDTH - al; if (al < 0) { al = 0; } ws[al] = 0; r_cons_printf ("%s%s%s\n", a, ws, b); } static bool demangle_internal(RCore *core, const char *lang, const char *s) { char *res = NULL; int type = r_bin_demangle_type (lang); switch (type) { case R_BIN_NM_CXX: res = r_bin_demangle_cxx (core->bin->cur, s, 0); break; case R_BIN_NM_JAVA: res = r_bin_demangle_java (s); break; case R_BIN_NM_OBJC: res = r_bin_demangle_objc (NULL, s); break; case R_BIN_NM_SWIFT: res = r_bin_demangle_swift (s, core->bin->demanglercmd); break; case R_BIN_NM_DLANG: res = r_bin_demangle_plugin (core->bin, "dlang", s); break; default: r_bin_demangle_list (core->bin); return true; } if (res) { if (*res) { printf ("%s\n", res); } free (res); return false; } return true; } static int demangle(RCore *core, const char *s) { char *p, *q; const char *ss = strchr (s, ' '); if (!*s) { return 0; } if (!ss) { const char *lang = r_config_get (core->config, "bin.lang"); demangle_internal (core, lang, s); return 1; } p = strdup (s); q = p + (ss - s); *q = 0; demangle_internal (core, p, q + 1); free (p); return 1; } #define STR(x) (x)? (x): "" static void r_core_file_info(RCore *core, int mode) { const char *fn = NULL; int dbg = r_config_get_i (core->config, "cfg.debug"); bool io_cache = r_config_get_i (core->config, "io.cache"); RBinInfo *info = r_bin_get_info (core->bin); RBinFile *binfile = r_core_bin_cur (core); RCoreFile *cf = core->file; RBinPlugin *plugin = r_bin_file_cur_plugin (binfile); if (mode == R_CORE_BIN_JSON) { r_cons_printf ("{"); } if (mode == R_CORE_BIN_RADARE) { return; } if (mode == R_CORE_BIN_SIMPLE) { return; } if (info) { fn = info->file; if (mode == R_CORE_BIN_JSON) { r_cons_printf ("\"type\":\"%s\"", STR (info->type)); } } else { fn = (cf && cf->desc)? 
cf->desc->name: NULL; } if (cf && mode == R_CORE_BIN_JSON) { const char *uri = fn; if (!uri) { if (cf->desc && cf->desc->uri && *cf->desc->uri) { uri = cf->desc->uri; } else { uri = ""; } } { char *escapedFile = r_str_utf16_encode (uri, -1); r_cons_printf (",\"file\":\"%s\"", escapedFile); free (escapedFile); } if (dbg) { dbg = R_IO_WRITE | R_IO_EXEC; } if (cf->desc) { ut64 fsz = r_io_desc_size (core->io, cf->desc); r_cons_printf (",\"fd\":%d", cf->desc->fd); if (fsz != UT64_MAX) { r_cons_printf (",\"size\":%"PFMT64d, fsz); char *humansz = r_num_units (NULL, fsz); if (humansz) { r_cons_printf (",\"humansz\":\"%s\"", humansz); free (humansz); } } r_cons_printf (",\"iorw\":%s", r_str_bool ( io_cache ||\ cf->desc->flags & R_IO_WRITE )); r_cons_printf (",\"mode\":\"%s\"", r_str_rwx_i ( cf->desc->flags & 7 )); r_cons_printf (",\"obsz\":%"PFMT64d, (ut64) core->io->desc->obsz); if (cf->desc->referer && *cf->desc->referer) { r_cons_printf (",\"referer\":\"%s\"", cf->desc->referer); } } r_cons_printf (",\"block\":%d", core->blocksize); if (binfile) { if (binfile->curxtr) { r_cons_printf (",\"packet\":\"%s\"", binfile->curxtr->name); } if (plugin) { r_cons_printf (",\"format\":\"%s\"", plugin->name); } } r_cons_printf ("}"); } else if (cf && mode != R_CORE_BIN_SIMPLE) { //r_cons_printf ("# Core file info\n"); if (dbg) { dbg = R_IO_WRITE | R_IO_EXEC; } if (cf->desc) { pair ("blksz", sdb_fmt (0, "0x%"PFMT64x, (ut64) core->io->desc->obsz)); } pair ("block", sdb_fmt (0, "0x%x", core->blocksize)); if (cf->desc) { pair ("fd", sdb_fmt (0, "%d", cf->desc->fd)); } if (fn || (cf->desc && cf->desc->uri)) { pair ("file", fn? fn: cf->desc->uri); } if (plugin) { pair ("format", plugin->name); } if (cf->desc) { pair ("iorw", r_str_bool (io_cache || cf->desc->flags & R_IO_WRITE )); pair ("mode", r_str_rwx_i (cf->desc->flags & 7)); } if (binfile && binfile->curxtr) { pair ("packet", binfile->curxtr->name); } if (cf->desc && cf->desc->referer && *cf->desc->referer) { pair ("referer", cf->desc->referer); } if (cf->desc) { ut64 fsz = r_io_desc_size (core->io, cf->desc); if (fsz != UT64_MAX) { pair ("size", sdb_fmt (0,"0x%"PFMT64x, fsz)); char *humansz = r_num_units (NULL, fsz); if (humansz) { pair ("humansz", humansz); free (humansz); } } } if (info) { pair ("type", info->type); } } } static int bin_is_executable(RBinObject *obj){ RListIter *it; RBinSection *sec; if (obj) { if (obj->info && obj->info->arch) { return true; } r_list_foreach (obj->sections, it, sec){ if (R_BIN_SCN_EXECUTABLE & sec->srwx) { return true; } } } return false; } static void cmd_info_bin(RCore *core, int va, int mode) { RBinObject *obj = r_bin_cur_object (core->bin); int array = 0; if (core->file) { if ((mode & R_CORE_BIN_JSON) && !(mode & R_CORE_BIN_ARRAY)) { mode = R_CORE_BIN_JSON; r_cons_printf ("{\"core\":"); } if ((mode & R_CORE_BIN_JSON) && (mode & R_CORE_BIN_ARRAY)) { mode = R_CORE_BIN_JSON; array = 1; r_cons_printf (",\"core\":"); } r_core_file_info (core, mode); if (bin_is_executable (obj)) { if ((mode & R_CORE_BIN_JSON)) { r_cons_printf (",\"bin\":"); } r_core_bin_info (core, R_CORE_BIN_ACC_INFO, mode, va, NULL, NULL); } if (mode == R_CORE_BIN_JSON && array == 0) { r_cons_printf ("}\n"); } } else { eprintf ("No file selected\n"); } } static void playMsg(RCore *core, const char *n, int len) { if (r_config_get_i (core->config, "scr.tts")) { if (len > 0) { char *s = r_str_newf ("%d %s", len, n); r_sys_tts (s, true); free (s); } else if (len == 0) { char *s = r_str_newf ("there are no %s", n); r_sys_tts (s, true); free (s); } } } static 
int cmd_info(void *data, const char *input) { RCore *core = (RCore *) data; bool newline = r_config_get_i (core->config, "scr.interactive"); RBinObject *o = r_bin_cur_object (core->bin); RCoreFile *cf = core->file; int i, va = core->io->va || core->io->debug; int mode = 0; //R_CORE_BIN_SIMPLE; int is_array = 0; Sdb *db; for (i = 0; input[i] && input[i] != ' '; i++) ; if (i > 0) { switch (input[i - 1]) { case '*': mode = R_CORE_BIN_RADARE; break; case 'j': mode = R_CORE_BIN_JSON; break; case 'q': mode = R_CORE_BIN_SIMPLE; break; } } if (mode == R_CORE_BIN_JSON) { if (strlen (input + 1) > 1) { is_array = 1; } } if (is_array) { r_cons_printf ("{"); } if (!*input) { cmd_info_bin (core, va, mode); } /* i* is an alias for iI* */ if (!strcmp (input, "*")) { input = "I*"; } RBinObject *obj = r_bin_cur_object (core->bin); while (*input) { switch (*input) { case 'b': // "ib" { ut64 baddr = r_config_get_i (core->config, "bin.baddr"); if (input[1] == ' ') { baddr = r_num_math (core->num, input + 1); } // XXX: this will reload the bin using the buffer. // An assumption is made that assumes there is an underlying // plugin that will be used to load the bin (e.g. malloc://) // TODO: Might be nice to reload a bin at a specified offset? r_core_bin_reload (core, NULL, baddr); r_core_block_read (core); newline = false; } break; case 'k': db = o? o->kv: NULL; //:eprintf ("db = %p\n", db); switch (input[1]) { case 'v': if (db) { char *o = sdb_querys (db, NULL, 0, input + 3); if (o && *o) { r_cons_print (o); } free (o); } break; case '*': r_core_bin_export_info_rad (core); break; case '.': case ' ': if (db) { char *o = sdb_querys (db, NULL, 0, input + 2); if (o && *o) { r_cons_print (o); } free (o); } break; case '\0': if (db) { char *o = sdb_querys (db, NULL, 0, "*"); if (o && *o) { r_cons_print (o); } free (o); } break; case '?': default: eprintf ("Usage: ik [sdb-query]\n"); eprintf ("Usage: ik* # load all header information\n"); } goto done; break; case 'o': { if (!cf) { eprintf ("Core file not open\n"); return 0; } const char *fn = input[1] == ' '? input + 2: cf->desc->name; ut64 baddr = r_config_get_i (core->config, "bin.baddr"); r_core_bin_load (core, fn, baddr); } break; #define RBININFO(n,x,y,z)\ if (is_array) {\ if (is_array == 1) { is_array++;\ } else { r_cons_printf (",");}\ r_cons_printf ("\"%s\":",n);\ }\ if (z) { playMsg (core, n, z);}\ r_core_bin_info (core, x, mode, va, NULL, y); case 'A': newline = false; if (input[1] == 'j') { r_cons_printf ("{"); r_bin_list_archs (core->bin, 'j'); r_cons_printf ("}\n"); } else { r_bin_list_archs (core->bin, 1); } break; case 'E': RBININFO ("exports", R_CORE_BIN_ACC_EXPORTS, NULL, 0); break; case 'Z': RBININFO ("size", R_CORE_BIN_ACC_SIZE, NULL, 0); break; case 'S': //we comes from ia or iS if ((input[1] == 'm' && input[2] == 'z') || !input[1]) { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, NULL, 0); } else { //iS entropy,sha1 RBinObject *obj = r_bin_cur_object (core->bin); if (mode == R_CORE_BIN_RADARE || mode == R_CORE_BIN_JSON || mode == R_CORE_BIN_SIMPLE) { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, input + 2, obj? r_list_length (obj->sections): 0); } else { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, input + 1, obj? 
r_list_length (obj->sections): 0); } //we move input until get '\0' while (*(++input)) ; //input-- because we are inside a while that does input++ // oob read if not input-- input--; } break; case 'H': if (input[1] == 'H') { // "iHH" RBININFO ("header", R_CORE_BIN_ACC_HEADER, NULL, -1); break; } case 'h': RBININFO ("fields", R_CORE_BIN_ACC_FIELDS, NULL, 0); break; case 'l': RBININFO ("libs", R_CORE_BIN_ACC_LIBS, NULL, obj? r_list_length (obj->libs): 0); break; case 'L': { char *ptr = strchr (input, ' '); int json = input[1] == 'j'? 'j': 0; if (ptr && ptr[1]) { const char *plugin_name = ptr + 1; if (is_array) { r_cons_printf ("\"plugin\": "); } r_bin_list_plugin (core->bin, plugin_name, json); } else { r_bin_list (core->bin, json); } newline = false; goto done; } break; case 's': if (input[1] == '.') { ut64 addr = core->offset + (core->print->cur_enabled? core->print->cur: 0); RFlagItem *f = r_flag_get_at (core->flags, addr, false); if (f) { if (f->offset == addr || !f->offset) { r_cons_printf ("%s", f->name); } else { r_cons_printf ("%s+%d", f->name, (int) (addr - f->offset)); } } input++; break; } else { RBinObject *obj = r_bin_cur_object (core->bin); RBININFO ("symbols", R_CORE_BIN_ACC_SYMBOLS, NULL, obj? r_list_length (obj->symbols): 0); break; } case 'R': if (input[1] == '*') { mode = R_CORE_BIN_RADARE; } else if (input[1] == 'j') { mode = R_CORE_BIN_JSON; } RBININFO ("resources", R_CORE_BIN_ACC_RESOURCES, NULL, 0); break; case 'r': RBININFO ("relocs", R_CORE_BIN_ACC_RELOCS, NULL, 0); break; case 'd': RBININFO ("dwarf", R_CORE_BIN_ACC_DWARF, NULL, -1); break; case 'i': RBININFO ("imports",R_CORE_BIN_ACC_IMPORTS, NULL, obj? r_list_length (obj->imports): 0); break; case 'I': RBININFO ("info", R_CORE_BIN_ACC_INFO, NULL, 0); break; case 'e': RBININFO ("entries", R_CORE_BIN_ACC_ENTRIES, NULL, 0); break; case 'M': RBININFO ("main", R_CORE_BIN_ACC_MAIN, NULL, 0); break; case 'm': RBININFO ("memory", R_CORE_BIN_ACC_MEM, NULL, 0); break; case 'V': RBININFO ("versioninfo", R_CORE_BIN_ACC_VERSIONINFO, NULL, 0); break; case 'C': RBININFO ("signature", R_CORE_BIN_ACC_SIGNATURE, NULL, 0); break; case 'z': if (input[1] == 'z') { //izz switch (input[2]) { case '*': mode = R_CORE_BIN_RADARE; break; case 'j': mode = R_CORE_BIN_JSON; break; case 'q': //izzq if (input[3] == 'q') { //izzqq mode = R_CORE_BIN_SIMPLEST; input++; } else { mode = R_CORE_BIN_SIMPLE; } break; default: mode = R_CORE_BIN_PRINT; break; } input++; RBININFO ("strings", R_CORE_BIN_ACC_RAW_STRINGS, NULL, 0); } else { RBinObject *obj = r_bin_cur_object (core->bin); if (input[1] == 'q') { mode = (input[2] == 'q') ? R_CORE_BIN_SIMPLEST : R_CORE_BIN_SIMPLE; input++; } if (obj) { RBININFO ("strings", R_CORE_BIN_ACC_STRINGS, NULL, obj? r_list_length (obj->strings): 0); } } break; case 'c': // for r2 `ic` if (input[1] == '?') { eprintf ("Usage: ic[ljq*] [class-index or name]\n"); } else if (input[1] == ' ' || input[1] == 'q' || input[1] == 'j' || input[1] == 'l') { RBinClass *cls; RBinSymbol *sym; RListIter *iter, *iter2; RBinObject *obj = r_bin_cur_object (core->bin); if (obj) { if (input[2]) { int idx = -1; const char * cls_name = NULL; if (r_num_is_valid_input (core->num, input + 2)) { idx = r_num_math (core->num, input + 2); } else { const char * first_char = input + ((input[1] == ' ') ? 
1 : 2); int not_space = strspn (first_char, " "); if (first_char[not_space]) { cls_name = first_char + not_space; } } int count = 0; r_list_foreach (obj->classes, iter, cls) { if ((idx >= 0 && idx != count++) || (cls_name && strcmp (cls_name, cls->name) != 0)){ continue; } switch (input[1]) { case '*': r_list_foreach (cls->methods, iter2, sym) { r_cons_printf ("f sym.%s @ 0x%"PFMT64x "\n", sym->name, sym->vaddr); } input++; break; case 'l': r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? " ": ""; r_cons_printf ("%s0x%"PFMT64d, comma, sym->vaddr); } r_cons_newline (); input++; break; case 'j': input++; r_cons_printf ("\"class\":\"%s\"", cls->name); r_cons_printf (",\"methods\":["); r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? ",": ""; if (sym->method_flags) { char *flags = r_core_bin_method_flags_str (sym, R_CORE_BIN_JSON); r_cons_printf ("%s{\"name\":\"%s\",\"flags\":%s,\"vaddr\":%"PFMT64d "}", comma, sym->name, flags, sym->vaddr); R_FREE (flags); } else { r_cons_printf ("%s{\"name\":\"%s\",\"vaddr\":%"PFMT64d "}", comma, sym->name, sym->vaddr); } } r_cons_printf ("]"); break; default: r_cons_printf ("class %s\n", cls->name); r_list_foreach (cls->methods, iter2, sym) { char *flags = r_core_bin_method_flags_str (sym, 0); r_cons_printf ("0x%08"PFMT64x " method %s %s %s\n", sym->vaddr, cls->name, flags, sym->name); R_FREE (flags); } break; } goto done; } goto done; } else { playMsg (core, "classes", r_list_length (obj->classes)); if (input[1] == 'l' && obj) { // "icl" r_list_foreach (obj->classes, iter, cls) { r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? " ": ""; r_cons_printf ("%s0x%"PFMT64d, comma, sym->vaddr); } if (!r_list_empty (cls->methods)) { r_cons_newline (); } } } else { RBININFO ("classes", R_CORE_BIN_ACC_CLASSES, NULL, r_list_length (obj->classes)); } } } } else { RBinObject *obj = r_bin_cur_object (core->bin); int len = obj? 
r_list_length (obj->classes): 0; RBININFO ("classes", R_CORE_BIN_ACC_CLASSES, NULL, len); } break; case 'D': if (input[1] != ' ' || !demangle (core, input + 2)) { eprintf ("|Usage: iD lang symbolname\n"); } return 0; case 'a': switch (mode) { case R_CORE_BIN_RADARE: cmd_info (core, "iIiecsSmz*"); break; case R_CORE_BIN_JSON: cmd_info (core, "iIiecsSmzj"); break; case R_CORE_BIN_SIMPLE: cmd_info (core, "iIiecsSmzq"); break; default: cmd_info (core, "IiEecsSmz"); break; } break; case '?': { const char *help_message[] = { "Usage: i", "", "Get info from opened file (see rabin2's manpage)", "Output mode:", "", "", "'*'", "", "Output in radare commands", "'j'", "", "Output in json", "'q'", "", "Simple quiet output", "Actions:", "", "", "i|ij", "", "Show info of current file (in JSON)", "iA", "", "List archs", "ia", "", "Show all info (imports, exports, sections..)", "ib", "", "Reload the current buffer for setting of the bin (use once only)", "ic", "", "List classes, methods and fields", "iC", "", "Show signature info (entitlements, ...)", "id", "", "Debug information (source lines)", "iD", " lang sym", "demangle symbolname for given language", "ie", "", "Entrypoint", "iE", "", "Exports (global symbols)", "ih", "", "Headers (alias for iH)", "iHH", "", "Verbose Headers in raw text", "ii", "", "Imports", "iI", "", "Binary info", "ik", " [query]", "Key-value database from RBinObject", "il", "", "Libraries", "iL ", "[plugin]", "List all RBin plugins loaded or plugin details", "im", "", "Show info about predefined memory allocation", "iM", "", "Show main address", "io", " [file]", "Load info from file (or last opened) use bin.baddr", "ir", "", "Relocs", "iR", "", "Resources", "is", "", "Symbols", "iS ", "[entropy,sha1]", "Sections (choose which hash algorithm to use)", "iV", "", "Display file version info", "iz|izj", "", "Strings in data sections (in JSON/Base64)", "izz", "", "Search for Strings in the whole binary", "iZ", "", "Guess size of binary program", NULL }; r_core_cmd_help (core, help_message); } goto done; case '*': mode = R_CORE_BIN_RADARE; goto done; case 'q': mode = R_CORE_BIN_SIMPLE; cmd_info_bin (core, va, mode); goto done; case 'j': mode = R_CORE_BIN_JSON; if (is_array > 1) { mode |= R_CORE_BIN_ARRAY; } cmd_info_bin (core, va, mode); goto done; default: cmd_info_bin (core, va, mode); break; } input++; if ((*input == 'j' || *input == 'q') && !input[1]) { break; } } done: if (is_array) { r_cons_printf ("}\n"); } if (newline) { r_cons_newline (); } return 0; }
/* radare - LGPL - Copyright 2009-2017 - pancake */ #include <string.h> #include "r_bin.h" #include "r_config.h" #include "r_cons.h" #include "r_core.h" #define PAIR_WIDTH 9 // TODO: reuse implementation in core/bin.c static void pair(const char *a, const char *b) { char ws[16]; int al = strlen (a); if (!b) { return; } memset (ws, ' ', sizeof (ws)); al = PAIR_WIDTH - al; if (al < 0) { al = 0; } ws[al] = 0; r_cons_printf ("%s%s%s\n", a, ws, b); } static bool demangle_internal(RCore *core, const char *lang, const char *s) { char *res = NULL; int type = r_bin_demangle_type (lang); switch (type) { case R_BIN_NM_CXX: res = r_bin_demangle_cxx (core->bin->cur, s, 0); break; case R_BIN_NM_JAVA: res = r_bin_demangle_java (s); break; case R_BIN_NM_OBJC: res = r_bin_demangle_objc (NULL, s); break; case R_BIN_NM_SWIFT: res = r_bin_demangle_swift (s, core->bin->demanglercmd); break; case R_BIN_NM_DLANG: res = r_bin_demangle_plugin (core->bin, "dlang", s); break; default: r_bin_demangle_list (core->bin); return true; } if (res) { if (*res) { printf ("%s\n", res); } free (res); return false; } return true; } static int demangle(RCore *core, const char *s) { char *p, *q; const char *ss = strchr (s, ' '); if (!*s) { return 0; } if (!ss) { const char *lang = r_config_get (core->config, "bin.lang"); demangle_internal (core, lang, s); return 1; } p = strdup (s); q = p + (ss - s); *q = 0; demangle_internal (core, p, q + 1); free (p); return 1; } #define STR(x) (x)? (x): "" static void r_core_file_info(RCore *core, int mode) { const char *fn = NULL; int dbg = r_config_get_i (core->config, "cfg.debug"); bool io_cache = r_config_get_i (core->config, "io.cache"); RBinInfo *info = r_bin_get_info (core->bin); RBinFile *binfile = r_core_bin_cur (core); RCoreFile *cf = core->file; RBinPlugin *plugin = r_bin_file_cur_plugin (binfile); if (mode == R_CORE_BIN_JSON) { r_cons_printf ("{"); } if (mode == R_CORE_BIN_RADARE) { return; } if (mode == R_CORE_BIN_SIMPLE) { return; } if (info) { fn = info->file; if (mode == R_CORE_BIN_JSON) { r_cons_printf ("\"type\":\"%s\"", STR (info->type)); } } else { fn = (cf && cf->desc)? 
cf->desc->name: NULL; } if (cf && mode == R_CORE_BIN_JSON) { const char *uri = fn; if (!uri) { if (cf->desc && cf->desc->uri && *cf->desc->uri) { uri = cf->desc->uri; } else { uri = ""; } } { char *escapedFile = r_str_utf16_encode (uri, -1); r_cons_printf (",\"file\":\"%s\"", escapedFile); free (escapedFile); } if (dbg) { dbg = R_IO_WRITE | R_IO_EXEC; } if (cf->desc) { ut64 fsz = r_io_desc_size (core->io, cf->desc); r_cons_printf (",\"fd\":%d", cf->desc->fd); if (fsz != UT64_MAX) { r_cons_printf (",\"size\":%"PFMT64d, fsz); char *humansz = r_num_units (NULL, fsz); if (humansz) { r_cons_printf (",\"humansz\":\"%s\"", humansz); free (humansz); } } r_cons_printf (",\"iorw\":%s", r_str_bool ( io_cache ||\ cf->desc->flags & R_IO_WRITE )); r_cons_printf (",\"mode\":\"%s\"", r_str_rwx_i ( cf->desc->flags & 7 )); r_cons_printf (",\"obsz\":%"PFMT64d, (ut64) core->io->desc->obsz); if (cf->desc->referer && *cf->desc->referer) { r_cons_printf (",\"referer\":\"%s\"", cf->desc->referer); } } r_cons_printf (",\"block\":%d", core->blocksize); if (binfile) { if (binfile->curxtr) { r_cons_printf (",\"packet\":\"%s\"", binfile->curxtr->name); } if (plugin) { r_cons_printf (",\"format\":\"%s\"", plugin->name); } } r_cons_printf ("}"); } else if (cf && mode != R_CORE_BIN_SIMPLE) { //r_cons_printf ("# Core file info\n"); if (dbg) { dbg = R_IO_WRITE | R_IO_EXEC; } if (cf->desc) { pair ("blksz", sdb_fmt (0, "0x%"PFMT64x, (ut64) core->io->desc->obsz)); } pair ("block", sdb_fmt (0, "0x%x", core->blocksize)); if (cf->desc) { pair ("fd", sdb_fmt (0, "%d", cf->desc->fd)); } if (fn || (cf->desc && cf->desc->uri)) { pair ("file", fn? fn: cf->desc->uri); } if (plugin) { pair ("format", plugin->name); } if (cf->desc) { pair ("iorw", r_str_bool (io_cache || cf->desc->flags & R_IO_WRITE )); pair ("mode", r_str_rwx_i (cf->desc->flags & 7)); } if (binfile && binfile->curxtr) { pair ("packet", binfile->curxtr->name); } if (cf->desc && cf->desc->referer && *cf->desc->referer) { pair ("referer", cf->desc->referer); } if (cf->desc) { ut64 fsz = r_io_desc_size (core->io, cf->desc); if (fsz != UT64_MAX) { pair ("size", sdb_fmt (0,"0x%"PFMT64x, fsz)); char *humansz = r_num_units (NULL, fsz); if (humansz) { pair ("humansz", humansz); free (humansz); } } } if (info) { pair ("type", info->type); } } } static int bin_is_executable(RBinObject *obj){ RListIter *it; RBinSection *sec; if (obj) { if (obj->info && obj->info->arch) { return true; } r_list_foreach (obj->sections, it, sec){ if (R_BIN_SCN_EXECUTABLE & sec->srwx) { return true; } } } return false; } static void cmd_info_bin(RCore *core, int va, int mode) { RBinObject *obj = r_bin_cur_object (core->bin); int array = 0; if (core->file) { if ((mode & R_CORE_BIN_JSON) && !(mode & R_CORE_BIN_ARRAY)) { mode = R_CORE_BIN_JSON; r_cons_printf ("{\"core\":"); } if ((mode & R_CORE_BIN_JSON) && (mode & R_CORE_BIN_ARRAY)) { mode = R_CORE_BIN_JSON; array = 1; r_cons_printf (",\"core\":"); } r_core_file_info (core, mode); if (bin_is_executable (obj)) { if ((mode & R_CORE_BIN_JSON)) { r_cons_printf (",\"bin\":"); } r_core_bin_info (core, R_CORE_BIN_ACC_INFO, mode, va, NULL, NULL); } if (mode == R_CORE_BIN_JSON && array == 0) { r_cons_printf ("}\n"); } } else { eprintf ("No file selected\n"); } } static void playMsg(RCore *core, const char *n, int len) { if (r_config_get_i (core->config, "scr.tts")) { if (len > 0) { char *s = r_str_newf ("%d %s", len, n); r_sys_tts (s, true); free (s); } else if (len == 0) { char *s = r_str_newf ("there are no %s", n); r_sys_tts (s, true); free (s); } } } static 
int cmd_info(void *data, const char *input) { RCore *core = (RCore *) data; bool newline = r_config_get_i (core->config, "scr.interactive"); RBinObject *o = r_bin_cur_object (core->bin); RCoreFile *cf = core->file; int i, va = core->io->va || core->io->debug; int mode = 0; //R_CORE_BIN_SIMPLE; int is_array = 0; Sdb *db; for (i = 0; input[i] && input[i] != ' '; i++) ; if (i > 0) { switch (input[i - 1]) { case '*': mode = R_CORE_BIN_RADARE; break; case 'j': mode = R_CORE_BIN_JSON; break; case 'q': mode = R_CORE_BIN_SIMPLE; break; } } if (mode == R_CORE_BIN_JSON) { if (strlen (input + 1) > 1) { is_array = 1; } } if (is_array) { r_cons_printf ("{"); } if (!*input) { cmd_info_bin (core, va, mode); } /* i* is an alias for iI* */ if (!strcmp (input, "*")) { input = "I*"; } RBinObject *obj = r_bin_cur_object (core->bin); while (*input) { switch (*input) { case 'b': // "ib" { ut64 baddr = r_config_get_i (core->config, "bin.baddr"); if (input[1] == ' ') { baddr = r_num_math (core->num, input + 1); } // XXX: this will reload the bin using the buffer. // An assumption is made that assumes there is an underlying // plugin that will be used to load the bin (e.g. malloc://) // TODO: Might be nice to reload a bin at a specified offset? r_core_bin_reload (core, NULL, baddr); r_core_block_read (core); newline = false; } break; case 'k': db = o? o->kv: NULL; //:eprintf ("db = %p\n", db); switch (input[1]) { case 'v': if (db) { char *o = sdb_querys (db, NULL, 0, input + 3); if (o && *o) { r_cons_print (o); } free (o); } break; case '*': r_core_bin_export_info_rad (core); break; case '.': case ' ': if (db) { char *o = sdb_querys (db, NULL, 0, input + 2); if (o && *o) { r_cons_print (o); } free (o); } break; case '\0': if (db) { char *o = sdb_querys (db, NULL, 0, "*"); if (o && *o) { r_cons_print (o); } free (o); } break; case '?': default: eprintf ("Usage: ik [sdb-query]\n"); eprintf ("Usage: ik* # load all header information\n"); } goto done; break; case 'o': { if (!cf) { eprintf ("Core file not open\n"); return 0; } const char *fn = input[1] == ' '? input + 2: cf->desc->name; ut64 baddr = r_config_get_i (core->config, "bin.baddr"); r_core_bin_load (core, fn, baddr); } break; #define RBININFO(n,x,y,z)\ if (is_array) {\ if (is_array == 1) { is_array++;\ } else { r_cons_printf (",");}\ r_cons_printf ("\"%s\":",n);\ }\ if (z) { playMsg (core, n, z);}\ r_core_bin_info (core, x, mode, va, NULL, y); case 'A': newline = false; if (input[1] == 'j') { r_cons_printf ("{"); r_bin_list_archs (core->bin, 'j'); r_cons_printf ("}\n"); } else { r_bin_list_archs (core->bin, 1); } break; case 'E': RBININFO ("exports", R_CORE_BIN_ACC_EXPORTS, NULL, 0); break; case 'Z': RBININFO ("size", R_CORE_BIN_ACC_SIZE, NULL, 0); break; case 'S': //we comes from ia or iS if ((input[1] == 'm' && input[2] == 'z') || !input[1]) { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, NULL, 0); } else { //iS entropy,sha1 RBinObject *obj = r_bin_cur_object (core->bin); if (mode == R_CORE_BIN_RADARE || mode == R_CORE_BIN_JSON || mode == R_CORE_BIN_SIMPLE) { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, input + 2, obj? r_list_length (obj->sections): 0); } else { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, input + 1, obj? 
r_list_length (obj->sections): 0); } //we move input until get '\0' while (*(++input)) ; //input-- because we are inside a while that does input++ // oob read if not input-- input--; } break; case 'H': if (input[1] == 'H') { // "iHH" RBININFO ("header", R_CORE_BIN_ACC_HEADER, NULL, -1); break; } case 'h': RBININFO ("fields", R_CORE_BIN_ACC_FIELDS, NULL, 0); break; case 'l': { RBinObject *obj = r_bin_cur_object (core->bin); RBININFO ("libs", R_CORE_BIN_ACC_LIBS, NULL, obj? r_list_length (obj->libs): 0); } break; case 'L': { char *ptr = strchr (input, ' '); int json = input[1] == 'j'? 'j': 0; if (ptr && ptr[1]) { const char *plugin_name = ptr + 1; if (is_array) { r_cons_printf ("\"plugin\": "); } r_bin_list_plugin (core->bin, plugin_name, json); } else { r_bin_list (core->bin, json); } newline = false; goto done; } break; case 's': if (input[1] == '.') { ut64 addr = core->offset + (core->print->cur_enabled? core->print->cur: 0); RFlagItem *f = r_flag_get_at (core->flags, addr, false); if (f) { if (f->offset == addr || !f->offset) { r_cons_printf ("%s", f->name); } else { r_cons_printf ("%s+%d", f->name, (int) (addr - f->offset)); } } input++; break; } else { RBinObject *obj = r_bin_cur_object (core->bin); RBININFO ("symbols", R_CORE_BIN_ACC_SYMBOLS, NULL, obj? r_list_length (obj->symbols): 0); break; } case 'R': if (input[1] == '*') { mode = R_CORE_BIN_RADARE; } else if (input[1] == 'j') { mode = R_CORE_BIN_JSON; } RBININFO ("resources", R_CORE_BIN_ACC_RESOURCES, NULL, 0); break; case 'r': RBININFO ("relocs", R_CORE_BIN_ACC_RELOCS, NULL, 0); break; case 'd': RBININFO ("dwarf", R_CORE_BIN_ACC_DWARF, NULL, -1); break; case 'i': { RBinObject *obj = r_bin_cur_object (core->bin); RBININFO ("imports", R_CORE_BIN_ACC_IMPORTS, NULL, obj? r_list_length (obj->imports): 0); } break; case 'I': RBININFO ("info", R_CORE_BIN_ACC_INFO, NULL, 0); break; case 'e': RBININFO ("entries", R_CORE_BIN_ACC_ENTRIES, NULL, 0); break; case 'M': RBININFO ("main", R_CORE_BIN_ACC_MAIN, NULL, 0); break; case 'm': RBININFO ("memory", R_CORE_BIN_ACC_MEM, NULL, 0); break; case 'V': RBININFO ("versioninfo", R_CORE_BIN_ACC_VERSIONINFO, NULL, 0); break; case 'C': RBININFO ("signature", R_CORE_BIN_ACC_SIGNATURE, NULL, 0); break; case 'z': if (input[1] == 'z') { //izz switch (input[2]) { case '*': mode = R_CORE_BIN_RADARE; break; case 'j': mode = R_CORE_BIN_JSON; break; case 'q': //izzq if (input[3] == 'q') { //izzqq mode = R_CORE_BIN_SIMPLEST; input++; } else { mode = R_CORE_BIN_SIMPLE; } break; default: mode = R_CORE_BIN_PRINT; break; } input++; RBININFO ("strings", R_CORE_BIN_ACC_RAW_STRINGS, NULL, 0); } else { RBinObject *obj = r_bin_cur_object (core->bin); if (input[1] == 'q') { mode = (input[2] == 'q') ? R_CORE_BIN_SIMPLEST : R_CORE_BIN_SIMPLE; input++; } if (obj) { RBININFO ("strings", R_CORE_BIN_ACC_STRINGS, NULL, obj? r_list_length (obj->strings): 0); } } break; case 'c': // for r2 `ic` if (input[1] == '?') { eprintf ("Usage: ic[ljq*] [class-index or name]\n"); } else if (input[1] == ' ' || input[1] == 'q' || input[1] == 'j' || input[1] == 'l') { RBinClass *cls; RBinSymbol *sym; RListIter *iter, *iter2; RBinObject *obj = r_bin_cur_object (core->bin); if (obj) { if (input[2]) { int idx = -1; const char * cls_name = NULL; if (r_num_is_valid_input (core->num, input + 2)) { idx = r_num_math (core->num, input + 2); } else { const char * first_char = input + ((input[1] == ' ') ? 
1 : 2); int not_space = strspn (first_char, " "); if (first_char[not_space]) { cls_name = first_char + not_space; } } int count = 0; r_list_foreach (obj->classes, iter, cls) { if ((idx >= 0 && idx != count++) || (cls_name && strcmp (cls_name, cls->name) != 0)){ continue; } switch (input[1]) { case '*': r_list_foreach (cls->methods, iter2, sym) { r_cons_printf ("f sym.%s @ 0x%"PFMT64x "\n", sym->name, sym->vaddr); } input++; break; case 'l': r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? " ": ""; r_cons_printf ("%s0x%"PFMT64d, comma, sym->vaddr); } r_cons_newline (); input++; break; case 'j': input++; r_cons_printf ("\"class\":\"%s\"", cls->name); r_cons_printf (",\"methods\":["); r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? ",": ""; if (sym->method_flags) { char *flags = r_core_bin_method_flags_str (sym, R_CORE_BIN_JSON); r_cons_printf ("%s{\"name\":\"%s\",\"flags\":%s,\"vaddr\":%"PFMT64d "}", comma, sym->name, flags, sym->vaddr); R_FREE (flags); } else { r_cons_printf ("%s{\"name\":\"%s\",\"vaddr\":%"PFMT64d "}", comma, sym->name, sym->vaddr); } } r_cons_printf ("]"); break; default: r_cons_printf ("class %s\n", cls->name); r_list_foreach (cls->methods, iter2, sym) { char *flags = r_core_bin_method_flags_str (sym, 0); r_cons_printf ("0x%08"PFMT64x " method %s %s %s\n", sym->vaddr, cls->name, flags, sym->name); R_FREE (flags); } break; } goto done; } goto done; } else { playMsg (core, "classes", r_list_length (obj->classes)); if (input[1] == 'l' && obj) { // "icl" r_list_foreach (obj->classes, iter, cls) { r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? " ": ""; r_cons_printf ("%s0x%"PFMT64d, comma, sym->vaddr); } if (!r_list_empty (cls->methods)) { r_cons_newline (); } } } else { RBININFO ("classes", R_CORE_BIN_ACC_CLASSES, NULL, r_list_length (obj->classes)); } } } } else { RBinObject *obj = r_bin_cur_object (core->bin); int len = obj? 
r_list_length (obj->classes): 0; RBININFO ("classes", R_CORE_BIN_ACC_CLASSES, NULL, len); } break; case 'D': if (input[1] != ' ' || !demangle (core, input + 2)) { eprintf ("|Usage: iD lang symbolname\n"); } return 0; case 'a': switch (mode) { case R_CORE_BIN_RADARE: cmd_info (core, "iIiecsSmz*"); break; case R_CORE_BIN_JSON: cmd_info (core, "iIiecsSmzj"); break; case R_CORE_BIN_SIMPLE: cmd_info (core, "iIiecsSmzq"); break; default: cmd_info (core, "IiEecsSmz"); break; } break; case '?': { const char *help_message[] = { "Usage: i", "", "Get info from opened file (see rabin2's manpage)", "Output mode:", "", "", "'*'", "", "Output in radare commands", "'j'", "", "Output in json", "'q'", "", "Simple quiet output", "Actions:", "", "", "i|ij", "", "Show info of current file (in JSON)", "iA", "", "List archs", "ia", "", "Show all info (imports, exports, sections..)", "ib", "", "Reload the current buffer for setting of the bin (use once only)", "ic", "", "List classes, methods and fields", "iC", "", "Show signature info (entitlements, ...)", "id", "", "Debug information (source lines)", "iD", " lang sym", "demangle symbolname for given language", "ie", "", "Entrypoint", "iE", "", "Exports (global symbols)", "ih", "", "Headers (alias for iH)", "iHH", "", "Verbose Headers in raw text", "ii", "", "Imports", "iI", "", "Binary info", "ik", " [query]", "Key-value database from RBinObject", "il", "", "Libraries", "iL ", "[plugin]", "List all RBin plugins loaded or plugin details", "im", "", "Show info about predefined memory allocation", "iM", "", "Show main address", "io", " [file]", "Load info from file (or last opened) use bin.baddr", "ir", "", "Relocs", "iR", "", "Resources", "is", "", "Symbols", "iS ", "[entropy,sha1]", "Sections (choose which hash algorithm to use)", "iV", "", "Display file version info", "iz|izj", "", "Strings in data sections (in JSON/Base64)", "izz", "", "Search for Strings in the whole binary", "iZ", "", "Guess size of binary program", NULL }; r_core_cmd_help (core, help_message); } goto done; case '*': mode = R_CORE_BIN_RADARE; goto done; case 'q': mode = R_CORE_BIN_SIMPLE; cmd_info_bin (core, va, mode); goto done; case 'j': mode = R_CORE_BIN_JSON; if (is_array > 1) { mode |= R_CORE_BIN_ARRAY; } cmd_info_bin (core, va, mode); goto done; default: cmd_info_bin (core, va, mode); break; } input++; if ((*input == 'j' || *input == 'q') && !input[1]) { break; } } done: if (is_array) { r_cons_printf ("}\n"); } if (newline) { r_cons_newline (); } return 0; }
static int cmd_info(void *data, const char *input) { RCore *core = (RCore *) data; bool newline = r_config_get_i (core->config, "scr.interactive"); RBinObject *o = r_bin_cur_object (core->bin); RCoreFile *cf = core->file; int i, va = core->io->va || core->io->debug; int mode = 0; //R_CORE_BIN_SIMPLE; int is_array = 0; Sdb *db; for (i = 0; input[i] && input[i] != ' '; i++) ; if (i > 0) { switch (input[i - 1]) { case '*': mode = R_CORE_BIN_RADARE; break; case 'j': mode = R_CORE_BIN_JSON; break; case 'q': mode = R_CORE_BIN_SIMPLE; break; } } if (mode == R_CORE_BIN_JSON) { if (strlen (input + 1) > 1) { is_array = 1; } } if (is_array) { r_cons_printf ("{"); } if (!*input) { cmd_info_bin (core, va, mode); } /* i* is an alias for iI* */ if (!strcmp (input, "*")) { input = "I*"; } RBinObject *obj = r_bin_cur_object (core->bin); while (*input) { switch (*input) { case 'b': // "ib" { ut64 baddr = r_config_get_i (core->config, "bin.baddr"); if (input[1] == ' ') { baddr = r_num_math (core->num, input + 1); } // XXX: this will reload the bin using the buffer. // An assumption is made that assumes there is an underlying // plugin that will be used to load the bin (e.g. malloc://) // TODO: Might be nice to reload a bin at a specified offset? r_core_bin_reload (core, NULL, baddr); r_core_block_read (core); newline = false; } break; case 'k': db = o? o->kv: NULL; //:eprintf ("db = %p\n", db); switch (input[1]) { case 'v': if (db) { char *o = sdb_querys (db, NULL, 0, input + 3); if (o && *o) { r_cons_print (o); } free (o); } break; case '*': r_core_bin_export_info_rad (core); break; case '.': case ' ': if (db) { char *o = sdb_querys (db, NULL, 0, input + 2); if (o && *o) { r_cons_print (o); } free (o); } break; case '\0': if (db) { char *o = sdb_querys (db, NULL, 0, "*"); if (o && *o) { r_cons_print (o); } free (o); } break; case '?': default: eprintf ("Usage: ik [sdb-query]\n"); eprintf ("Usage: ik* # load all header information\n"); } goto done; break; case 'o': { if (!cf) { eprintf ("Core file not open\n"); return 0; } const char *fn = input[1] == ' '? input + 2: cf->desc->name; ut64 baddr = r_config_get_i (core->config, "bin.baddr"); r_core_bin_load (core, fn, baddr); } break; #define RBININFO(n,x,y,z)\ if (is_array) {\ if (is_array == 1) { is_array++;\ } else { r_cons_printf (",");}\ r_cons_printf ("\"%s\":",n);\ }\ if (z) { playMsg (core, n, z);}\ r_core_bin_info (core, x, mode, va, NULL, y); case 'A': newline = false; if (input[1] == 'j') { r_cons_printf ("{"); r_bin_list_archs (core->bin, 'j'); r_cons_printf ("}\n"); } else { r_bin_list_archs (core->bin, 1); } break; case 'E': RBININFO ("exports", R_CORE_BIN_ACC_EXPORTS, NULL, 0); break; case 'Z': RBININFO ("size", R_CORE_BIN_ACC_SIZE, NULL, 0); break; case 'S': //we comes from ia or iS if ((input[1] == 'm' && input[2] == 'z') || !input[1]) { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, NULL, 0); } else { //iS entropy,sha1 RBinObject *obj = r_bin_cur_object (core->bin); if (mode == R_CORE_BIN_RADARE || mode == R_CORE_BIN_JSON || mode == R_CORE_BIN_SIMPLE) { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, input + 2, obj? r_list_length (obj->sections): 0); } else { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, input + 1, obj? 
r_list_length (obj->sections): 0); } //we move input until get '\0' while (*(++input)) ; //input-- because we are inside a while that does input++ // oob read if not input-- input--; } break; case 'H': if (input[1] == 'H') { // "iHH" RBININFO ("header", R_CORE_BIN_ACC_HEADER, NULL, -1); break; } case 'h': RBININFO ("fields", R_CORE_BIN_ACC_FIELDS, NULL, 0); break; case 'l': RBININFO ("libs", R_CORE_BIN_ACC_LIBS, NULL, obj? r_list_length (obj->libs): 0); break; case 'L': { char *ptr = strchr (input, ' '); int json = input[1] == 'j'? 'j': 0; if (ptr && ptr[1]) { const char *plugin_name = ptr + 1; if (is_array) { r_cons_printf ("\"plugin\": "); } r_bin_list_plugin (core->bin, plugin_name, json); } else { r_bin_list (core->bin, json); } newline = false; goto done; } break; case 's': if (input[1] == '.') { ut64 addr = core->offset + (core->print->cur_enabled? core->print->cur: 0); RFlagItem *f = r_flag_get_at (core->flags, addr, false); if (f) { if (f->offset == addr || !f->offset) { r_cons_printf ("%s", f->name); } else { r_cons_printf ("%s+%d", f->name, (int) (addr - f->offset)); } } input++; break; } else { RBinObject *obj = r_bin_cur_object (core->bin); RBININFO ("symbols", R_CORE_BIN_ACC_SYMBOLS, NULL, obj? r_list_length (obj->symbols): 0); break; } case 'R': if (input[1] == '*') { mode = R_CORE_BIN_RADARE; } else if (input[1] == 'j') { mode = R_CORE_BIN_JSON; } RBININFO ("resources", R_CORE_BIN_ACC_RESOURCES, NULL, 0); break; case 'r': RBININFO ("relocs", R_CORE_BIN_ACC_RELOCS, NULL, 0); break; case 'd': RBININFO ("dwarf", R_CORE_BIN_ACC_DWARF, NULL, -1); break; case 'i': RBININFO ("imports",R_CORE_BIN_ACC_IMPORTS, NULL, obj? r_list_length (obj->imports): 0); break; case 'I': RBININFO ("info", R_CORE_BIN_ACC_INFO, NULL, 0); break; case 'e': RBININFO ("entries", R_CORE_BIN_ACC_ENTRIES, NULL, 0); break; case 'M': RBININFO ("main", R_CORE_BIN_ACC_MAIN, NULL, 0); break; case 'm': RBININFO ("memory", R_CORE_BIN_ACC_MEM, NULL, 0); break; case 'V': RBININFO ("versioninfo", R_CORE_BIN_ACC_VERSIONINFO, NULL, 0); break; case 'C': RBININFO ("signature", R_CORE_BIN_ACC_SIGNATURE, NULL, 0); break; case 'z': if (input[1] == 'z') { //izz switch (input[2]) { case '*': mode = R_CORE_BIN_RADARE; break; case 'j': mode = R_CORE_BIN_JSON; break; case 'q': //izzq if (input[3] == 'q') { //izzqq mode = R_CORE_BIN_SIMPLEST; input++; } else { mode = R_CORE_BIN_SIMPLE; } break; default: mode = R_CORE_BIN_PRINT; break; } input++; RBININFO ("strings", R_CORE_BIN_ACC_RAW_STRINGS, NULL, 0); } else { RBinObject *obj = r_bin_cur_object (core->bin); if (input[1] == 'q') { mode = (input[2] == 'q') ? R_CORE_BIN_SIMPLEST : R_CORE_BIN_SIMPLE; input++; } if (obj) { RBININFO ("strings", R_CORE_BIN_ACC_STRINGS, NULL, obj? r_list_length (obj->strings): 0); } } break; case 'c': // for r2 `ic` if (input[1] == '?') { eprintf ("Usage: ic[ljq*] [class-index or name]\n"); } else if (input[1] == ' ' || input[1] == 'q' || input[1] == 'j' || input[1] == 'l') { RBinClass *cls; RBinSymbol *sym; RListIter *iter, *iter2; RBinObject *obj = r_bin_cur_object (core->bin); if (obj) { if (input[2]) { int idx = -1; const char * cls_name = NULL; if (r_num_is_valid_input (core->num, input + 2)) { idx = r_num_math (core->num, input + 2); } else { const char * first_char = input + ((input[1] == ' ') ? 
1 : 2); int not_space = strspn (first_char, " "); if (first_char[not_space]) { cls_name = first_char + not_space; } } int count = 0; r_list_foreach (obj->classes, iter, cls) { if ((idx >= 0 && idx != count++) || (cls_name && strcmp (cls_name, cls->name) != 0)){ continue; } switch (input[1]) { case '*': r_list_foreach (cls->methods, iter2, sym) { r_cons_printf ("f sym.%s @ 0x%"PFMT64x "\n", sym->name, sym->vaddr); } input++; break; case 'l': r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? " ": ""; r_cons_printf ("%s0x%"PFMT64d, comma, sym->vaddr); } r_cons_newline (); input++; break; case 'j': input++; r_cons_printf ("\"class\":\"%s\"", cls->name); r_cons_printf (",\"methods\":["); r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? ",": ""; if (sym->method_flags) { char *flags = r_core_bin_method_flags_str (sym, R_CORE_BIN_JSON); r_cons_printf ("%s{\"name\":\"%s\",\"flags\":%s,\"vaddr\":%"PFMT64d "}", comma, sym->name, flags, sym->vaddr); R_FREE (flags); } else { r_cons_printf ("%s{\"name\":\"%s\",\"vaddr\":%"PFMT64d "}", comma, sym->name, sym->vaddr); } } r_cons_printf ("]"); break; default: r_cons_printf ("class %s\n", cls->name); r_list_foreach (cls->methods, iter2, sym) { char *flags = r_core_bin_method_flags_str (sym, 0); r_cons_printf ("0x%08"PFMT64x " method %s %s %s\n", sym->vaddr, cls->name, flags, sym->name); R_FREE (flags); } break; } goto done; } goto done; } else { playMsg (core, "classes", r_list_length (obj->classes)); if (input[1] == 'l' && obj) { // "icl" r_list_foreach (obj->classes, iter, cls) { r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? " ": ""; r_cons_printf ("%s0x%"PFMT64d, comma, sym->vaddr); } if (!r_list_empty (cls->methods)) { r_cons_newline (); } } } else { RBININFO ("classes", R_CORE_BIN_ACC_CLASSES, NULL, r_list_length (obj->classes)); } } } } else { RBinObject *obj = r_bin_cur_object (core->bin); int len = obj? 
r_list_length (obj->classes): 0; RBININFO ("classes", R_CORE_BIN_ACC_CLASSES, NULL, len); } break; case 'D': if (input[1] != ' ' || !demangle (core, input + 2)) { eprintf ("|Usage: iD lang symbolname\n"); } return 0; case 'a': switch (mode) { case R_CORE_BIN_RADARE: cmd_info (core, "iIiecsSmz*"); break; case R_CORE_BIN_JSON: cmd_info (core, "iIiecsSmzj"); break; case R_CORE_BIN_SIMPLE: cmd_info (core, "iIiecsSmzq"); break; default: cmd_info (core, "IiEecsSmz"); break; } break; case '?': { const char *help_message[] = { "Usage: i", "", "Get info from opened file (see rabin2's manpage)", "Output mode:", "", "", "'*'", "", "Output in radare commands", "'j'", "", "Output in json", "'q'", "", "Simple quiet output", "Actions:", "", "", "i|ij", "", "Show info of current file (in JSON)", "iA", "", "List archs", "ia", "", "Show all info (imports, exports, sections..)", "ib", "", "Reload the current buffer for setting of the bin (use once only)", "ic", "", "List classes, methods and fields", "iC", "", "Show signature info (entitlements, ...)", "id", "", "Debug information (source lines)", "iD", " lang sym", "demangle symbolname for given language", "ie", "", "Entrypoint", "iE", "", "Exports (global symbols)", "ih", "", "Headers (alias for iH)", "iHH", "", "Verbose Headers in raw text", "ii", "", "Imports", "iI", "", "Binary info", "ik", " [query]", "Key-value database from RBinObject", "il", "", "Libraries", "iL ", "[plugin]", "List all RBin plugins loaded or plugin details", "im", "", "Show info about predefined memory allocation", "iM", "", "Show main address", "io", " [file]", "Load info from file (or last opened) use bin.baddr", "ir", "", "Relocs", "iR", "", "Resources", "is", "", "Symbols", "iS ", "[entropy,sha1]", "Sections (choose which hash algorithm to use)", "iV", "", "Display file version info", "iz|izj", "", "Strings in data sections (in JSON/Base64)", "izz", "", "Search for Strings in the whole binary", "iZ", "", "Guess size of binary program", NULL }; r_core_cmd_help (core, help_message); } goto done; case '*': mode = R_CORE_BIN_RADARE; goto done; case 'q': mode = R_CORE_BIN_SIMPLE; cmd_info_bin (core, va, mode); goto done; case 'j': mode = R_CORE_BIN_JSON; if (is_array > 1) { mode |= R_CORE_BIN_ARRAY; } cmd_info_bin (core, va, mode); goto done; default: cmd_info_bin (core, va, mode); break; } input++; if ((*input == 'j' || *input == 'q') && !input[1]) { break; } } done: if (is_array) { r_cons_printf ("}\n"); } if (newline) { r_cons_newline (); } return 0; }
static int cmd_info(void *data, const char *input) { RCore *core = (RCore *) data; bool newline = r_config_get_i (core->config, "scr.interactive"); RBinObject *o = r_bin_cur_object (core->bin); RCoreFile *cf = core->file; int i, va = core->io->va || core->io->debug; int mode = 0; //R_CORE_BIN_SIMPLE; int is_array = 0; Sdb *db; for (i = 0; input[i] && input[i] != ' '; i++) ; if (i > 0) { switch (input[i - 1]) { case '*': mode = R_CORE_BIN_RADARE; break; case 'j': mode = R_CORE_BIN_JSON; break; case 'q': mode = R_CORE_BIN_SIMPLE; break; } } if (mode == R_CORE_BIN_JSON) { if (strlen (input + 1) > 1) { is_array = 1; } } if (is_array) { r_cons_printf ("{"); } if (!*input) { cmd_info_bin (core, va, mode); } /* i* is an alias for iI* */ if (!strcmp (input, "*")) { input = "I*"; } RBinObject *obj = r_bin_cur_object (core->bin); while (*input) { switch (*input) { case 'b': // "ib" { ut64 baddr = r_config_get_i (core->config, "bin.baddr"); if (input[1] == ' ') { baddr = r_num_math (core->num, input + 1); } // XXX: this will reload the bin using the buffer. // An assumption is made that assumes there is an underlying // plugin that will be used to load the bin (e.g. malloc://) // TODO: Might be nice to reload a bin at a specified offset? r_core_bin_reload (core, NULL, baddr); r_core_block_read (core); newline = false; } break; case 'k': db = o? o->kv: NULL; //:eprintf ("db = %p\n", db); switch (input[1]) { case 'v': if (db) { char *o = sdb_querys (db, NULL, 0, input + 3); if (o && *o) { r_cons_print (o); } free (o); } break; case '*': r_core_bin_export_info_rad (core); break; case '.': case ' ': if (db) { char *o = sdb_querys (db, NULL, 0, input + 2); if (o && *o) { r_cons_print (o); } free (o); } break; case '\0': if (db) { char *o = sdb_querys (db, NULL, 0, "*"); if (o && *o) { r_cons_print (o); } free (o); } break; case '?': default: eprintf ("Usage: ik [sdb-query]\n"); eprintf ("Usage: ik* # load all header information\n"); } goto done; break; case 'o': { if (!cf) { eprintf ("Core file not open\n"); return 0; } const char *fn = input[1] == ' '? input + 2: cf->desc->name; ut64 baddr = r_config_get_i (core->config, "bin.baddr"); r_core_bin_load (core, fn, baddr); } break; #define RBININFO(n,x,y,z)\ if (is_array) {\ if (is_array == 1) { is_array++;\ } else { r_cons_printf (",");}\ r_cons_printf ("\"%s\":",n);\ }\ if (z) { playMsg (core, n, z);}\ r_core_bin_info (core, x, mode, va, NULL, y); case 'A': newline = false; if (input[1] == 'j') { r_cons_printf ("{"); r_bin_list_archs (core->bin, 'j'); r_cons_printf ("}\n"); } else { r_bin_list_archs (core->bin, 1); } break; case 'E': RBININFO ("exports", R_CORE_BIN_ACC_EXPORTS, NULL, 0); break; case 'Z': RBININFO ("size", R_CORE_BIN_ACC_SIZE, NULL, 0); break; case 'S': //we comes from ia or iS if ((input[1] == 'm' && input[2] == 'z') || !input[1]) { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, NULL, 0); } else { //iS entropy,sha1 RBinObject *obj = r_bin_cur_object (core->bin); if (mode == R_CORE_BIN_RADARE || mode == R_CORE_BIN_JSON || mode == R_CORE_BIN_SIMPLE) { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, input + 2, obj? r_list_length (obj->sections): 0); } else { RBININFO ("sections", R_CORE_BIN_ACC_SECTIONS, input + 1, obj? 
r_list_length (obj->sections): 0); } //we move input until get '\0' while (*(++input)) ; //input-- because we are inside a while that does input++ // oob read if not input-- input--; } break; case 'H': if (input[1] == 'H') { // "iHH" RBININFO ("header", R_CORE_BIN_ACC_HEADER, NULL, -1); break; } case 'h': RBININFO ("fields", R_CORE_BIN_ACC_FIELDS, NULL, 0); break; case 'l': { RBinObject *obj = r_bin_cur_object (core->bin); RBININFO ("libs", R_CORE_BIN_ACC_LIBS, NULL, obj? r_list_length (obj->libs): 0); } break; case 'L': { char *ptr = strchr (input, ' '); int json = input[1] == 'j'? 'j': 0; if (ptr && ptr[1]) { const char *plugin_name = ptr + 1; if (is_array) { r_cons_printf ("\"plugin\": "); } r_bin_list_plugin (core->bin, plugin_name, json); } else { r_bin_list (core->bin, json); } newline = false; goto done; } break; case 's': if (input[1] == '.') { ut64 addr = core->offset + (core->print->cur_enabled? core->print->cur: 0); RFlagItem *f = r_flag_get_at (core->flags, addr, false); if (f) { if (f->offset == addr || !f->offset) { r_cons_printf ("%s", f->name); } else { r_cons_printf ("%s+%d", f->name, (int) (addr - f->offset)); } } input++; break; } else { RBinObject *obj = r_bin_cur_object (core->bin); RBININFO ("symbols", R_CORE_BIN_ACC_SYMBOLS, NULL, obj? r_list_length (obj->symbols): 0); break; } case 'R': if (input[1] == '*') { mode = R_CORE_BIN_RADARE; } else if (input[1] == 'j') { mode = R_CORE_BIN_JSON; } RBININFO ("resources", R_CORE_BIN_ACC_RESOURCES, NULL, 0); break; case 'r': RBININFO ("relocs", R_CORE_BIN_ACC_RELOCS, NULL, 0); break; case 'd': RBININFO ("dwarf", R_CORE_BIN_ACC_DWARF, NULL, -1); break; case 'i': { RBinObject *obj = r_bin_cur_object (core->bin); RBININFO ("imports", R_CORE_BIN_ACC_IMPORTS, NULL, obj? r_list_length (obj->imports): 0); } break; case 'I': RBININFO ("info", R_CORE_BIN_ACC_INFO, NULL, 0); break; case 'e': RBININFO ("entries", R_CORE_BIN_ACC_ENTRIES, NULL, 0); break; case 'M': RBININFO ("main", R_CORE_BIN_ACC_MAIN, NULL, 0); break; case 'm': RBININFO ("memory", R_CORE_BIN_ACC_MEM, NULL, 0); break; case 'V': RBININFO ("versioninfo", R_CORE_BIN_ACC_VERSIONINFO, NULL, 0); break; case 'C': RBININFO ("signature", R_CORE_BIN_ACC_SIGNATURE, NULL, 0); break; case 'z': if (input[1] == 'z') { //izz switch (input[2]) { case '*': mode = R_CORE_BIN_RADARE; break; case 'j': mode = R_CORE_BIN_JSON; break; case 'q': //izzq if (input[3] == 'q') { //izzqq mode = R_CORE_BIN_SIMPLEST; input++; } else { mode = R_CORE_BIN_SIMPLE; } break; default: mode = R_CORE_BIN_PRINT; break; } input++; RBININFO ("strings", R_CORE_BIN_ACC_RAW_STRINGS, NULL, 0); } else { RBinObject *obj = r_bin_cur_object (core->bin); if (input[1] == 'q') { mode = (input[2] == 'q') ? R_CORE_BIN_SIMPLEST : R_CORE_BIN_SIMPLE; input++; } if (obj) { RBININFO ("strings", R_CORE_BIN_ACC_STRINGS, NULL, obj? r_list_length (obj->strings): 0); } } break; case 'c': // for r2 `ic` if (input[1] == '?') { eprintf ("Usage: ic[ljq*] [class-index or name]\n"); } else if (input[1] == ' ' || input[1] == 'q' || input[1] == 'j' || input[1] == 'l') { RBinClass *cls; RBinSymbol *sym; RListIter *iter, *iter2; RBinObject *obj = r_bin_cur_object (core->bin); if (obj) { if (input[2]) { int idx = -1; const char * cls_name = NULL; if (r_num_is_valid_input (core->num, input + 2)) { idx = r_num_math (core->num, input + 2); } else { const char * first_char = input + ((input[1] == ' ') ? 
1 : 2); int not_space = strspn (first_char, " "); if (first_char[not_space]) { cls_name = first_char + not_space; } } int count = 0; r_list_foreach (obj->classes, iter, cls) { if ((idx >= 0 && idx != count++) || (cls_name && strcmp (cls_name, cls->name) != 0)){ continue; } switch (input[1]) { case '*': r_list_foreach (cls->methods, iter2, sym) { r_cons_printf ("f sym.%s @ 0x%"PFMT64x "\n", sym->name, sym->vaddr); } input++; break; case 'l': r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? " ": ""; r_cons_printf ("%s0x%"PFMT64d, comma, sym->vaddr); } r_cons_newline (); input++; break; case 'j': input++; r_cons_printf ("\"class\":\"%s\"", cls->name); r_cons_printf (",\"methods\":["); r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? ",": ""; if (sym->method_flags) { char *flags = r_core_bin_method_flags_str (sym, R_CORE_BIN_JSON); r_cons_printf ("%s{\"name\":\"%s\",\"flags\":%s,\"vaddr\":%"PFMT64d "}", comma, sym->name, flags, sym->vaddr); R_FREE (flags); } else { r_cons_printf ("%s{\"name\":\"%s\",\"vaddr\":%"PFMT64d "}", comma, sym->name, sym->vaddr); } } r_cons_printf ("]"); break; default: r_cons_printf ("class %s\n", cls->name); r_list_foreach (cls->methods, iter2, sym) { char *flags = r_core_bin_method_flags_str (sym, 0); r_cons_printf ("0x%08"PFMT64x " method %s %s %s\n", sym->vaddr, cls->name, flags, sym->name); R_FREE (flags); } break; } goto done; } goto done; } else { playMsg (core, "classes", r_list_length (obj->classes)); if (input[1] == 'l' && obj) { // "icl" r_list_foreach (obj->classes, iter, cls) { r_list_foreach (cls->methods, iter2, sym) { const char *comma = iter2->p? " ": ""; r_cons_printf ("%s0x%"PFMT64d, comma, sym->vaddr); } if (!r_list_empty (cls->methods)) { r_cons_newline (); } } } else { RBININFO ("classes", R_CORE_BIN_ACC_CLASSES, NULL, r_list_length (obj->classes)); } } } } else { RBinObject *obj = r_bin_cur_object (core->bin); int len = obj? 
r_list_length (obj->classes): 0; RBININFO ("classes", R_CORE_BIN_ACC_CLASSES, NULL, len); } break; case 'D': if (input[1] != ' ' || !demangle (core, input + 2)) { eprintf ("|Usage: iD lang symbolname\n"); } return 0; case 'a': switch (mode) { case R_CORE_BIN_RADARE: cmd_info (core, "iIiecsSmz*"); break; case R_CORE_BIN_JSON: cmd_info (core, "iIiecsSmzj"); break; case R_CORE_BIN_SIMPLE: cmd_info (core, "iIiecsSmzq"); break; default: cmd_info (core, "IiEecsSmz"); break; } break; case '?': { const char *help_message[] = { "Usage: i", "", "Get info from opened file (see rabin2's manpage)", "Output mode:", "", "", "'*'", "", "Output in radare commands", "'j'", "", "Output in json", "'q'", "", "Simple quiet output", "Actions:", "", "", "i|ij", "", "Show info of current file (in JSON)", "iA", "", "List archs", "ia", "", "Show all info (imports, exports, sections..)", "ib", "", "Reload the current buffer for setting of the bin (use once only)", "ic", "", "List classes, methods and fields", "iC", "", "Show signature info (entitlements, ...)", "id", "", "Debug information (source lines)", "iD", " lang sym", "demangle symbolname for given language", "ie", "", "Entrypoint", "iE", "", "Exports (global symbols)", "ih", "", "Headers (alias for iH)", "iHH", "", "Verbose Headers in raw text", "ii", "", "Imports", "iI", "", "Binary info", "ik", " [query]", "Key-value database from RBinObject", "il", "", "Libraries", "iL ", "[plugin]", "List all RBin plugins loaded or plugin details", "im", "", "Show info about predefined memory allocation", "iM", "", "Show main address", "io", " [file]", "Load info from file (or last opened) use bin.baddr", "ir", "", "Relocs", "iR", "", "Resources", "is", "", "Symbols", "iS ", "[entropy,sha1]", "Sections (choose which hash algorithm to use)", "iV", "", "Display file version info", "iz|izj", "", "Strings in data sections (in JSON/Base64)", "izz", "", "Search for Strings in the whole binary", "iZ", "", "Guess size of binary program", NULL }; r_core_cmd_help (core, help_message); } goto done; case '*': mode = R_CORE_BIN_RADARE; goto done; case 'q': mode = R_CORE_BIN_SIMPLE; cmd_info_bin (core, va, mode); goto done; case 'j': mode = R_CORE_BIN_JSON; if (is_array > 1) { mode |= R_CORE_BIN_ARRAY; } cmd_info_bin (core, va, mode); goto done; default: cmd_info_bin (core, va, mode); break; } input++; if ((*input == 'j' || *input == 'q') && !input[1]) { break; } } done: if (is_array) { r_cons_printf ("}\n"); } if (newline) { r_cons_newline (); } return 0; }
{'added': [(398, "\t\tcase 'l':"), (399, '\t\t\t {'), (400, '\t\t\t\t RBinObject *obj = r_bin_cur_object (core->bin);'), (401, '\t\t\t\t RBININFO ("libs", R_CORE_BIN_ACC_LIBS, NULL, obj? r_list_length (obj->libs): 0);'), (402, '\t\t\t }'), (403, '\t\t\t break;'), (452, "\t\tcase 'i': {"), (453, '\t\t\t\t RBinObject *obj = r_bin_cur_object (core->bin);'), (454, '\t\t\t\t RBININFO ("imports", R_CORE_BIN_ACC_IMPORTS, NULL,'), (455, '\t\t\t\t\t\t obj? r_list_length (obj->imports): 0);'), (456, '\t\t\t }'), (457, '\t\t\t break;')], 'deleted': [(398, '\t\tcase \'l\': RBININFO ("libs", R_CORE_BIN_ACC_LIBS, NULL, obj? r_list_length (obj->libs): 0); break;'), (447, '\t\tcase \'i\': RBININFO ("imports",R_CORE_BIN_ACC_IMPORTS, NULL, obj? r_list_length (obj->imports): 0); break;')]}
12
2
646
4,110
https://github.com/radare/radare2
CVE-2017-9761
['CWE-119']
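The diff for this record moves the "il"/"ii" handlers so that the current RBinObject is re-fetched with r_bin_cur_object() and NULL-checked immediately before its lists are dereferenced, rather than reusing a pointer captured earlier. Below is a minimal, self-contained sketch of that guard pattern; the types and bin_cur_object() helper are hypothetical stand-ins, not radare2's actual API.

/* Minimal sketch of the re-fetch-and-NULL-check pattern used in the fix above.
 * All types and helpers here are illustrative stand-ins, not radare2 APIs. */
#include <stdio.h>
#include <stddef.h>

typedef struct { size_t length; } List;
typedef struct { List *libs; List *imports; } BinObject;
typedef struct { BinObject *cur; } Bin;

/* May return NULL, e.g. when no file is loaded or the object was invalidated. */
static BinObject *bin_cur_object(Bin *bin) {
    return bin ? bin->cur : NULL;
}

static size_t list_length(const List *l) {
    return l ? l->length : 0;
}

/* Re-fetch and NULL-check right before dereferencing, as the patched cases do. */
static size_t count_libs(Bin *bin) {
    BinObject *obj = bin_cur_object(bin);
    return obj ? list_length(obj->libs) : 0;
}

int main(void) {
    Bin bin = { .cur = NULL };          /* no object loaded yet */
    printf("%zu\n", count_libs(&bin));  /* safely prints 0 instead of crashing */
    return 0;
}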
tja1101_driver.c
tja1101Init
/** * @file tja1101_driver.c * @brief TJA1101 100Base-T1 Ethernet PHY driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "core/net.h" #include "drivers/phy/tja1101_driver.h" #include "debug.h" /** * @brief TJA1101 Ethernet PHY driver **/ const PhyDriver tja1101PhyDriver = { tja1101Init, tja1101Tick, tja1101EnableIrq, tja1101DisableIrq, tja1101EventHandler }; /** * @brief TJA1101 PHY transceiver initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t tja1101Init(NetInterface *interface) { uint16_t value; //Debug message TRACE_INFO("Initializing TJA1101...\r\n"); //Undefined PHY address? if(interface->phyAddr >= 32) { //Use the default address interface->phyAddr = TJA1101_PHY_ADDR; } //Initialize serial management interface if(interface->smiDriver != NULL) { interface->smiDriver->init(); } //Initialize external interrupt line driver if(interface->extIntDriver != NULL) { interface->extIntDriver->init(); } //Reset PHY transceiver tja1101WritePhyReg(interface, TJA1101_BASIC_CTRL, TJA1101_BASIC_CTRL_RESET); //Wait for the reset to complete while(tja1101ReadPhyReg(interface, TJA1101_BASIC_CTRL) & TJA1101_BASIC_CTRL_RESET) { } //Dump PHY registers for debugging purpose tja1101DumpPhyReg(interface); //Enable configuration register access value = tja1101ReadPhyReg(interface, TJA1101_EXTENDED_CTRL); value |= TJA1101_EXTENDED_CTRL_CONFIG_EN; tja1101WritePhyReg(interface, TJA1101_EXTENDED_CTRL, value); //Select RMII mode (25MHz XTAL) value = tja1101ReadPhyReg(interface, TJA1101_CONFIG1); value &= ~TJA1101_CONFIG1_MII_MODE; value |= TJA1101_CONFIG1_MII_MODE_RMII_25MHZ; tja1101WritePhyReg(interface, TJA1101_CONFIG1, value); //The PHY is configured for autonomous operation value = tja1101ReadPhyReg(interface, TJA1101_COMM_CTRL); value |= TJA1101_COMM_CTRL_AUTO_OP; tja1101WritePhyReg(interface, TJA1101_COMM_CTRL, value); //Force the TCP/IP stack to poll the link state at startup interface->phyEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; } /** * @brief TJA1101 timer handler * @param[in] interface Underlying network interface **/ void tja1101Tick(NetInterface *interface) { uint16_t value; bool_t linkState; //No external interrupt line driver? if(interface->extIntDriver == NULL) { //Read status register value = tja1101ReadPhyReg(interface, TJA1101_BASIC_STAT); //Retrieve current link state linkState = (value & TJA1101_BASIC_STAT_LINK_STATUS) ? TRUE : FALSE; //Link up event? 
if(linkState && !interface->linkState) { //Set event flag interface->phyEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); } //Link down event? else if(!linkState && interface->linkState) { //Set event flag interface->phyEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); } } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void tja1101EnableIrq(NetInterface *interface) { //Enable PHY transceiver interrupts if(interface->extIntDriver != NULL) { interface->extIntDriver->enableIrq(); } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void tja1101DisableIrq(NetInterface *interface) { //Disable PHY transceiver interrupts if(interface->extIntDriver != NULL) { interface->extIntDriver->disableIrq(); } } /** * @brief TJA1101 event handler * @param[in] interface Underlying network interface **/ void tja1101EventHandler(NetInterface *interface) { uint16_t value; //Read status register value = tja1101ReadPhyReg(interface, TJA1101_BASIC_STAT); //Link is up? if((value & TJA1101_BASIC_STAT_LINK_STATUS) != 0) { //Adjust MAC configuration parameters for proper operation interface->linkSpeed = NIC_LINK_SPEED_100MBPS; interface->duplexMode = NIC_FULL_DUPLEX_MODE; interface->nicDriver->updateMacConfig(interface); //Update link state interface->linkState = TRUE; } else { //Update link state interface->linkState = FALSE; } //Process link state change event nicNotifyLinkChange(interface); } /** * @brief Write PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @param[in] data Register value **/ void tja1101WritePhyReg(NetInterface *interface, uint8_t address, uint16_t data) { //Write the specified PHY register if(interface->smiDriver != NULL) { interface->smiDriver->writePhyReg(SMI_OPCODE_WRITE, interface->phyAddr, address, data); } else { interface->nicDriver->writePhyReg(SMI_OPCODE_WRITE, interface->phyAddr, address, data); } } /** * @brief Read PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @return Register value **/ uint16_t tja1101ReadPhyReg(NetInterface *interface, uint8_t address) { uint16_t data; //Read the specified PHY register if(interface->smiDriver != NULL) { data = interface->smiDriver->readPhyReg(SMI_OPCODE_READ, interface->phyAddr, address); } else { data = interface->nicDriver->readPhyReg(SMI_OPCODE_READ, interface->phyAddr, address); } //Return the value of the PHY register return data; } /** * @brief Dump PHY registers for debugging purpose * @param[in] interface Underlying network interface **/ void tja1101DumpPhyReg(NetInterface *interface) { uint8_t i; //Loop through PHY registers for(i = 0; i < 32; i++) { //Display current PHY register TRACE_DEBUG("%02" PRIu8 ": 0x%04" PRIX16 "\r\n", i, tja1101ReadPhyReg(interface, i)); } //Terminate with a line feed TRACE_DEBUG("\r\n"); }
/** * @file tja1101_driver.c * @brief TJA1101 100Base-T1 Ethernet PHY driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "core/net.h" #include "drivers/phy/tja1101_driver.h" #include "debug.h" /** * @brief TJA1101 Ethernet PHY driver **/ const PhyDriver tja1101PhyDriver = { tja1101Init, tja1101Tick, tja1101EnableIrq, tja1101DisableIrq, tja1101EventHandler }; /** * @brief TJA1101 PHY transceiver initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t tja1101Init(NetInterface *interface) { uint16_t value; //Debug message TRACE_INFO("Initializing TJA1101...\r\n"); //Undefined PHY address? if(interface->phyAddr >= 32) { //Use the default address interface->phyAddr = TJA1101_PHY_ADDR; } //Initialize serial management interface if(interface->smiDriver != NULL) { interface->smiDriver->init(); } //Initialize external interrupt line driver if(interface->extIntDriver != NULL) { interface->extIntDriver->init(); } //Reset PHY transceiver tja1101WritePhyReg(interface, TJA1101_BASIC_CTRL, TJA1101_BASIC_CTRL_RESET); //Wait for the reset to complete while(tja1101ReadPhyReg(interface, TJA1101_BASIC_CTRL) & TJA1101_BASIC_CTRL_RESET) { } //Dump PHY registers for debugging purpose tja1101DumpPhyReg(interface); //Enable configuration register access value = tja1101ReadPhyReg(interface, TJA1101_EXTENDED_CTRL); value |= TJA1101_EXTENDED_CTRL_CONFIG_EN; tja1101WritePhyReg(interface, TJA1101_EXTENDED_CTRL, value); //Select RMII mode (50MHz output on REF_CLK) value = tja1101ReadPhyReg(interface, TJA1101_CONFIG1); value &= ~TJA1101_CONFIG1_MII_MODE; value |= TJA1101_CONFIG1_MII_MODE_RMII_50MHZ_REF_CLK_OUT; tja1101WritePhyReg(interface, TJA1101_CONFIG1, value); //The PHY is configured for autonomous operation value = tja1101ReadPhyReg(interface, TJA1101_COMM_CTRL); value |= TJA1101_COMM_CTRL_AUTO_OP; tja1101WritePhyReg(interface, TJA1101_COMM_CTRL, value); //Force the TCP/IP stack to poll the link state at startup interface->phyEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; } /** * @brief TJA1101 timer handler * @param[in] interface Underlying network interface **/ void tja1101Tick(NetInterface *interface) { uint16_t value; bool_t linkState; //No external interrupt line driver? if(interface->extIntDriver == NULL) { //Read status register value = tja1101ReadPhyReg(interface, TJA1101_BASIC_STAT); //Retrieve current link state linkState = (value & TJA1101_BASIC_STAT_LINK_STATUS) ? 
TRUE : FALSE; //Link up event? if(linkState && !interface->linkState) { //Set event flag interface->phyEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); } //Link down event? else if(!linkState && interface->linkState) { //Set event flag interface->phyEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); } } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void tja1101EnableIrq(NetInterface *interface) { //Enable PHY transceiver interrupts if(interface->extIntDriver != NULL) { interface->extIntDriver->enableIrq(); } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void tja1101DisableIrq(NetInterface *interface) { //Disable PHY transceiver interrupts if(interface->extIntDriver != NULL) { interface->extIntDriver->disableIrq(); } } /** * @brief TJA1101 event handler * @param[in] interface Underlying network interface **/ void tja1101EventHandler(NetInterface *interface) { uint16_t value; //Read status register value = tja1101ReadPhyReg(interface, TJA1101_BASIC_STAT); //Link is up? if((value & TJA1101_BASIC_STAT_LINK_STATUS) != 0) { //Adjust MAC configuration parameters for proper operation interface->linkSpeed = NIC_LINK_SPEED_100MBPS; interface->duplexMode = NIC_FULL_DUPLEX_MODE; interface->nicDriver->updateMacConfig(interface); //Update link state interface->linkState = TRUE; } else { //Update link state interface->linkState = FALSE; } //Process link state change event nicNotifyLinkChange(interface); } /** * @brief Write PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @param[in] data Register value **/ void tja1101WritePhyReg(NetInterface *interface, uint8_t address, uint16_t data) { //Write the specified PHY register if(interface->smiDriver != NULL) { interface->smiDriver->writePhyReg(SMI_OPCODE_WRITE, interface->phyAddr, address, data); } else { interface->nicDriver->writePhyReg(SMI_OPCODE_WRITE, interface->phyAddr, address, data); } } /** * @brief Read PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @return Register value **/ uint16_t tja1101ReadPhyReg(NetInterface *interface, uint8_t address) { uint16_t data; //Read the specified PHY register if(interface->smiDriver != NULL) { data = interface->smiDriver->readPhyReg(SMI_OPCODE_READ, interface->phyAddr, address); } else { data = interface->nicDriver->readPhyReg(SMI_OPCODE_READ, interface->phyAddr, address); } //Return the value of the PHY register return data; } /** * @brief Dump PHY registers for debugging purpose * @param[in] interface Underlying network interface **/ void tja1101DumpPhyReg(NetInterface *interface) { uint8_t i; //Loop through PHY registers for(i = 0; i < 32; i++) { //Display current PHY register TRACE_DEBUG("%02" PRIu8 ": 0x%04" PRIX16 "\r\n", i, tja1101ReadPhyReg(interface, i)); } //Terminate with a line feed TRACE_DEBUG("\r\n"); }
error_t tja1101Init(NetInterface *interface) { uint16_t value; //Debug message TRACE_INFO("Initializing TJA1101...\r\n"); //Undefined PHY address? if(interface->phyAddr >= 32) { //Use the default address interface->phyAddr = TJA1101_PHY_ADDR; } //Initialize serial management interface if(interface->smiDriver != NULL) { interface->smiDriver->init(); } //Initialize external interrupt line driver if(interface->extIntDriver != NULL) { interface->extIntDriver->init(); } //Reset PHY transceiver tja1101WritePhyReg(interface, TJA1101_BASIC_CTRL, TJA1101_BASIC_CTRL_RESET); //Wait for the reset to complete while(tja1101ReadPhyReg(interface, TJA1101_BASIC_CTRL) & TJA1101_BASIC_CTRL_RESET) { } //Dump PHY registers for debugging purpose tja1101DumpPhyReg(interface); //Enable configuration register access value = tja1101ReadPhyReg(interface, TJA1101_EXTENDED_CTRL); value |= TJA1101_EXTENDED_CTRL_CONFIG_EN; tja1101WritePhyReg(interface, TJA1101_EXTENDED_CTRL, value); //Select RMII mode (25MHz XTAL) value = tja1101ReadPhyReg(interface, TJA1101_CONFIG1); value &= ~TJA1101_CONFIG1_MII_MODE; value |= TJA1101_CONFIG1_MII_MODE_RMII_25MHZ; tja1101WritePhyReg(interface, TJA1101_CONFIG1, value); //The PHY is configured for autonomous operation value = tja1101ReadPhyReg(interface, TJA1101_COMM_CTRL); value |= TJA1101_COMM_CTRL_AUTO_OP; tja1101WritePhyReg(interface, TJA1101_COMM_CTRL, value); //Force the TCP/IP stack to poll the link state at startup interface->phyEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; }
error_t tja1101Init(NetInterface *interface) { uint16_t value; //Debug message TRACE_INFO("Initializing TJA1101...\r\n"); //Undefined PHY address? if(interface->phyAddr >= 32) { //Use the default address interface->phyAddr = TJA1101_PHY_ADDR; } //Initialize serial management interface if(interface->smiDriver != NULL) { interface->smiDriver->init(); } //Initialize external interrupt line driver if(interface->extIntDriver != NULL) { interface->extIntDriver->init(); } //Reset PHY transceiver tja1101WritePhyReg(interface, TJA1101_BASIC_CTRL, TJA1101_BASIC_CTRL_RESET); //Wait for the reset to complete while(tja1101ReadPhyReg(interface, TJA1101_BASIC_CTRL) & TJA1101_BASIC_CTRL_RESET) { } //Dump PHY registers for debugging purpose tja1101DumpPhyReg(interface); //Enable configuration register access value = tja1101ReadPhyReg(interface, TJA1101_EXTENDED_CTRL); value |= TJA1101_EXTENDED_CTRL_CONFIG_EN; tja1101WritePhyReg(interface, TJA1101_EXTENDED_CTRL, value); //Select RMII mode (50MHz output on REF_CLK) value = tja1101ReadPhyReg(interface, TJA1101_CONFIG1); value &= ~TJA1101_CONFIG1_MII_MODE; value |= TJA1101_CONFIG1_MII_MODE_RMII_50MHZ_REF_CLK_OUT; tja1101WritePhyReg(interface, TJA1101_CONFIG1, value); //The PHY is configured for autonomous operation value = tja1101ReadPhyReg(interface, TJA1101_COMM_CTRL); value |= TJA1101_COMM_CTRL_AUTO_OP; tja1101WritePhyReg(interface, TJA1101_COMM_CTRL, value); //Force the TCP/IP stack to poll the link state at startup interface->phyEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (104, ' //Select RMII mode (50MHz output on REF_CLK)'), (107, ' value |= TJA1101_CONFIG1_MII_MODE_RMII_50MHZ_REF_CLK_OUT;')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (104, ' //Select RMII mode (25MHz XTAL)'), (107, ' value |= TJA1101_CONFIG1_MII_MODE_RMII_25MHZ;')]}
4
4
138
601
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
['CWE-20']
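The fix in this record is a read-modify-write of the TJA1101 CONFIG1 register: the MII-mode field is cleared and then set to the 50MHz REF_CLK output variant instead of the 25MHz XTAL variant. The sketch below shows that register update pattern in isolation; the register layout, mask values, and phy_read/phy_write helpers are illustrative placeholders, not the real TJA1101 definitions.

/* Self-contained sketch of the read-modify-write used to switch the MII mode
 * field in the patch above. Masks and values are placeholders, not the real
 * TJA1101 register constants. */
#include <stdio.h>
#include <stdint.h>

#define CONFIG1_MII_MODE_MASK            0x0300u  /* placeholder field mask   */
#define CONFIG1_MII_MODE_RMII_50MHZ_OUT  0x0200u  /* placeholder field value  */

static uint16_t config1_reg = 0x01ABu;            /* simulated register state */

static uint16_t phy_read(void)        { return config1_reg; }
static void     phy_write(uint16_t v) { config1_reg = v;    }

int main(void) {
    uint16_t value = phy_read();
    value &= (uint16_t)~CONFIG1_MII_MODE_MASK;     /* clear the mode field     */
    value |= CONFIG1_MII_MODE_RMII_50MHZ_OUT;      /* select the new RMII mode */
    phy_write(value);
    printf("CONFIG1 = 0x%04X\n", phy_read());
    return 0;
}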
rawsock.c
rawsock_create
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> * Lauro Ramos Venancio <lauro.venancio@openbossa.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <net/tcp_states.h> #include <linux/nfc.h> #include <linux/export.h> #include "nfc.h" static struct nfc_sock_list raw_sk_list = { .lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock) }; static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_add_node(sk, &l->head); write_unlock(&l->lock); } static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_del_node_init(sk); write_unlock(&l->lock); } static void rawsock_write_queue_purge(struct sock *sk) { pr_debug("sk=%p\n", sk); spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_purge(&sk->sk_write_queue); nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); } static void rawsock_report_error(struct sock *sk, int err) { pr_debug("sk=%p err=%d\n", sk, err); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_err = -err; sk->sk_error_report(sk); rawsock_write_queue_purge(sk); } static int rawsock_release(struct socket *sock) { struct sock *sk = sock->sk; pr_debug("sock=%p sk=%p\n", sock, sk); if (!sk) return 0; if (sock->type == SOCK_RAW) nfc_sock_unlink(&raw_sk_list, sk); sock_orphan(sk); sock_put(sk); return 0; } static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, int len, int flags) { struct sock *sk = sock->sk; struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr; struct nfc_dev *dev; int rc = 0; pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); if (!addr || len < sizeof(struct sockaddr_nfc) || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, addr->target_idx, addr->nfc_protocol); lock_sock(sk); if (sock->state == SS_CONNECTED) { rc = -EISCONN; goto error; } dev = nfc_get_device(addr->dev_idx); if (!dev) { rc = -ENODEV; goto error; } if (addr->target_idx > dev->target_next_idx - 1 || addr->target_idx < dev->target_next_idx - dev->n_targets) { rc = -EINVAL; goto error; } rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); if (rc) goto put_dev; nfc_rawsock(sk)->dev = dev; nfc_rawsock(sk)->target_idx = addr->target_idx; sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; sk->sk_state_change(sk); release_sock(sk); return 0; put_dev: nfc_put_device(dev); error: release_sock(sk); return rc; } static int rawsock_add_header(struct sk_buff *skb) { *(u8 *)skb_push(skb, NFC_HEADER_SIZE) = 0; return 0; } static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, int err) { struct sock *sk = (struct sock *) context; BUG_ON(in_irq()); pr_debug("sk=%p err=%d\n", sk, err); if (err) goto error; err = rawsock_add_header(skb); if (err) goto error_skb; err = sock_queue_rcv_skb(sk, skb); if (err) goto error_skb; spin_lock_bh(&sk->sk_write_queue.lock); if (!skb_queue_empty(&sk->sk_write_queue)) schedule_work(&nfc_rawsock(sk)->tx_work); else nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); sock_put(sk); return; error_skb: kfree_skb(skb); error: rawsock_report_error(sk, err); sock_put(sk); } static void rawsock_tx_work(struct work_struct *work) { struct sock *sk = to_rawsock_sk(work); struct nfc_dev *dev = nfc_rawsock(sk)->dev; u32 target_idx = nfc_rawsock(sk)->target_idx; struct sk_buff *skb; int rc; pr_debug("sk=%p 
target_idx=%u\n", sk, target_idx); if (sk->sk_shutdown & SEND_SHUTDOWN) { rawsock_write_queue_purge(sk); return; } skb = skb_dequeue(&sk->sk_write_queue); sock_hold(sk); rc = nfc_data_exchange(dev, target_idx, skb, rawsock_data_exchange_complete, sk); if (rc) { rawsock_report_error(sk, rc); sock_put(sk); } } static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nfc_dev *dev = nfc_rawsock(sk)->dev; struct sk_buff *skb; int rc; pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len); if (msg->msg_namelen) return -EOPNOTSUPP; if (sock->state != SS_CONNECTED) return -ENOTCONN; skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc); if (skb == NULL) return rc; rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc < 0) { kfree_skb(skb); return rc; } spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_tail(&sk->sk_write_queue, skb); if (!nfc_rawsock(sk)->tx_work_scheduled) { schedule_work(&nfc_rawsock(sk)->tx_work); nfc_rawsock(sk)->tx_work_scheduled = true; } spin_unlock_bh(&sk->sk_write_queue.lock); return len; } static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int rc; pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); skb = skb_recv_datagram(sk, flags, noblock, &rc); if (!skb) return rc; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } rc = skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); return rc ? : copied; } static const struct proto_ops rawsock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = rawsock_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = rawsock_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static const struct proto_ops rawsock_raw_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = sock_no_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static void rawsock_destruct(struct sock *sk) { pr_debug("sk=%p\n", sk); if (sk->sk_state == TCP_ESTABLISHED) { nfc_deactivate_target(nfc_rawsock(sk)->dev, nfc_rawsock(sk)->target_idx, NFC_TARGET_MODE_IDLE); nfc_put_device(nfc_rawsock(sk)->dev); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Freeing alive NFC raw socket %p\n", sk); return; } } static int rawsock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern) { struct sock *sk; pr_debug("sock=%p\n", sock); if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) sock->ops = &rawsock_raw_ops; else sock->ops = &rawsock_ops; sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_protocol = nfc_proto->id; sk->sk_destruct = rawsock_destruct; sock->state = SS_UNCONNECTED; if (sock->type == SOCK_RAW) nfc_sock_link(&raw_sk_list, sk); else { INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); 
nfc_rawsock(sk)->tx_work_scheduled = false; } return 0; } void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb, u8 payload_type, u8 direction) { struct sk_buff *skb_copy = NULL, *nskb; struct sock *sk; u8 *data; read_lock(&raw_sk_list.lock); sk_for_each(sk, &raw_sk_list.head) { if (!skb_copy) { skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE, GFP_ATOMIC, true); if (!skb_copy) continue; data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE); data[0] = dev ? dev->idx : 0xFF; data[1] = direction & 0x01; data[1] |= (payload_type << 1); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&raw_sk_list.lock); kfree_skb(skb_copy); } EXPORT_SYMBOL(nfc_send_to_raw_sock); static struct proto rawsock_proto = { .name = "NFC_RAW", .owner = THIS_MODULE, .obj_size = sizeof(struct nfc_rawsock), }; static const struct nfc_protocol rawsock_nfc_proto = { .id = NFC_SOCKPROTO_RAW, .proto = &rawsock_proto, .owner = THIS_MODULE, .create = rawsock_create }; int __init rawsock_init(void) { int rc; rc = nfc_proto_register(&rawsock_nfc_proto); return rc; } void rawsock_exit(void) { nfc_proto_unregister(&rawsock_nfc_proto); }
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> * Lauro Ramos Venancio <lauro.venancio@openbossa.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <net/tcp_states.h> #include <linux/nfc.h> #include <linux/export.h> #include "nfc.h" static struct nfc_sock_list raw_sk_list = { .lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock) }; static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_add_node(sk, &l->head); write_unlock(&l->lock); } static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_del_node_init(sk); write_unlock(&l->lock); } static void rawsock_write_queue_purge(struct sock *sk) { pr_debug("sk=%p\n", sk); spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_purge(&sk->sk_write_queue); nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); } static void rawsock_report_error(struct sock *sk, int err) { pr_debug("sk=%p err=%d\n", sk, err); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_err = -err; sk->sk_error_report(sk); rawsock_write_queue_purge(sk); } static int rawsock_release(struct socket *sock) { struct sock *sk = sock->sk; pr_debug("sock=%p sk=%p\n", sock, sk); if (!sk) return 0; if (sock->type == SOCK_RAW) nfc_sock_unlink(&raw_sk_list, sk); sock_orphan(sk); sock_put(sk); return 0; } static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, int len, int flags) { struct sock *sk = sock->sk; struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr; struct nfc_dev *dev; int rc = 0; pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); if (!addr || len < sizeof(struct sockaddr_nfc) || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, addr->target_idx, addr->nfc_protocol); lock_sock(sk); if (sock->state == SS_CONNECTED) { rc = -EISCONN; goto error; } dev = nfc_get_device(addr->dev_idx); if (!dev) { rc = -ENODEV; goto error; } if (addr->target_idx > dev->target_next_idx - 1 || addr->target_idx < dev->target_next_idx - dev->n_targets) { rc = -EINVAL; goto error; } rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); if (rc) goto put_dev; nfc_rawsock(sk)->dev = dev; nfc_rawsock(sk)->target_idx = addr->target_idx; sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; sk->sk_state_change(sk); release_sock(sk); return 0; put_dev: nfc_put_device(dev); error: release_sock(sk); return rc; } static int rawsock_add_header(struct sk_buff *skb) { *(u8 *)skb_push(skb, NFC_HEADER_SIZE) = 0; return 0; } static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, int err) { struct sock *sk = (struct sock *) context; BUG_ON(in_irq()); pr_debug("sk=%p err=%d\n", sk, err); if (err) goto error; err = rawsock_add_header(skb); if (err) goto error_skb; err = sock_queue_rcv_skb(sk, skb); if (err) goto error_skb; spin_lock_bh(&sk->sk_write_queue.lock); if (!skb_queue_empty(&sk->sk_write_queue)) schedule_work(&nfc_rawsock(sk)->tx_work); else nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); sock_put(sk); return; error_skb: kfree_skb(skb); error: rawsock_report_error(sk, err); sock_put(sk); } static void rawsock_tx_work(struct work_struct *work) { struct sock *sk = to_rawsock_sk(work); struct nfc_dev *dev = nfc_rawsock(sk)->dev; u32 target_idx = nfc_rawsock(sk)->target_idx; struct sk_buff *skb; int rc; pr_debug("sk=%p 
target_idx=%u\n", sk, target_idx); if (sk->sk_shutdown & SEND_SHUTDOWN) { rawsock_write_queue_purge(sk); return; } skb = skb_dequeue(&sk->sk_write_queue); sock_hold(sk); rc = nfc_data_exchange(dev, target_idx, skb, rawsock_data_exchange_complete, sk); if (rc) { rawsock_report_error(sk, rc); sock_put(sk); } } static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nfc_dev *dev = nfc_rawsock(sk)->dev; struct sk_buff *skb; int rc; pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len); if (msg->msg_namelen) return -EOPNOTSUPP; if (sock->state != SS_CONNECTED) return -ENOTCONN; skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc); if (skb == NULL) return rc; rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc < 0) { kfree_skb(skb); return rc; } spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_tail(&sk->sk_write_queue, skb); if (!nfc_rawsock(sk)->tx_work_scheduled) { schedule_work(&nfc_rawsock(sk)->tx_work); nfc_rawsock(sk)->tx_work_scheduled = true; } spin_unlock_bh(&sk->sk_write_queue.lock); return len; } static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int rc; pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); skb = skb_recv_datagram(sk, flags, noblock, &rc); if (!skb) return rc; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } rc = skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); return rc ? : copied; } static const struct proto_ops rawsock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = rawsock_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = rawsock_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static const struct proto_ops rawsock_raw_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = sock_no_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static void rawsock_destruct(struct sock *sk) { pr_debug("sk=%p\n", sk); if (sk->sk_state == TCP_ESTABLISHED) { nfc_deactivate_target(nfc_rawsock(sk)->dev, nfc_rawsock(sk)->target_idx, NFC_TARGET_MODE_IDLE); nfc_put_device(nfc_rawsock(sk)->dev); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Freeing alive NFC raw socket %p\n", sk); return; } } static int rawsock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern) { struct sock *sk; pr_debug("sock=%p\n", sock); if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) { if (!capable(CAP_NET_RAW)) return -EPERM; sock->ops = &rawsock_raw_ops; } else { sock->ops = &rawsock_ops; } sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_protocol = nfc_proto->id; sk->sk_destruct = rawsock_destruct; sock->state = SS_UNCONNECTED; if (sock->type == SOCK_RAW) nfc_sock_link(&raw_sk_list, sk); else { 
INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); nfc_rawsock(sk)->tx_work_scheduled = false; } return 0; } void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb, u8 payload_type, u8 direction) { struct sk_buff *skb_copy = NULL, *nskb; struct sock *sk; u8 *data; read_lock(&raw_sk_list.lock); sk_for_each(sk, &raw_sk_list.head) { if (!skb_copy) { skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE, GFP_ATOMIC, true); if (!skb_copy) continue; data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE); data[0] = dev ? dev->idx : 0xFF; data[1] = direction & 0x01; data[1] |= (payload_type << 1); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&raw_sk_list.lock); kfree_skb(skb_copy); } EXPORT_SYMBOL(nfc_send_to_raw_sock); static struct proto rawsock_proto = { .name = "NFC_RAW", .owner = THIS_MODULE, .obj_size = sizeof(struct nfc_rawsock), }; static const struct nfc_protocol rawsock_nfc_proto = { .id = NFC_SOCKPROTO_RAW, .proto = &rawsock_proto, .owner = THIS_MODULE, .create = rawsock_create }; int __init rawsock_init(void) { int rc; rc = nfc_proto_register(&rawsock_nfc_proto); return rc; } void rawsock_exit(void) { nfc_proto_unregister(&rawsock_nfc_proto); }
static int rawsock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern) { struct sock *sk; pr_debug("sock=%p\n", sock); if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) sock->ops = &rawsock_raw_ops; else sock->ops = &rawsock_ops; sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_protocol = nfc_proto->id; sk->sk_destruct = rawsock_destruct; sock->state = SS_UNCONNECTED; if (sock->type == SOCK_RAW) nfc_sock_link(&raw_sk_list, sk); else { INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); nfc_rawsock(sk)->tx_work_scheduled = false; } return 0; }
static int rawsock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern) { struct sock *sk; pr_debug("sock=%p\n", sock); if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) { if (!capable(CAP_NET_RAW)) return -EPERM; sock->ops = &rawsock_raw_ops; } else { sock->ops = &rawsock_ops; } sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_protocol = nfc_proto->id; sk->sk_destruct = rawsock_destruct; sock->state = SS_UNCONNECTED; if (sock->type == SOCK_RAW) nfc_sock_link(&raw_sk_list, sk); else { INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); nfc_rawsock(sk)->tx_work_scheduled = false; } return 0; }
{'added': [(331, '\tif (sock->type == SOCK_RAW) {'), (332, '\t\tif (!capable(CAP_NET_RAW))'), (333, '\t\t\treturn -EPERM;'), (335, '\t} else {'), (337, '\t}')], 'deleted': [(331, '\tif (sock->type == SOCK_RAW)'), (333, '\telse')]}
5
2
321
1,916
https://github.com/torvalds/linux
CVE-2020-26088
['CWE-276']
print-pim.c
pimv2_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); break; case PIMV2_HELLO_OPTION_REFRESH_CAP: ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? 
*/ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; 
ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 #define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR 
Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); break; case PIMV2_HELLO_OPTION_REFRESH_CAP: ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? 
*/ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; 
ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); }
pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto 
bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); }
{'added': [(733, '\t\t\t\tif (olen != 2) {'), (734, '\t\t\t\t\tND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen));'), (735, '\t\t\t\t} else {'), (736, '\t\t\t\t\tunsigned_relts_print(ndo, EXTRACT_16BITS(bp));'), (737, '\t\t\t\t}'), (771, '\t\t\t\tif (olen != 4) {'), (772, '\t\t\t\t\tND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen));'), (773, '\t\t\t\t} else {'), (774, '\t\t\t\t\tND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp)));'), (775, '\t\t\t\t}'), (779, '\t\t\t\tif (olen != 4) {'), (780, '\t\t\t\t\tND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen));'), (781, '\t\t\t\t} else {'), (782, '\t\t\t\t\tND_PRINT((ndo, "v%d", *bp));'), (783, '\t\t\t\t\tif (*(bp+1) != 0) {'), (784, '\t\t\t\t\t\tND_PRINT((ndo, ", interval "));'), (785, '\t\t\t\t\t\tunsigned_relts_print(ndo, *(bp+1));'), (786, '\t\t\t\t\t}'), (787, '\t\t\t\t\tif (EXTRACT_16BITS(bp+2) != 0) {'), (788, '\t\t\t\t\t\tND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2)));'), (789, '\t\t\t\t\t}')], 'deleted': [(733, '\t\t\t\tunsigned_relts_print(ndo, EXTRACT_16BITS(bp));'), (767, '\t\t\t\tND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp)));'), (771, '\t\t\t\tND_PRINT((ndo, "v%d", *bp));'), (772, '\t\t\t\tif (*(bp+1) != 0) {'), (773, '\t\t\t\t\tND_PRINT((ndo, ", interval "));'), (774, '\t\t\t\t\tunsigned_relts_print(ndo, *(bp+1));'), (775, '\t\t\t\t}'), (776, '\t\t\t\tif (EXTRACT_16BITS(bp+2) != 0) {'), (777, '\t\t\t\t\tND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2)));')]}
21
9
857
5713
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12996
['CWE-125']
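The diff for this record (CVE-2017-12996, CWE-125) adds explicit option-length checks before the PIM hello option values are read with fixed-width extractors. Below is a minimal standalone sketch of that guard pattern, not the tcpdump source; extract_16bits() and print_holdtime_option() are hypothetical stand-ins for tcpdump's EXTRACT_16BITS() macro and its option-printing code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for tcpdump's EXTRACT_16BITS(): read a
 * big-endian 16-bit value from the option body. */
static uint16_t extract_16bits(const uint8_t *p)
{
    return (uint16_t)(((uint16_t)p[0] << 8) | p[1]);
}

/* Sketch of the guard added by the fix: validate the advertised
 * option length before reading a fixed-width value, instead of
 * trusting it and reading past the option body (out-of-bounds read). */
static void print_holdtime_option(const uint8_t *body, unsigned olen)
{
    if (olen != 2) {
        printf("ERROR: Option Length != 2 Bytes (%u)", olen);
        return;
    }
    printf("holdtime=%u", extract_16bits(body));
}

int main(void)
{
    const uint8_t body[2] = { 0x00, 0x69 };   /* holdtime = 105 */
    print_holdtime_option(body, 2);           /* accepted: prints holdtime=105 */
    printf("\n");
    print_holdtime_option(body, 1);           /* rejected: length mismatch */
    printf("\n");
    return 0;
}

The diff applies the same pattern to the 4-byte options, comparing olen against 4 before the 32-bit extraction is performed.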
index.c
mobi_trie_insert_infl
/** @file index.c * @brief Functions to parse index records * * Copyright (c) 2020 Bartek Fabiszewski * http://www.fabiszewski.net * * This file is part of libmobi. * Licensed under LGPL, either version 3, or any later. * See <http://www.gnu.org/licenses/> */ #define _GNU_SOURCE 1 #ifndef __USE_BSD #define __USE_BSD /* for strdup on linux/glibc */ #endif #include <string.h> #include <stdlib.h> #include <stdint.h> #include "index.h" #include "util.h" #include "memory.h" #include "debug.h" #include "buffer.h" /** @brief Read index entry label from buffer pointing at index record data @param[in,out] output Output string @param[in,out] buf MOBIBuffer structure, offset pointing at index entry label @param[in] length Number of bytes to be read @param[in] has_ligatures Decode ligatures if true @return Size of read label */ size_t mobi_indx_get_label(unsigned char *output, MOBIBuffer *buf, const size_t length, const size_t has_ligatures) { if (!output) { buf->error = MOBI_PARAM_ERR; return 0; } if (buf->offset + length > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return 0; } const unsigned char replacement = 0x3f; size_t output_length = 0; size_t i = 0; while (i < length && output_length < INDX_LABEL_SIZEMAX) { unsigned char c = mobi_buffer_get8(buf); i++; if (c == 0) { /* FIXME: is it safe to replace zeroes? */ debug_print("Invalid character: %u\n", c); c = replacement; } if (c <= 5 && has_ligatures) { unsigned char c2 = mobi_buffer_get8(buf); c = mobi_ligature_to_cp1252(c, c2); if (c == 0) { debug_print("Invalid ligature sequence%s", "\n"); mobi_buffer_seek(buf, -1); c = replacement; } else { i++; } } *output++ = c; output_length++; } *output = '\0'; return output_length; } /** @brief Parser of ORDT section of INDX record @param[in,out] buf MOBIBuffer structure, offset pointing at beginning of TAGX section @param[in,out] ordt MOBIOrdt structure to be filled by the function @return MOBI_RET status code (on success MOBI_SUCCESS) */ static MOBI_RET mobi_parse_ordt(MOBIBuffer *buf, MOBIOrdt *ordt) { /* read ORDT1 */ mobi_buffer_setpos(buf, ordt->ordt1_pos); if (mobi_buffer_match_magic(buf, ORDT_MAGIC)) { debug_print("%s\n", "ORDT1 section found"); mobi_buffer_seek(buf, 4); if (ordt->offsets_count + buf->offset > buf->maxlen) { debug_print("ORDT1 section too long (%zu)", ordt->offsets_count); return MOBI_DATA_CORRUPT; } ordt->ordt1 = malloc(ordt->offsets_count * sizeof(*ordt->ordt1)); if (ordt->ordt1 == NULL) { debug_print("%s", "Memory allocation failed for ORDT1 offsets\n"); return MOBI_MALLOC_FAILED; } size_t i = 0; while (i < ordt->offsets_count) { ordt->ordt1[i++] = mobi_buffer_get8(buf); } debug_print("ORDT1: read %zu entries\n", ordt->offsets_count); } /* read ORDT2 */ mobi_buffer_setpos(buf, ordt->ordt2_pos); if (mobi_buffer_match_magic(buf, ORDT_MAGIC)) { debug_print("%s\n", "ORDT2 section found"); mobi_buffer_seek(buf, 4); if (ordt->offsets_count * 2 + buf->offset > buf->maxlen) { debug_print("ORDT2 section too long (%zu)", ordt->offsets_count); return MOBI_DATA_CORRUPT; } ordt->ordt2 = malloc(ordt->offsets_count * sizeof(*ordt->ordt2)); if (ordt->ordt2 == NULL) { debug_print("%s", "Memory allocation failed for ORDT2 offsets\n"); return MOBI_MALLOC_FAILED; } size_t i = 0; while (i < ordt->offsets_count) { ordt->ordt2[i++] = mobi_buffer_get16(buf); } debug_print("ORDT2: read %zu entries\n", ordt->offsets_count); } return MOBI_SUCCESS; } /** @brief Parser of TAGX section of INDX record @param[in,out] buf MOBIBuffer structure, offset pointing at 
beginning of TAGX section @param[in,out] tagx MOBITagx structure to be filled by the function @return MOBI_RET status code (on success MOBI_SUCCESS) */ static MOBI_RET mobi_parse_tagx(MOBIBuffer *buf, MOBITagx *tagx) { tagx->control_byte_count = 0; tagx->tags_count = 0; tagx->tags = NULL; mobi_buffer_seek(buf, 4); /* skip header */ uint32_t tagx_record_length = mobi_buffer_get32(buf); if (tagx_record_length < 12) { debug_print("INDX record too short: %u\n", tagx_record_length); return MOBI_DATA_CORRUPT; } tagx->control_byte_count = mobi_buffer_get32(buf); tagx_record_length -= 12; if (tagx_record_length + buf->offset > buf->maxlen) { debug_print("INDX record too long: %u\n", tagx_record_length); return MOBI_DATA_CORRUPT; } tagx->tags = malloc(tagx_record_length * sizeof(TAGXTags)); if (tagx->tags == NULL) { debug_print("%s", "Memory allocation failed for TAGX tags\n"); return MOBI_MALLOC_FAILED; } size_t i = 0; const size_t tagx_data_length = tagx_record_length / 4; size_t control_byte_count = 0; while (i < tagx_data_length) { tagx->tags[i].tag = mobi_buffer_get8(buf); tagx->tags[i].values_count = mobi_buffer_get8(buf); tagx->tags[i].bitmask = mobi_buffer_get8(buf); const uint8_t control_byte = mobi_buffer_get8(buf); if (control_byte) { control_byte_count++; } tagx->tags[i].control_byte = control_byte; debug_print("tagx[%zu]:\t%i\t%i\t%i\t%i\n", i, tagx->tags[i].tag, tagx->tags[i].values_count, tagx->tags[i].bitmask, control_byte); i++; } if (tagx->control_byte_count != control_byte_count) { debug_print("Wrong count of control bytes: %zu != %zu\n", tagx->control_byte_count, control_byte_count); free(tagx->tags); tagx->tags = NULL; tagx->control_byte_count = 0; return MOBI_DATA_CORRUPT; } tagx->tags_count = i; return MOBI_SUCCESS; } /** @brief Parser of IDXT section of INDX record @param[in,out] buf MOBIBuffer structure, offset pointing at beginning of TAGX section @param[in,out] idxt MOBITagx structure to be filled by the function @param[in] entries_count Number of index entries @return MOBI_RET status code (on success MOBI_SUCCESS) */ static MOBI_RET mobi_parse_idxt(MOBIBuffer *buf, MOBIIdxt *idxt, const size_t entries_count) { const uint32_t idxt_offset = (uint32_t) buf->offset; idxt->offsets_count = 0; char idxt_magic[5]; mobi_buffer_getstring(idxt_magic, buf, 4); if (strncmp(idxt_magic, IDXT_MAGIC, 4) != 0) { debug_print("IDXT wrong magic: %s\n", idxt_magic); return MOBI_DATA_CORRUPT; } size_t i = 0; while (i < entries_count) { /* entry offsets */ idxt->offsets[i++] = mobi_buffer_get16(buf); } /* last entry end position is IDXT tag offset */ idxt->offsets[i] = idxt_offset; idxt->offsets_count = i; return MOBI_SUCCESS; } /** @brief Get encoded character from dictionary index The characters are offsets into ORDT table @param[in] ordt MOBIOrdt structure (ORDT data and metadata) @param[in,out] buf MOBIBuffer structure with index data @param[in,out] offset Value read from buffer @return Number of bytes read (zero in case of error) */ size_t mobi_ordt_getbuffer(const MOBIOrdt *ordt, MOBIBuffer *buf, uint16_t *offset) { size_t i = 0; if (ordt->type == 1) { *offset = mobi_buffer_get8(buf); i++; } else { *offset = mobi_buffer_get16(buf); i += 2; } return i; } /** @brief Fetch UTF-16 value from ORDT2 table @param[in] ordt MOBIOrdt structure (ORDT data and metadata) @param[in] offset Offset in ORDT2 table @return UTF-16 code point */ uint16_t mobi_ordt_lookup(const MOBIOrdt *ordt, const uint16_t offset) { uint16_t utf16; if (offset < ordt->offsets_count) { utf16 = ordt->ordt2[offset]; } else { 
utf16 = offset; } return utf16; } /** @brief Get UTF-8 string from buffer, decoded by lookups in ORDT2 table @param[in] ordt MOBIOrdt structure (ORDT data and metadata) @param[in,out] buf MOBIBuffer structure with input string @param[in,out] output Output buffer (INDX_LABEL_SIZEMAX bytes) @param[in] length Length of input string contained in buf @return Number of bytes read */ size_t mobi_getstring_ordt(const MOBIOrdt *ordt, MOBIBuffer *buf, unsigned char *output, size_t length) { size_t i = 0; size_t output_length = 0; const uint32_t bytemask = 0xbf; const uint32_t bytemark = 0x80; const uint32_t uni_replacement = 0xfffd; const uint32_t surrogate_offset = 0x35fdc00; static const uint8_t init_byte[7] = { 0x00, 0x00, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc }; while (i < length) { uint16_t offset; i += mobi_ordt_getbuffer(ordt, buf, &offset); uint32_t codepoint = mobi_ordt_lookup(ordt, offset); if (codepoint <= 5) { size_t k = mobi_ordt_getbuffer(ordt, buf, &offset); uint32_t codepoint2 = mobi_ordt_lookup(ordt, offset); codepoint = mobi_ligature_to_utf16(codepoint, codepoint2); if (codepoint == uni_replacement) { /* rewind buffer to codepoint2 */ debug_print("Invalid ligature sequence%s", "\n"); mobi_buffer_seek(buf, - (int) k); } else { i += k; } } /* convert UTF-16 surrogates into UTF-32 */ if (codepoint >= 0xd800 && codepoint <= 0xdbff) { size_t k = mobi_ordt_getbuffer(ordt, buf, &offset); uint32_t codepoint2 = mobi_ordt_lookup(ordt, offset); if (codepoint2 >= 0xdc00 && codepoint2 <= 0xdfff) { i += k; codepoint = (codepoint << 10) + codepoint2 - surrogate_offset; } else { /* illegal unpaired high surrogate */ /* rewind buffer to codepoint2 */ debug_print("Invalid code point: %u\n", codepoint); mobi_buffer_seek(buf, - (int) k); codepoint = uni_replacement; } } if ((codepoint >= 0xdc00 && codepoint <= 0xdfff) /* unpaired low surrogate */ || (codepoint >= 0xfdd0 && codepoint <= 0xfdef) /* invalid characters */ || (codepoint & 0xfffe) == 0xfffe /* reserved characters */ || codepoint == 0 /* remove zeroes */) { codepoint = uni_replacement; debug_print("Invalid code point: %u\n", codepoint); } /* Conversion routine based on unicode's ConvertUTF.c */ size_t bytes; if (codepoint < 0x80) { bytes = 1; } else if (codepoint < 0x800) { bytes = 2; } else if (codepoint < 0x10000) { bytes = 3; } else if (codepoint < 0x110000) { bytes = 4; } else { bytes = 3; codepoint = uni_replacement; debug_print("Invalid code point: %u\n", codepoint); } if (output_length + bytes >= INDX_LABEL_SIZEMAX) { debug_print("%s\n", "INDX label too long"); break; } output += bytes; switch (bytes) { case 4: *--output = (uint8_t)((codepoint | bytemark) & bytemask); codepoint >>= 6; /* falls through */ case 3: *--output = (uint8_t)((codepoint | bytemark) & bytemask); codepoint >>= 6; /* falls through */ case 2: *--output = (uint8_t)((codepoint | bytemark) & bytemask); codepoint >>= 6; /* falls through */ case 1: *--output = (uint8_t)(codepoint | init_byte[bytes]); } output += bytes; output_length += bytes; } *output = '\0'; return output_length; } /** @brief Parser of INDX index entry @param[in,out] indx MOBIIndx structure, to be filled with parsed data @param[in] idxt MOBIIdxt structure with parsed IDXT index @param[in] tagx MOBITagx structure with parsed TAGX index @param[in,out] buf MOBIBuffer structure with index data @param[in] curr_number Sequential number of an index entry for current record @return MOBI_RET status code (on success MOBI_SUCCESS) */ static MOBI_RET mobi_parse_index_entry(MOBIIndx *indx, const MOBIIdxt idxt, const 
MOBITagx *tagx, const MOBIOrdt *ordt, MOBIBuffer *buf, const size_t curr_number) { if (indx == NULL) { debug_print("%s", "INDX structure not initialized\n"); return MOBI_INIT_FAILED; } const size_t entry_offset = indx->entries_count; const size_t entry_length = idxt.offsets[curr_number + 1] - idxt.offsets[curr_number]; mobi_buffer_setpos(buf, idxt.offsets[curr_number]); size_t entry_number = curr_number + entry_offset; if (entry_number >= indx->total_entries_count) { debug_print("Entry number beyond array: %zu\n", entry_number); return MOBI_DATA_CORRUPT; } /* save original record maxlen */ const size_t buf_maxlen = buf->maxlen; if (buf->offset + entry_length >= buf_maxlen) { debug_print("Entry length too long: %zu\n", entry_length); return MOBI_DATA_CORRUPT; } buf->maxlen = buf->offset + entry_length; size_t label_length = mobi_buffer_get8(buf); if (label_length > entry_length) { debug_print("Label length too long: %zu\n", label_length); return MOBI_DATA_CORRUPT; } char text[INDX_LABEL_SIZEMAX]; /* FIXME: what is ORDT1 for? */ if (ordt->ordt2) { label_length = mobi_getstring_ordt(ordt, buf, (unsigned char*) text, label_length); } else { label_length = mobi_indx_get_label((unsigned char*) text, buf, label_length, indx->ligt_entries_count); } indx->entries[entry_number].label = malloc(label_length + 1); if (indx->entries[entry_number].label == NULL) { debug_print("Memory allocation failed (%zu bytes)\n", label_length); return MOBI_MALLOC_FAILED; } strncpy(indx->entries[entry_number].label, text, label_length + 1); //debug_print("tag label[%zu]: %s\n", entry_number, indx->entries[entry_number].label); unsigned char *control_bytes; control_bytes = buf->data + buf->offset; mobi_buffer_seek(buf, (int) tagx->control_byte_count); indx->entries[entry_number].tags_count = 0; indx->entries[entry_number].tags = NULL; if (tagx->tags_count > 0) { typedef struct { uint8_t tag; uint8_t tag_value_count; uint32_t value_count; uint32_t value_bytes; } MOBIPtagx; MOBIPtagx *ptagx = malloc(tagx->tags_count * sizeof(MOBIPtagx)); if (ptagx == NULL) { debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIPtagx)); return MOBI_MALLOC_FAILED; } uint32_t ptagx_count = 0; size_t len; size_t i = 0; while (i < tagx->tags_count) { if (tagx->tags[i].control_byte == 1) { control_bytes++; i++; continue; } uint32_t value = control_bytes[0] & tagx->tags[i].bitmask; if (value != 0) { /* FIXME: is it safe to use MOBI_NOTSET? */ uint32_t value_count = MOBI_NOTSET; uint32_t value_bytes = MOBI_NOTSET; /* all bits of masked value are set */ if (value == tagx->tags[i].bitmask) { /* more than 1 bit set */ if (mobi_bitcount(tagx->tags[i].bitmask) > 1) { /* read value bytes from entry */ len = 0; value_bytes = mobi_buffer_get_varlen(buf, &len); } else { value_count = 1; } } else { uint8_t mask = tagx->tags[i].bitmask; while ((mask & 1) == 0) { mask >>= 1; value >>= 1; } value_count = value; } ptagx[ptagx_count].tag = tagx->tags[i].tag; ptagx[ptagx_count].tag_value_count = tagx->tags[i].values_count; ptagx[ptagx_count].value_count = value_count; ptagx[ptagx_count].value_bytes = value_bytes; ptagx_count++; } i++; } indx->entries[entry_number].tags = malloc(tagx->tags_count * sizeof(MOBIIndexTag)); if (indx->entries[entry_number].tags == NULL) { debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIIndexTag)); free(ptagx); return MOBI_MALLOC_FAILED; } i = 0; while (i < ptagx_count) { uint32_t tagvalues_count = 0; /* FIXME: is it safe to use MOBI_NOTSET? 
*/ /* value count is set */ uint32_t tagvalues[INDX_TAGVALUES_MAX]; if (ptagx[i].value_count != MOBI_NOTSET) { size_t count = ptagx[i].value_count * ptagx[i].tag_value_count; while (count-- && tagvalues_count < INDX_TAGVALUES_MAX) { len = 0; const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len); tagvalues[tagvalues_count++] = value_bytes; } /* value count is not set */ } else { /* read value_bytes bytes */ len = 0; while (len < ptagx[i].value_bytes && tagvalues_count < INDX_TAGVALUES_MAX) { const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len); tagvalues[tagvalues_count++] = value_bytes; } } if (tagvalues_count) { const size_t arr_size = tagvalues_count * sizeof(*indx->entries[entry_number].tags[i].tagvalues); indx->entries[entry_number].tags[i].tagvalues = malloc(arr_size); if (indx->entries[entry_number].tags[i].tagvalues == NULL) { debug_print("Memory allocation failed (%zu bytes)\n", arr_size); free(ptagx); return MOBI_MALLOC_FAILED; } memcpy(indx->entries[entry_number].tags[i].tagvalues, tagvalues, arr_size); } else { indx->entries[entry_number].tags[i].tagvalues = NULL; } indx->entries[entry_number].tags[i].tagid = ptagx[i].tag; indx->entries[entry_number].tags[i].tagvalues_count = tagvalues_count; indx->entries[entry_number].tags_count++; i++; } free(ptagx); } /* restore buffer maxlen */ buf->maxlen = buf_maxlen; return MOBI_SUCCESS; } /** @brief Parser of INDX record @param[in] indx_record MOBIPdbRecord structure with INDX record @param[in,out] indx MOBIIndx structure to be filled with parsed entries @param[in,out] tagx MOBITagx structure, will be filled with parsed TAGX section data if present in the INDX record, otherwise TAGX data will be used to parse the record @param[in,out] ordt MOBIOrdt structure, will be filled with parsed ORDT sections @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_parse_indx(const MOBIPdbRecord *indx_record, MOBIIndx *indx, MOBITagx *tagx, MOBIOrdt *ordt) { if (indx_record == NULL || indx == NULL || tagx == NULL || ordt == NULL) { debug_print("%s", "index structure not initialized\n"); return MOBI_INIT_FAILED; } MOBI_RET ret = MOBI_SUCCESS; MOBIBuffer *buf = mobi_buffer_init_null(indx_record->data, indx_record->size); if (buf == NULL) { debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } char indx_magic[5]; mobi_buffer_getstring(indx_magic, buf, 4); /* 0: INDX magic */ const uint32_t header_length = mobi_buffer_get32(buf); /* 4: header length */ if (strncmp(indx_magic, INDX_MAGIC, 4) != 0 || header_length == 0 || header_length > indx_record->size) { debug_print("INDX wrong magic: %s or header length: %u\n", indx_magic, header_length); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } mobi_buffer_seek(buf, 4); /* 8: unk, usually zeroes */ const uint32_t type = mobi_buffer_get32(buf); /* 12: 0 - normal, 2 - inflection */ mobi_buffer_seek(buf, 4); /* 16: unk */ const uint32_t idxt_offset = mobi_buffer_get32(buf); /* 20: IDXT offset */ const uint32_t entries_count = mobi_buffer_get32(buf); /* 24: entries count */ if (entries_count > INDX_RECORD_MAXCNT) { debug_print("Too many index entries (%u)\n", entries_count); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } /* if record contains TAGX section, read it (and ORDT) and return */ if (mobi_buffer_match_magic_offset(buf, TAGX_MAGIC, header_length) && indx->total_entries_count == 0) { buf->maxlen = header_length; /* TAGX metadata */ uint32_t encoding = mobi_buffer_get32(buf); /* 28: encoding */ if (encoding == MOBI_NOTSET) { 
encoding = MOBI_CP1252; } mobi_buffer_seek(buf, 4); /* 32 */ const uint32_t total_entries_count = mobi_buffer_get32(buf); /* 36: total entries count */ if (total_entries_count > INDX_TOTAL_MAXCNT) { debug_print("Too many total index entries (%u)\n", total_entries_count); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } uint32_t ordt_offset = mobi_buffer_get32(buf); /* 40: ORDT offset; currently not used */ if (ordt_offset + ORDT_RECORD_MAXCNT + 4 > indx_record->size) { ordt_offset = 0; } uint32_t ligt_offset = mobi_buffer_get32(buf); /* 44: LIGT offset; currently static table used instead */ uint32_t ligt_entries_count = mobi_buffer_get32(buf); /* 48: LIGT entries count */ if (ligt_offset + 4 * ligt_entries_count + 4 > indx_record->size) { ligt_offset = 0; ligt_entries_count = 0; } const uint32_t cncx_records_count = mobi_buffer_get32(buf); /* 52: CNCX entries count */ if (cncx_records_count > CNCX_RECORD_MAXCNT) { debug_print("Too many CNCX records (%u)\n", cncx_records_count); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } /* 56: unk count */ /* 60-148: phonetizer */ uint32_t ordt_type = 0; uint32_t ordt_entries_count = 0; uint32_t ordt1_offset = 0; uint32_t ordt2_offset = 0; uint32_t index_name_offset = 0; uint32_t index_name_length = 0; if (header_length >= 180) { mobi_buffer_setpos(buf, 164); ordt_type = mobi_buffer_get32(buf); /* 164: ORDT type */ ordt_entries_count = mobi_buffer_get32(buf); /* 168: ORDT entries count */ ordt1_offset = mobi_buffer_get32(buf); /* 172: ORDT1 offset; currently not used */ ordt2_offset = mobi_buffer_get32(buf); /* 176: ORDT2 offset */ const size_t entry_size = (ordt_type == 0) ? 1 : 2; if (ordt1_offset + entry_size * ordt_entries_count > indx_record->size || ordt2_offset + 2 * ordt_entries_count > indx_record->size) { ordt1_offset = 0; ordt2_offset = 0; ordt_entries_count = 0; } index_name_offset = mobi_buffer_get32(buf); /* 180: Index name offset */ index_name_length = mobi_buffer_get32(buf); /* 184: Index name length */ } buf->maxlen = indx_record->size; mobi_buffer_setpos(buf, header_length); ret = mobi_parse_tagx(buf, tagx); if (ret != MOBI_SUCCESS) { mobi_buffer_free_null(buf); return ret; } if (ordt_entries_count > 0) { /* parse ORDT sections */ ordt->offsets_count = ordt_entries_count; ordt->type = ordt_type; ordt->ordt1_pos = ordt1_offset; ordt->ordt2_pos = ordt2_offset; ret = mobi_parse_ordt(buf, ordt); debug_print("ORDT: %u, %u, %u, %u\n", ordt_type, ordt_entries_count, ordt1_offset, ordt2_offset); if (ret != MOBI_SUCCESS) { mobi_buffer_free_null(buf); return ret; } } if (index_name_offset > 0 && index_name_length > 0) { if (index_name_length <= header_length - index_name_offset && index_name_length < INDX_NAME_SIZEMAX) { mobi_buffer_setpos(buf, index_name_offset); char *name = malloc(index_name_length + 1); if (name == NULL) { debug_print("%s", "Memory allocation failed\n"); mobi_buffer_free_null(buf); return MOBI_MALLOC_FAILED; } mobi_buffer_getstring(name, buf, index_name_length); indx->orth_index_name = name; debug_print("Orth index name: %s\n", name); } } indx->encoding = encoding; indx->type = type; indx->entries_count = entries_count; indx->total_entries_count = total_entries_count; if (ligt_entries_count != 0 && !mobi_buffer_match_magic_offset(buf, LIGT_MAGIC, ligt_offset)) { ligt_offset = 0; ligt_entries_count = 0; } indx->ligt_offset = ligt_offset; indx->ligt_entries_count = ligt_entries_count; indx->ordt_offset = ordt_offset; indx->cncx_records_count = cncx_records_count; } else { /* else parse IDXT entries offsets */ 
if (idxt_offset == 0) { debug_print("%s", "Missing IDXT offset\n"); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } if (idxt_offset + 2 * entries_count + 4 > indx_record->size ) { debug_print("IDXT entries beyond record end%s", "\n"); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } mobi_buffer_setpos(buf, idxt_offset); MOBIIdxt idxt; uint32_t *offsets = malloc((entries_count + 1) * sizeof(uint32_t)); if (offsets == NULL) { mobi_buffer_free_null(buf); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } idxt.offsets = offsets; ret = mobi_parse_idxt(buf, &idxt, entries_count); if (ret != MOBI_SUCCESS) { debug_print("%s", "IDXT parsing failed\n"); mobi_buffer_free_null(buf); free(offsets); return ret; } /* parse entries */ if (entries_count > 0) { if (indx->entries == NULL) { indx->entries = malloc(indx->total_entries_count * sizeof(MOBIIndexEntry)); if (indx->entries == NULL) { mobi_buffer_free_null(buf); free(offsets); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } } size_t i = 0; while (i < entries_count) { ret = mobi_parse_index_entry(indx, idxt, tagx, ordt, buf, i++); if (ret != MOBI_SUCCESS) { mobi_buffer_free_null(buf); free(offsets); return ret; } } indx->entries_count += entries_count; } free(offsets); } mobi_buffer_free_null(buf); return MOBI_SUCCESS; } /** @brief Parser of a set of index records @param[in] m MOBIData structure containing MOBI file metadata and data @param[in,out] indx MOBIIndx structure to be filled with parsed entries @param[in] indx_record_number Number of the first record of the set @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_parse_index(const MOBIData *m, MOBIIndx *indx, const size_t indx_record_number) { MOBI_RET ret; /* tagx->tags array will be allocated in mobi_parse_tagx */ MOBITagx *tagx = calloc(1, sizeof(MOBITagx)); if (tagx == NULL) { mobi_free_indx(indx); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } /* ordt->ordt1 and ordt.ordt2 arrays will be allocated in mobi_parse_ordt */ MOBIOrdt *ordt = calloc(1, sizeof(MOBIOrdt)); if (ordt == NULL) { mobi_free_indx(indx); mobi_free_tagx(tagx); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } /* parse first meta INDX record */ MOBIPdbRecord *record = mobi_get_record_by_seqnumber(m, indx_record_number); ret = mobi_parse_indx(record, indx, tagx, ordt); if (ret != MOBI_SUCCESS) { mobi_free_indx(indx); mobi_free_tagx(tagx); mobi_free_ordt(ordt); return ret; } /* parse remaining INDX records for the index */ size_t count = indx->entries_count; indx->entries_count = 0; while (count--) { record = record->next; ret = mobi_parse_indx(record, indx, tagx, ordt); if (ret != MOBI_SUCCESS) { mobi_free_indx(indx); mobi_free_tagx(tagx); mobi_free_ordt(ordt); return ret; } } if (indx->entries_count != indx->total_entries_count) { debug_print("Entries count %zu != total entries count %zu\n", indx->entries_count, indx->total_entries_count); mobi_free_indx(indx); mobi_free_tagx(tagx); mobi_free_ordt(ordt); return MOBI_DATA_CORRUPT; } /* copy pointer to first cncx record if present and set info from first record */ if (indx->cncx_records_count) { indx->cncx_record = record->next; } mobi_free_tagx(tagx); mobi_free_ordt(ordt); return MOBI_SUCCESS; } /** @brief Get a value of tag[tagid][tagindex] for given index entry @param[in,out] tagvalue Will be set to a tag value @param[in] entry Index entry to be search for the value @param[in] tag_arr Array: tag_arr[0] = tagid, tag_arr[1] 
= tagindex @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_get_indxentry_tagvalue(uint32_t *tagvalue, const MOBIIndexEntry *entry, const unsigned tag_arr[]) { if (entry == NULL) { debug_print("%s", "INDX entry not initialized\n"); return MOBI_INIT_FAILED; } size_t i = 0; while (i < entry->tags_count) { if (entry->tags[i].tagid == tag_arr[0]) { if (entry->tags[i].tagvalues_count > tag_arr[1]) { *tagvalue = entry->tags[i].tagvalues[tag_arr[1]]; return MOBI_SUCCESS; } break; } i++; } //debug_print("tag[%i][%i] not found in entry: %s\n", tag_arr[0], tag_arr[1], entry->label); return MOBI_DATA_CORRUPT; } /** @brief Get array of tagvalues of tag[tagid] for given index entry @param[in,out] tagarr Pointer to tagvalues array @param[in] entry Index entry to be search for the value @param[in] tagid Id of the tag @return Size of the array (zero on failure) */ size_t mobi_get_indxentry_tagarray(uint32_t **tagarr, const MOBIIndexEntry *entry, const size_t tagid) { if (entry == NULL) { debug_print("%s", "INDX entry not initialized\n"); return 0; } size_t i = 0; while (i < entry->tags_count) { if (entry->tags[i].tagid == tagid) { *tagarr = entry->tags[i].tagvalues; return entry->tags[i].tagvalues_count; } i++; } //debug_print("tag[%zu] not found in entry: %s\n", tagid, entry->label); return 0; } /** @brief Get entry start offset for the orth entry @param[in] entry MOBIIndexEntry structure @return Start offset, MOBI_NOTSET on failure */ uint32_t mobi_get_orth_entry_offset(const MOBIIndexEntry *entry) { uint32_t entry_startpos; MOBI_RET ret = mobi_get_indxentry_tagvalue(&entry_startpos, entry, INDX_TAG_ORTH_POSITION); if (ret != MOBI_SUCCESS) { return MOBI_NOTSET; } return entry_startpos; } /** @brief Get text length for the orth entry @param[in] entry MOBIIndexEntry structure @return Text length, MOBI_NOTSET on failure */ uint32_t mobi_get_orth_entry_length(const MOBIIndexEntry *entry) { uint32_t entry_textlen; MOBI_RET ret = mobi_get_indxentry_tagvalue(&entry_textlen, entry, INDX_TAG_ORTH_LENGTH); if (ret != MOBI_SUCCESS) { return MOBI_NOTSET; } return entry_textlen; } /** @brief Check if given tagid is present in the index @param[in] indx Index MOBIIndx structure @param[in] tagid Id of the tag @return True on success, false otherwise */ bool mobi_indx_has_tag(const MOBIIndx *indx, const size_t tagid) { if (indx) { for (size_t i = 0; i < indx->entries_count; i++) { MOBIIndexEntry entry = indx->entries[i]; for(size_t j = 0; j < entry.tags_count; j++) { if (entry.tags[j].tagid == tagid) { return true; } } } } return false; } /** @brief Get compiled index entry string Allocates memory for the string. Must be freed by caller. @param[in] cncx_record MOBIPdbRecord structure with cncx record @param[in] cncx_offset Offset of string entry from the beginning of the record @return Entry string or null if malloc failed */ char * mobi_get_cncx_string(const MOBIPdbRecord *cncx_record, const uint32_t cncx_offset) { /* TODO: handle multiple cncx records */ MOBIBuffer *buf = mobi_buffer_init_null(cncx_record->data, cncx_record->size); if (buf == NULL) { debug_print("%s\n", "Memory allocation failed"); return NULL; } mobi_buffer_setpos(buf, cncx_offset); size_t len = 0; const uint32_t string_length = mobi_buffer_get_varlen(buf, &len); char *string = malloc(string_length + 1); if (string) { mobi_buffer_getstring(string, buf, string_length); } mobi_buffer_free_null(buf); return string; } /** @brief Get compiled index entry string, converted to utf8 encoding Allocates memory for the string. 
Must be freed by caller. @param[in] cncx_record MOBIPdbRecord structure with cncx record @param[in] cncx_offset Offset of string entry from the beginning of the record @param[in] cncx_encoding Encoding @return Entry string or null if malloc failed */ char * mobi_get_cncx_string_utf8(const MOBIPdbRecord *cncx_record, const uint32_t cncx_offset, MOBIEncoding cncx_encoding) { char *string = mobi_get_cncx_string(cncx_record, cncx_offset); if (string != NULL && cncx_encoding == MOBI_CP1252) { size_t in_len = strlen(string); size_t out_len = in_len * 3 + 1; char *decoded = malloc(out_len); if (decoded) { mobi_cp1252_to_utf8(decoded, string, &out_len, in_len); free(string); string = strdup(decoded); free(decoded); } } return string; } /** @brief Get flat index entry string Allocates memory for the string. Must be freed by caller. @param[in] cncx_record MOBIPdbRecord structure with cncx record @param[in] cncx_offset Offset of string entry from the beginning of the record @param[in] length Length of the string to be extracted @return Entry string */ char * mobi_get_cncx_string_flat(const MOBIPdbRecord *cncx_record, const uint32_t cncx_offset, const size_t length) { /* TODO: handle multiple cncx records */ MOBIBuffer *buf = mobi_buffer_init_null(cncx_record->data, cncx_record->size); if (buf == NULL) { debug_print("%s\n", "Memory allocation failed"); return NULL; } mobi_buffer_setpos(buf, cncx_offset); char *string = malloc(length + 1); if (string) { mobi_buffer_getstring(string, buf, length); } mobi_buffer_free_null(buf); return string; } /** @brief Decode compiled infl index entry Buffer decoded must be initialized with basic index entry. Basic index entry will be transformed into inflected form, based on compiled rule. Min. size of input buffer (decoded) must be INDX_INFLBUF_SIZEMAX + 1 @param[in,out] decoded Decoded entry string @param[in,out] decoded_size Decoded entry size @param[in] rule Compiled rule @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_decode_infl(unsigned char *decoded, int *decoded_size, const unsigned char *rule) { int pos = *decoded_size; char mod = 'i'; char dir = '<'; char olddir; unsigned char c; while ((c = *rule++)) { if (c <= 4) { mod = (c <= 2) ? 'i' : 'd'; /* insert, delete */ olddir = dir; dir = (c & 2) ? '<' : '>'; /* left, right */ if (olddir != dir && olddir) { pos = (c & 2) ? 
*decoded_size : 0; } } else if (c > 10 && c < 20) { if (dir == '>') { pos = *decoded_size; } pos -= c - 10; dir = 0; if (pos < 0 || pos > *decoded_size) { debug_print("Position setting failed (%s)\n", decoded); return MOBI_DATA_CORRUPT; } } else { if (mod == 'i') { const unsigned char *s = decoded + pos; unsigned char *d = decoded + pos + 1; const int l = *decoded_size - pos; if (l < 0 || d + l > decoded + INDX_INFLBUF_SIZEMAX) { debug_print("Out of buffer in %s at pos: %i\n", decoded, pos); return MOBI_DATA_CORRUPT; } memmove(d, s, (size_t) l); decoded[pos] = c; (*decoded_size)++; if (dir == '>') { pos++; } } else { if (dir == '<') { pos--; } const unsigned char *s = decoded + pos + 1; unsigned char *d = decoded + pos; const int l = *decoded_size - pos; if (l < 0 || d + l > decoded + INDX_INFLBUF_SIZEMAX) { debug_print("Out of buffer in %s at pos: %i\n", decoded, pos); return MOBI_DATA_CORRUPT; } if (decoded[pos] != c) { debug_print("Character mismatch in %s at pos: %i (%c != %c)\n", decoded, pos, decoded[pos], c); return MOBI_DATA_CORRUPT; } memmove(d, s, (size_t) l); (*decoded_size)--; } } } return MOBI_SUCCESS; } /** @brief Get all matches for given string from trie structure Matches are made agains reversed string and all its substrings @param[in,out] infl_strings Array of returned strings @param[in,out] root Root node of the tree @param[in,out] string Index entry number @return Number of returned strings */ size_t mobi_trie_get_inflgroups(char **infl_strings, MOBITrie * const root, const char *string) { /* travers trie and get values for each substring */ if (root == NULL) { return MOBI_PARAM_ERR; } size_t count = 0; size_t length = strlen(string); MOBITrie *node = root; while (node && length > 0) { char **values = NULL; size_t values_count = 0; node = mobi_trie_get_next(&values, &values_count, node, string[length - 1]); length--; for (size_t j = 0; j < values_count; j++) { if (count == INDX_INFLSTRINGS_MAX) { debug_print("Inflection strings array too small (%d)\n", INDX_INFLSTRINGS_MAX); break; } char infl_string[INDX_LABEL_SIZEMAX + 1]; const size_t suffix_length = strlen(values[j]); if (length + suffix_length > INDX_LABEL_SIZEMAX) { debug_print("Label too long (%zu + %zu)\n", length, suffix_length); continue; } memcpy(infl_string, string, length); memcpy(infl_string + length, values[j], suffix_length); infl_string[length + suffix_length] = '\0'; infl_strings[count++] = strdup(infl_string); } } return count; } /** @brief Insert inversed inlection string for given entry into trie structure @param[in,out] root Root node of the tree, created if NULL @param[in,out] indx MOBIIndx infl index records @param[in,out] i Index entry number @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_trie_insert_infl(MOBITrie **root, const MOBIIndx *indx, size_t i) { MOBIIndexEntry e = indx->entries[i]; char *inflected = e.label; for (size_t j = 0; j < e.tags_count; j++) { MOBIIndexTag t = e.tags[j]; if (t.tagid == INDX_TAGARR_INFL_PARTS_V1) { for (size_t k = 0; k < t.tagvalues_count - 1; k += 2) { uint32_t len = t.tagvalues[k]; uint32_t offset = t.tagvalues[k + 1]; char *base = mobi_get_cncx_string_flat(indx->cncx_record, offset, len); if (base == NULL) { return MOBI_MALLOC_FAILED; } MOBI_RET ret = mobi_trie_insert_reversed(root, base, inflected); free(base); if (ret != MOBI_SUCCESS) { return ret; } } } } return MOBI_SUCCESS; }
/** @file index.c * @brief Functions to parse index records * * Copyright (c) 2020 Bartek Fabiszewski * http://www.fabiszewski.net * * This file is part of libmobi. * Licensed under LGPL, either version 3, or any later. * See <http://www.gnu.org/licenses/> */ #define _GNU_SOURCE 1 #ifndef __USE_BSD #define __USE_BSD /* for strdup on linux/glibc */ #endif #include <string.h> #include <stdlib.h> #include <stdint.h> #include "index.h" #include "util.h" #include "memory.h" #include "debug.h" #include "buffer.h" /** @brief Read index entry label from buffer pointing at index record data @param[in,out] output Output string @param[in,out] buf MOBIBuffer structure, offset pointing at index entry label @param[in] length Number of bytes to be read @param[in] has_ligatures Decode ligatures if true @return Size of read label */ size_t mobi_indx_get_label(unsigned char *output, MOBIBuffer *buf, const size_t length, const size_t has_ligatures) { if (!output) { buf->error = MOBI_PARAM_ERR; return 0; } if (buf->offset + length > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return 0; } const unsigned char replacement = 0x3f; size_t output_length = 0; size_t i = 0; while (i < length && output_length < INDX_LABEL_SIZEMAX) { unsigned char c = mobi_buffer_get8(buf); i++; if (c == 0) { /* FIXME: is it safe to replace zeroes? */ debug_print("Invalid character: %u\n", c); c = replacement; } if (c <= 5 && has_ligatures) { unsigned char c2 = mobi_buffer_get8(buf); c = mobi_ligature_to_cp1252(c, c2); if (c == 0) { debug_print("Invalid ligature sequence%s", "\n"); mobi_buffer_seek(buf, -1); c = replacement; } else { i++; } } *output++ = c; output_length++; } *output = '\0'; return output_length; } /** @brief Parser of ORDT section of INDX record @param[in,out] buf MOBIBuffer structure, offset pointing at beginning of TAGX section @param[in,out] ordt MOBIOrdt structure to be filled by the function @return MOBI_RET status code (on success MOBI_SUCCESS) */ static MOBI_RET mobi_parse_ordt(MOBIBuffer *buf, MOBIOrdt *ordt) { /* read ORDT1 */ mobi_buffer_setpos(buf, ordt->ordt1_pos); if (mobi_buffer_match_magic(buf, ORDT_MAGIC)) { debug_print("%s\n", "ORDT1 section found"); mobi_buffer_seek(buf, 4); if (ordt->offsets_count + buf->offset > buf->maxlen) { debug_print("ORDT1 section too long (%zu)", ordt->offsets_count); return MOBI_DATA_CORRUPT; } ordt->ordt1 = malloc(ordt->offsets_count * sizeof(*ordt->ordt1)); if (ordt->ordt1 == NULL) { debug_print("%s", "Memory allocation failed for ORDT1 offsets\n"); return MOBI_MALLOC_FAILED; } size_t i = 0; while (i < ordt->offsets_count) { ordt->ordt1[i++] = mobi_buffer_get8(buf); } debug_print("ORDT1: read %zu entries\n", ordt->offsets_count); } /* read ORDT2 */ mobi_buffer_setpos(buf, ordt->ordt2_pos); if (mobi_buffer_match_magic(buf, ORDT_MAGIC)) { debug_print("%s\n", "ORDT2 section found"); mobi_buffer_seek(buf, 4); if (ordt->offsets_count * 2 + buf->offset > buf->maxlen) { debug_print("ORDT2 section too long (%zu)", ordt->offsets_count); return MOBI_DATA_CORRUPT; } ordt->ordt2 = malloc(ordt->offsets_count * sizeof(*ordt->ordt2)); if (ordt->ordt2 == NULL) { debug_print("%s", "Memory allocation failed for ORDT2 offsets\n"); return MOBI_MALLOC_FAILED; } size_t i = 0; while (i < ordt->offsets_count) { ordt->ordt2[i++] = mobi_buffer_get16(buf); } debug_print("ORDT2: read %zu entries\n", ordt->offsets_count); } return MOBI_SUCCESS; } /** @brief Parser of TAGX section of INDX record @param[in,out] buf MOBIBuffer structure, offset pointing at 
beginning of TAGX section @param[in,out] tagx MOBITagx structure to be filled by the function @return MOBI_RET status code (on success MOBI_SUCCESS) */ static MOBI_RET mobi_parse_tagx(MOBIBuffer *buf, MOBITagx *tagx) { tagx->control_byte_count = 0; tagx->tags_count = 0; tagx->tags = NULL; mobi_buffer_seek(buf, 4); /* skip header */ uint32_t tagx_record_length = mobi_buffer_get32(buf); if (tagx_record_length < 12) { debug_print("INDX record too short: %u\n", tagx_record_length); return MOBI_DATA_CORRUPT; } tagx->control_byte_count = mobi_buffer_get32(buf); tagx_record_length -= 12; if (tagx_record_length + buf->offset > buf->maxlen) { debug_print("INDX record too long: %u\n", tagx_record_length); return MOBI_DATA_CORRUPT; } tagx->tags = malloc(tagx_record_length * sizeof(TAGXTags)); if (tagx->tags == NULL) { debug_print("%s", "Memory allocation failed for TAGX tags\n"); return MOBI_MALLOC_FAILED; } size_t i = 0; const size_t tagx_data_length = tagx_record_length / 4; size_t control_byte_count = 0; while (i < tagx_data_length) { tagx->tags[i].tag = mobi_buffer_get8(buf); tagx->tags[i].values_count = mobi_buffer_get8(buf); tagx->tags[i].bitmask = mobi_buffer_get8(buf); const uint8_t control_byte = mobi_buffer_get8(buf); if (control_byte) { control_byte_count++; } tagx->tags[i].control_byte = control_byte; debug_print("tagx[%zu]:\t%i\t%i\t%i\t%i\n", i, tagx->tags[i].tag, tagx->tags[i].values_count, tagx->tags[i].bitmask, control_byte); i++; } if (tagx->control_byte_count != control_byte_count) { debug_print("Wrong count of control bytes: %zu != %zu\n", tagx->control_byte_count, control_byte_count); free(tagx->tags); tagx->tags = NULL; tagx->control_byte_count = 0; return MOBI_DATA_CORRUPT; } tagx->tags_count = i; return MOBI_SUCCESS; } /** @brief Parser of IDXT section of INDX record @param[in,out] buf MOBIBuffer structure, offset pointing at beginning of TAGX section @param[in,out] idxt MOBITagx structure to be filled by the function @param[in] entries_count Number of index entries @return MOBI_RET status code (on success MOBI_SUCCESS) */ static MOBI_RET mobi_parse_idxt(MOBIBuffer *buf, MOBIIdxt *idxt, const size_t entries_count) { const uint32_t idxt_offset = (uint32_t) buf->offset; idxt->offsets_count = 0; char idxt_magic[5]; mobi_buffer_getstring(idxt_magic, buf, 4); if (strncmp(idxt_magic, IDXT_MAGIC, 4) != 0) { debug_print("IDXT wrong magic: %s\n", idxt_magic); return MOBI_DATA_CORRUPT; } size_t i = 0; while (i < entries_count) { /* entry offsets */ idxt->offsets[i++] = mobi_buffer_get16(buf); } /* last entry end position is IDXT tag offset */ idxt->offsets[i] = idxt_offset; idxt->offsets_count = i; return MOBI_SUCCESS; } /** @brief Get encoded character from dictionary index The characters are offsets into ORDT table @param[in] ordt MOBIOrdt structure (ORDT data and metadata) @param[in,out] buf MOBIBuffer structure with index data @param[in,out] offset Value read from buffer @return Number of bytes read (zero in case of error) */ size_t mobi_ordt_getbuffer(const MOBIOrdt *ordt, MOBIBuffer *buf, uint16_t *offset) { size_t i = 0; if (ordt->type == 1) { *offset = mobi_buffer_get8(buf); i++; } else { *offset = mobi_buffer_get16(buf); i += 2; } return i; } /** @brief Fetch UTF-16 value from ORDT2 table @param[in] ordt MOBIOrdt structure (ORDT data and metadata) @param[in] offset Offset in ORDT2 table @return UTF-16 code point */ uint16_t mobi_ordt_lookup(const MOBIOrdt *ordt, const uint16_t offset) { uint16_t utf16; if (offset < ordt->offsets_count) { utf16 = ordt->ordt2[offset]; } else { 
utf16 = offset; } return utf16; } /** @brief Get UTF-8 string from buffer, decoded by lookups in ORDT2 table @param[in] ordt MOBIOrdt structure (ORDT data and metadata) @param[in,out] buf MOBIBuffer structure with input string @param[in,out] output Output buffer (INDX_LABEL_SIZEMAX bytes) @param[in] length Length of input string contained in buf @return Number of bytes read */ size_t mobi_getstring_ordt(const MOBIOrdt *ordt, MOBIBuffer *buf, unsigned char *output, size_t length) { size_t i = 0; size_t output_length = 0; const uint32_t bytemask = 0xbf; const uint32_t bytemark = 0x80; const uint32_t uni_replacement = 0xfffd; const uint32_t surrogate_offset = 0x35fdc00; static const uint8_t init_byte[7] = { 0x00, 0x00, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc }; while (i < length) { uint16_t offset; i += mobi_ordt_getbuffer(ordt, buf, &offset); uint32_t codepoint = mobi_ordt_lookup(ordt, offset); if (codepoint <= 5) { size_t k = mobi_ordt_getbuffer(ordt, buf, &offset); uint32_t codepoint2 = mobi_ordt_lookup(ordt, offset); codepoint = mobi_ligature_to_utf16(codepoint, codepoint2); if (codepoint == uni_replacement) { /* rewind buffer to codepoint2 */ debug_print("Invalid ligature sequence%s", "\n"); mobi_buffer_seek(buf, - (int) k); } else { i += k; } } /* convert UTF-16 surrogates into UTF-32 */ if (codepoint >= 0xd800 && codepoint <= 0xdbff) { size_t k = mobi_ordt_getbuffer(ordt, buf, &offset); uint32_t codepoint2 = mobi_ordt_lookup(ordt, offset); if (codepoint2 >= 0xdc00 && codepoint2 <= 0xdfff) { i += k; codepoint = (codepoint << 10) + codepoint2 - surrogate_offset; } else { /* illegal unpaired high surrogate */ /* rewind buffer to codepoint2 */ debug_print("Invalid code point: %u\n", codepoint); mobi_buffer_seek(buf, - (int) k); codepoint = uni_replacement; } } if ((codepoint >= 0xdc00 && codepoint <= 0xdfff) /* unpaired low surrogate */ || (codepoint >= 0xfdd0 && codepoint <= 0xfdef) /* invalid characters */ || (codepoint & 0xfffe) == 0xfffe /* reserved characters */ || codepoint == 0 /* remove zeroes */) { codepoint = uni_replacement; debug_print("Invalid code point: %u\n", codepoint); } /* Conversion routine based on unicode's ConvertUTF.c */ size_t bytes; if (codepoint < 0x80) { bytes = 1; } else if (codepoint < 0x800) { bytes = 2; } else if (codepoint < 0x10000) { bytes = 3; } else if (codepoint < 0x110000) { bytes = 4; } else { bytes = 3; codepoint = uni_replacement; debug_print("Invalid code point: %u\n", codepoint); } if (output_length + bytes >= INDX_LABEL_SIZEMAX) { debug_print("%s\n", "INDX label too long"); break; } output += bytes; switch (bytes) { case 4: *--output = (uint8_t)((codepoint | bytemark) & bytemask); codepoint >>= 6; /* falls through */ case 3: *--output = (uint8_t)((codepoint | bytemark) & bytemask); codepoint >>= 6; /* falls through */ case 2: *--output = (uint8_t)((codepoint | bytemark) & bytemask); codepoint >>= 6; /* falls through */ case 1: *--output = (uint8_t)(codepoint | init_byte[bytes]); } output += bytes; output_length += bytes; } *output = '\0'; return output_length; } /** @brief Parser of INDX index entry @param[in,out] indx MOBIIndx structure, to be filled with parsed data @param[in] idxt MOBIIdxt structure with parsed IDXT index @param[in] tagx MOBITagx structure with parsed TAGX index @param[in,out] buf MOBIBuffer structure with index data @param[in] curr_number Sequential number of an index entry for current record @return MOBI_RET status code (on success MOBI_SUCCESS) */ static MOBI_RET mobi_parse_index_entry(MOBIIndx *indx, const MOBIIdxt idxt, const 
MOBITagx *tagx, const MOBIOrdt *ordt, MOBIBuffer *buf, const size_t curr_number) { if (indx == NULL) { debug_print("%s", "INDX structure not initialized\n"); return MOBI_INIT_FAILED; } const size_t entry_offset = indx->entries_count; const size_t entry_length = idxt.offsets[curr_number + 1] - idxt.offsets[curr_number]; mobi_buffer_setpos(buf, idxt.offsets[curr_number]); size_t entry_number = curr_number + entry_offset; if (entry_number >= indx->total_entries_count) { debug_print("Entry number beyond array: %zu\n", entry_number); return MOBI_DATA_CORRUPT; } /* save original record maxlen */ const size_t buf_maxlen = buf->maxlen; if (buf->offset + entry_length >= buf_maxlen) { debug_print("Entry length too long: %zu\n", entry_length); return MOBI_DATA_CORRUPT; } buf->maxlen = buf->offset + entry_length; size_t label_length = mobi_buffer_get8(buf); if (label_length > entry_length) { debug_print("Label length too long: %zu\n", label_length); return MOBI_DATA_CORRUPT; } char text[INDX_LABEL_SIZEMAX]; /* FIXME: what is ORDT1 for? */ if (ordt->ordt2) { label_length = mobi_getstring_ordt(ordt, buf, (unsigned char*) text, label_length); } else { label_length = mobi_indx_get_label((unsigned char*) text, buf, label_length, indx->ligt_entries_count); } indx->entries[entry_number].label = malloc(label_length + 1); if (indx->entries[entry_number].label == NULL) { debug_print("Memory allocation failed (%zu bytes)\n", label_length); return MOBI_MALLOC_FAILED; } strncpy(indx->entries[entry_number].label, text, label_length + 1); //debug_print("tag label[%zu]: %s\n", entry_number, indx->entries[entry_number].label); unsigned char *control_bytes; control_bytes = buf->data + buf->offset; mobi_buffer_seek(buf, (int) tagx->control_byte_count); indx->entries[entry_number].tags_count = 0; indx->entries[entry_number].tags = NULL; if (tagx->tags_count > 0) { typedef struct { uint8_t tag; uint8_t tag_value_count; uint32_t value_count; uint32_t value_bytes; } MOBIPtagx; MOBIPtagx *ptagx = malloc(tagx->tags_count * sizeof(MOBIPtagx)); if (ptagx == NULL) { debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIPtagx)); return MOBI_MALLOC_FAILED; } uint32_t ptagx_count = 0; size_t len; size_t i = 0; while (i < tagx->tags_count) { if (tagx->tags[i].control_byte == 1) { control_bytes++; i++; continue; } uint32_t value = control_bytes[0] & tagx->tags[i].bitmask; if (value != 0) { /* FIXME: is it safe to use MOBI_NOTSET? */ uint32_t value_count = MOBI_NOTSET; uint32_t value_bytes = MOBI_NOTSET; /* all bits of masked value are set */ if (value == tagx->tags[i].bitmask) { /* more than 1 bit set */ if (mobi_bitcount(tagx->tags[i].bitmask) > 1) { /* read value bytes from entry */ len = 0; value_bytes = mobi_buffer_get_varlen(buf, &len); } else { value_count = 1; } } else { uint8_t mask = tagx->tags[i].bitmask; while ((mask & 1) == 0) { mask >>= 1; value >>= 1; } value_count = value; } ptagx[ptagx_count].tag = tagx->tags[i].tag; ptagx[ptagx_count].tag_value_count = tagx->tags[i].values_count; ptagx[ptagx_count].value_count = value_count; ptagx[ptagx_count].value_bytes = value_bytes; ptagx_count++; } i++; } indx->entries[entry_number].tags = malloc(tagx->tags_count * sizeof(MOBIIndexTag)); if (indx->entries[entry_number].tags == NULL) { debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIIndexTag)); free(ptagx); return MOBI_MALLOC_FAILED; } i = 0; while (i < ptagx_count) { uint32_t tagvalues_count = 0; /* FIXME: is it safe to use MOBI_NOTSET? 
*/ /* value count is set */ uint32_t tagvalues[INDX_TAGVALUES_MAX]; if (ptagx[i].value_count != MOBI_NOTSET) { size_t count = ptagx[i].value_count * ptagx[i].tag_value_count; while (count-- && tagvalues_count < INDX_TAGVALUES_MAX) { len = 0; const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len); tagvalues[tagvalues_count++] = value_bytes; } /* value count is not set */ } else { /* read value_bytes bytes */ len = 0; while (len < ptagx[i].value_bytes && tagvalues_count < INDX_TAGVALUES_MAX) { const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len); tagvalues[tagvalues_count++] = value_bytes; } } if (tagvalues_count) { const size_t arr_size = tagvalues_count * sizeof(*indx->entries[entry_number].tags[i].tagvalues); indx->entries[entry_number].tags[i].tagvalues = malloc(arr_size); if (indx->entries[entry_number].tags[i].tagvalues == NULL) { debug_print("Memory allocation failed (%zu bytes)\n", arr_size); free(ptagx); return MOBI_MALLOC_FAILED; } memcpy(indx->entries[entry_number].tags[i].tagvalues, tagvalues, arr_size); } else { indx->entries[entry_number].tags[i].tagvalues = NULL; } indx->entries[entry_number].tags[i].tagid = ptagx[i].tag; indx->entries[entry_number].tags[i].tagvalues_count = tagvalues_count; indx->entries[entry_number].tags_count++; i++; } free(ptagx); } /* restore buffer maxlen */ buf->maxlen = buf_maxlen; return MOBI_SUCCESS; } /** @brief Parser of INDX record @param[in] indx_record MOBIPdbRecord structure with INDX record @param[in,out] indx MOBIIndx structure to be filled with parsed entries @param[in,out] tagx MOBITagx structure, will be filled with parsed TAGX section data if present in the INDX record, otherwise TAGX data will be used to parse the record @param[in,out] ordt MOBIOrdt structure, will be filled with parsed ORDT sections @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_parse_indx(const MOBIPdbRecord *indx_record, MOBIIndx *indx, MOBITagx *tagx, MOBIOrdt *ordt) { if (indx_record == NULL || indx == NULL || tagx == NULL || ordt == NULL) { debug_print("%s", "index structure not initialized\n"); return MOBI_INIT_FAILED; } MOBI_RET ret = MOBI_SUCCESS; MOBIBuffer *buf = mobi_buffer_init_null(indx_record->data, indx_record->size); if (buf == NULL) { debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } char indx_magic[5]; mobi_buffer_getstring(indx_magic, buf, 4); /* 0: INDX magic */ const uint32_t header_length = mobi_buffer_get32(buf); /* 4: header length */ if (strncmp(indx_magic, INDX_MAGIC, 4) != 0 || header_length == 0 || header_length > indx_record->size) { debug_print("INDX wrong magic: %s or header length: %u\n", indx_magic, header_length); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } mobi_buffer_seek(buf, 4); /* 8: unk, usually zeroes */ const uint32_t type = mobi_buffer_get32(buf); /* 12: 0 - normal, 2 - inflection */ mobi_buffer_seek(buf, 4); /* 16: unk */ const uint32_t idxt_offset = mobi_buffer_get32(buf); /* 20: IDXT offset */ const uint32_t entries_count = mobi_buffer_get32(buf); /* 24: entries count */ if (entries_count > INDX_RECORD_MAXCNT) { debug_print("Too many index entries (%u)\n", entries_count); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } /* if record contains TAGX section, read it (and ORDT) and return */ if (mobi_buffer_match_magic_offset(buf, TAGX_MAGIC, header_length) && indx->total_entries_count == 0) { buf->maxlen = header_length; /* TAGX metadata */ uint32_t encoding = mobi_buffer_get32(buf); /* 28: encoding */ if (encoding == MOBI_NOTSET) { 
encoding = MOBI_CP1252; } mobi_buffer_seek(buf, 4); /* 32 */ const uint32_t total_entries_count = mobi_buffer_get32(buf); /* 36: total entries count */ if (total_entries_count > INDX_TOTAL_MAXCNT) { debug_print("Too many total index entries (%u)\n", total_entries_count); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } uint32_t ordt_offset = mobi_buffer_get32(buf); /* 40: ORDT offset; currently not used */ if (ordt_offset + ORDT_RECORD_MAXCNT + 4 > indx_record->size) { ordt_offset = 0; } uint32_t ligt_offset = mobi_buffer_get32(buf); /* 44: LIGT offset; currently static table used instead */ uint32_t ligt_entries_count = mobi_buffer_get32(buf); /* 48: LIGT entries count */ if (ligt_offset + 4 * ligt_entries_count + 4 > indx_record->size) { ligt_offset = 0; ligt_entries_count = 0; } const uint32_t cncx_records_count = mobi_buffer_get32(buf); /* 52: CNCX entries count */ if (cncx_records_count > CNCX_RECORD_MAXCNT) { debug_print("Too many CNCX records (%u)\n", cncx_records_count); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } /* 56: unk count */ /* 60-148: phonetizer */ uint32_t ordt_type = 0; uint32_t ordt_entries_count = 0; uint32_t ordt1_offset = 0; uint32_t ordt2_offset = 0; uint32_t index_name_offset = 0; uint32_t index_name_length = 0; if (header_length >= 180) { mobi_buffer_setpos(buf, 164); ordt_type = mobi_buffer_get32(buf); /* 164: ORDT type */ ordt_entries_count = mobi_buffer_get32(buf); /* 168: ORDT entries count */ ordt1_offset = mobi_buffer_get32(buf); /* 172: ORDT1 offset; currently not used */ ordt2_offset = mobi_buffer_get32(buf); /* 176: ORDT2 offset */ const size_t entry_size = (ordt_type == 0) ? 1 : 2; if (ordt1_offset + entry_size * ordt_entries_count > indx_record->size || ordt2_offset + 2 * ordt_entries_count > indx_record->size) { ordt1_offset = 0; ordt2_offset = 0; ordt_entries_count = 0; } index_name_offset = mobi_buffer_get32(buf); /* 180: Index name offset */ index_name_length = mobi_buffer_get32(buf); /* 184: Index name length */ } buf->maxlen = indx_record->size; mobi_buffer_setpos(buf, header_length); ret = mobi_parse_tagx(buf, tagx); if (ret != MOBI_SUCCESS) { mobi_buffer_free_null(buf); return ret; } if (ordt_entries_count > 0) { /* parse ORDT sections */ ordt->offsets_count = ordt_entries_count; ordt->type = ordt_type; ordt->ordt1_pos = ordt1_offset; ordt->ordt2_pos = ordt2_offset; ret = mobi_parse_ordt(buf, ordt); debug_print("ORDT: %u, %u, %u, %u\n", ordt_type, ordt_entries_count, ordt1_offset, ordt2_offset); if (ret != MOBI_SUCCESS) { mobi_buffer_free_null(buf); return ret; } } if (index_name_offset > 0 && index_name_length > 0) { if (index_name_length <= header_length - index_name_offset && index_name_length < INDX_NAME_SIZEMAX) { mobi_buffer_setpos(buf, index_name_offset); char *name = malloc(index_name_length + 1); if (name == NULL) { debug_print("%s", "Memory allocation failed\n"); mobi_buffer_free_null(buf); return MOBI_MALLOC_FAILED; } mobi_buffer_getstring(name, buf, index_name_length); indx->orth_index_name = name; debug_print("Orth index name: %s\n", name); } } indx->encoding = encoding; indx->type = type; indx->entries_count = entries_count; indx->total_entries_count = total_entries_count; if (ligt_entries_count != 0 && !mobi_buffer_match_magic_offset(buf, LIGT_MAGIC, ligt_offset)) { ligt_offset = 0; ligt_entries_count = 0; } indx->ligt_offset = ligt_offset; indx->ligt_entries_count = ligt_entries_count; indx->ordt_offset = ordt_offset; indx->cncx_records_count = cncx_records_count; } else { /* else parse IDXT entries offsets */ 
if (idxt_offset == 0) { debug_print("%s", "Missing IDXT offset\n"); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } if (idxt_offset + 2 * entries_count + 4 > indx_record->size ) { debug_print("IDXT entries beyond record end%s", "\n"); mobi_buffer_free_null(buf); return MOBI_DATA_CORRUPT; } mobi_buffer_setpos(buf, idxt_offset); MOBIIdxt idxt; uint32_t *offsets = malloc((entries_count + 1) * sizeof(uint32_t)); if (offsets == NULL) { mobi_buffer_free_null(buf); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } idxt.offsets = offsets; ret = mobi_parse_idxt(buf, &idxt, entries_count); if (ret != MOBI_SUCCESS) { debug_print("%s", "IDXT parsing failed\n"); mobi_buffer_free_null(buf); free(offsets); return ret; } /* parse entries */ if (entries_count > 0) { if (indx->entries == NULL) { indx->entries = malloc(indx->total_entries_count * sizeof(MOBIIndexEntry)); if (indx->entries == NULL) { mobi_buffer_free_null(buf); free(offsets); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } } size_t i = 0; while (i < entries_count) { ret = mobi_parse_index_entry(indx, idxt, tagx, ordt, buf, i++); if (ret != MOBI_SUCCESS) { mobi_buffer_free_null(buf); free(offsets); return ret; } } indx->entries_count += entries_count; } free(offsets); } mobi_buffer_free_null(buf); return MOBI_SUCCESS; } /** @brief Parser of a set of index records @param[in] m MOBIData structure containing MOBI file metadata and data @param[in,out] indx MOBIIndx structure to be filled with parsed entries @param[in] indx_record_number Number of the first record of the set @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_parse_index(const MOBIData *m, MOBIIndx *indx, const size_t indx_record_number) { MOBI_RET ret; /* tagx->tags array will be allocated in mobi_parse_tagx */ MOBITagx *tagx = calloc(1, sizeof(MOBITagx)); if (tagx == NULL) { mobi_free_indx(indx); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } /* ordt->ordt1 and ordt.ordt2 arrays will be allocated in mobi_parse_ordt */ MOBIOrdt *ordt = calloc(1, sizeof(MOBIOrdt)); if (ordt == NULL) { mobi_free_indx(indx); mobi_free_tagx(tagx); debug_print("%s\n", "Memory allocation failed"); return MOBI_MALLOC_FAILED; } /* parse first meta INDX record */ MOBIPdbRecord *record = mobi_get_record_by_seqnumber(m, indx_record_number); ret = mobi_parse_indx(record, indx, tagx, ordt); if (ret != MOBI_SUCCESS) { mobi_free_indx(indx); mobi_free_tagx(tagx); mobi_free_ordt(ordt); return ret; } /* parse remaining INDX records for the index */ size_t count = indx->entries_count; indx->entries_count = 0; while (count--) { record = record->next; ret = mobi_parse_indx(record, indx, tagx, ordt); if (ret != MOBI_SUCCESS) { mobi_free_indx(indx); mobi_free_tagx(tagx); mobi_free_ordt(ordt); return ret; } } if (indx->entries_count != indx->total_entries_count) { debug_print("Entries count %zu != total entries count %zu\n", indx->entries_count, indx->total_entries_count); mobi_free_indx(indx); mobi_free_tagx(tagx); mobi_free_ordt(ordt); return MOBI_DATA_CORRUPT; } /* copy pointer to first cncx record if present and set info from first record */ if (indx->cncx_records_count) { indx->cncx_record = record->next; } mobi_free_tagx(tagx); mobi_free_ordt(ordt); return MOBI_SUCCESS; } /** @brief Get a value of tag[tagid][tagindex] for given index entry @param[in,out] tagvalue Will be set to a tag value @param[in] entry Index entry to be search for the value @param[in] tag_arr Array: tag_arr[0] = tagid, tag_arr[1] 
= tagindex @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_get_indxentry_tagvalue(uint32_t *tagvalue, const MOBIIndexEntry *entry, const unsigned tag_arr[]) { if (entry == NULL) { debug_print("%s", "INDX entry not initialized\n"); return MOBI_INIT_FAILED; } size_t i = 0; while (i < entry->tags_count) { if (entry->tags[i].tagid == tag_arr[0]) { if (entry->tags[i].tagvalues_count > tag_arr[1]) { *tagvalue = entry->tags[i].tagvalues[tag_arr[1]]; return MOBI_SUCCESS; } break; } i++; } //debug_print("tag[%i][%i] not found in entry: %s\n", tag_arr[0], tag_arr[1], entry->label); return MOBI_DATA_CORRUPT; } /** @brief Get array of tagvalues of tag[tagid] for given index entry @param[in,out] tagarr Pointer to tagvalues array @param[in] entry Index entry to be search for the value @param[in] tagid Id of the tag @return Size of the array (zero on failure) */ size_t mobi_get_indxentry_tagarray(uint32_t **tagarr, const MOBIIndexEntry *entry, const size_t tagid) { if (entry == NULL) { debug_print("%s", "INDX entry not initialized\n"); return 0; } size_t i = 0; while (i < entry->tags_count) { if (entry->tags[i].tagid == tagid) { *tagarr = entry->tags[i].tagvalues; return entry->tags[i].tagvalues_count; } i++; } //debug_print("tag[%zu] not found in entry: %s\n", tagid, entry->label); return 0; } /** @brief Get entry start offset for the orth entry @param[in] entry MOBIIndexEntry structure @return Start offset, MOBI_NOTSET on failure */ uint32_t mobi_get_orth_entry_offset(const MOBIIndexEntry *entry) { uint32_t entry_startpos; MOBI_RET ret = mobi_get_indxentry_tagvalue(&entry_startpos, entry, INDX_TAG_ORTH_POSITION); if (ret != MOBI_SUCCESS) { return MOBI_NOTSET; } return entry_startpos; } /** @brief Get text length for the orth entry @param[in] entry MOBIIndexEntry structure @return Text length, MOBI_NOTSET on failure */ uint32_t mobi_get_orth_entry_length(const MOBIIndexEntry *entry) { uint32_t entry_textlen; MOBI_RET ret = mobi_get_indxentry_tagvalue(&entry_textlen, entry, INDX_TAG_ORTH_LENGTH); if (ret != MOBI_SUCCESS) { return MOBI_NOTSET; } return entry_textlen; } /** @brief Check if given tagid is present in the index @param[in] indx Index MOBIIndx structure @param[in] tagid Id of the tag @return True on success, false otherwise */ bool mobi_indx_has_tag(const MOBIIndx *indx, const size_t tagid) { if (indx) { for (size_t i = 0; i < indx->entries_count; i++) { MOBIIndexEntry entry = indx->entries[i]; for(size_t j = 0; j < entry.tags_count; j++) { if (entry.tags[j].tagid == tagid) { return true; } } } } return false; } /** @brief Get compiled index entry string Allocates memory for the string. Must be freed by caller. @param[in] cncx_record MOBIPdbRecord structure with cncx record @param[in] cncx_offset Offset of string entry from the beginning of the record @return Entry string or null if malloc failed */ char * mobi_get_cncx_string(const MOBIPdbRecord *cncx_record, const uint32_t cncx_offset) { /* TODO: handle multiple cncx records */ MOBIBuffer *buf = mobi_buffer_init_null(cncx_record->data, cncx_record->size); if (buf == NULL) { debug_print("%s\n", "Memory allocation failed"); return NULL; } mobi_buffer_setpos(buf, cncx_offset); size_t len = 0; const uint32_t string_length = mobi_buffer_get_varlen(buf, &len); char *string = malloc(string_length + 1); if (string) { mobi_buffer_getstring(string, buf, string_length); } mobi_buffer_free_null(buf); return string; } /** @brief Get compiled index entry string, converted to utf8 encoding Allocates memory for the string. 
Must be freed by caller. @param[in] cncx_record MOBIPdbRecord structure with cncx record @param[in] cncx_offset Offset of string entry from the beginning of the record @param[in] cncx_encoding Encoding @return Entry string or null if malloc failed */ char * mobi_get_cncx_string_utf8(const MOBIPdbRecord *cncx_record, const uint32_t cncx_offset, MOBIEncoding cncx_encoding) { char *string = mobi_get_cncx_string(cncx_record, cncx_offset); if (string != NULL && cncx_encoding == MOBI_CP1252) { size_t in_len = strlen(string); size_t out_len = in_len * 3 + 1; char *decoded = malloc(out_len); if (decoded) { mobi_cp1252_to_utf8(decoded, string, &out_len, in_len); free(string); string = strdup(decoded); free(decoded); } } return string; } /** @brief Get flat index entry string Allocates memory for the string. Must be freed by caller. @param[in] cncx_record MOBIPdbRecord structure with cncx record @param[in] cncx_offset Offset of string entry from the beginning of the record @param[in] length Length of the string to be extracted @return Entry string */ char * mobi_get_cncx_string_flat(const MOBIPdbRecord *cncx_record, const uint32_t cncx_offset, const size_t length) { /* TODO: handle multiple cncx records */ MOBIBuffer *buf = mobi_buffer_init_null(cncx_record->data, cncx_record->size); if (buf == NULL) { debug_print("%s\n", "Memory allocation failed"); return NULL; } mobi_buffer_setpos(buf, cncx_offset); char *string = malloc(length + 1); if (string) { mobi_buffer_getstring(string, buf, length); } mobi_buffer_free_null(buf); return string; } /** @brief Decode compiled infl index entry Buffer decoded must be initialized with basic index entry. Basic index entry will be transformed into inflected form, based on compiled rule. Min. size of input buffer (decoded) must be INDX_INFLBUF_SIZEMAX + 1 @param[in,out] decoded Decoded entry string @param[in,out] decoded_size Decoded entry size @param[in] rule Compiled rule @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_decode_infl(unsigned char *decoded, int *decoded_size, const unsigned char *rule) { int pos = *decoded_size; char mod = 'i'; char dir = '<'; char olddir; unsigned char c; while ((c = *rule++)) { if (c <= 4) { mod = (c <= 2) ? 'i' : 'd'; /* insert, delete */ olddir = dir; dir = (c & 2) ? '<' : '>'; /* left, right */ if (olddir != dir && olddir) { pos = (c & 2) ? 
*decoded_size : 0; } } else if (c > 10 && c < 20) { if (dir == '>') { pos = *decoded_size; } pos -= c - 10; dir = 0; if (pos < 0 || pos > *decoded_size) { debug_print("Position setting failed (%s)\n", decoded); return MOBI_DATA_CORRUPT; } } else { if (mod == 'i') { const unsigned char *s = decoded + pos; unsigned char *d = decoded + pos + 1; const int l = *decoded_size - pos; if (l < 0 || d + l > decoded + INDX_INFLBUF_SIZEMAX) { debug_print("Out of buffer in %s at pos: %i\n", decoded, pos); return MOBI_DATA_CORRUPT; } memmove(d, s, (size_t) l); decoded[pos] = c; (*decoded_size)++; if (dir == '>') { pos++; } } else { if (dir == '<') { pos--; } const unsigned char *s = decoded + pos + 1; unsigned char *d = decoded + pos; const int l = *decoded_size - pos; if (l < 0 || d + l > decoded + INDX_INFLBUF_SIZEMAX) { debug_print("Out of buffer in %s at pos: %i\n", decoded, pos); return MOBI_DATA_CORRUPT; } if (decoded[pos] != c) { debug_print("Character mismatch in %s at pos: %i (%c != %c)\n", decoded, pos, decoded[pos], c); return MOBI_DATA_CORRUPT; } memmove(d, s, (size_t) l); (*decoded_size)--; } } } return MOBI_SUCCESS; } /** @brief Get all matches for given string from trie structure Matches are made agains reversed string and all its substrings @param[in,out] infl_strings Array of returned strings @param[in,out] root Root node of the tree @param[in,out] string Index entry number @return Number of returned strings */ size_t mobi_trie_get_inflgroups(char **infl_strings, MOBITrie * const root, const char *string) { /* travers trie and get values for each substring */ if (root == NULL) { return MOBI_PARAM_ERR; } size_t count = 0; size_t length = strlen(string); MOBITrie *node = root; while (node && length > 0) { char **values = NULL; size_t values_count = 0; node = mobi_trie_get_next(&values, &values_count, node, string[length - 1]); length--; for (size_t j = 0; j < values_count; j++) { if (count == INDX_INFLSTRINGS_MAX) { debug_print("Inflection strings array too small (%d)\n", INDX_INFLSTRINGS_MAX); break; } char infl_string[INDX_LABEL_SIZEMAX + 1]; const size_t suffix_length = strlen(values[j]); if (length + suffix_length > INDX_LABEL_SIZEMAX) { debug_print("Label too long (%zu + %zu)\n", length, suffix_length); continue; } memcpy(infl_string, string, length); memcpy(infl_string + length, values[j], suffix_length); infl_string[length + suffix_length] = '\0'; infl_strings[count++] = strdup(infl_string); } } return count; } /** @brief Insert inversed inlection string for given entry into trie structure @param[in,out] root Root node of the tree, created if NULL @param[in,out] indx MOBIIndx infl index records @param[in,out] i Index entry number @return MOBI_RET status code (on success MOBI_SUCCESS) */ MOBI_RET mobi_trie_insert_infl(MOBITrie **root, const MOBIIndx *indx, size_t i) { MOBIIndexEntry e = indx->entries[i]; char *inflected = e.label; for (size_t j = 0; j < e.tags_count; j++) { MOBIIndexTag t = e.tags[j]; if (t.tagid == INDX_TAGARR_INFL_PARTS_V1) { for (size_t k = 0; k + 1 < t.tagvalues_count; k += 2) { uint32_t len = t.tagvalues[k]; uint32_t offset = t.tagvalues[k + 1]; char *base = mobi_get_cncx_string_flat(indx->cncx_record, offset, len); if (base == NULL) { return MOBI_MALLOC_FAILED; } MOBI_RET ret = mobi_trie_insert_reversed(root, base, inflected); free(base); if (ret != MOBI_SUCCESS) { return ret; } } } } return MOBI_SUCCESS; }
MOBI_RET mobi_trie_insert_infl(MOBITrie **root, const MOBIIndx *indx, size_t i) {
    MOBIIndexEntry e = indx->entries[i];
    char *inflected = e.label;
    for (size_t j = 0; j < e.tags_count; j++) {
        MOBIIndexTag t = e.tags[j];
        if (t.tagid == INDX_TAGARR_INFL_PARTS_V1) {
            for (size_t k = 0; k < t.tagvalues_count - 1; k += 2) {
                uint32_t len = t.tagvalues[k];
                uint32_t offset = t.tagvalues[k + 1];
                char *base = mobi_get_cncx_string_flat(indx->cncx_record, offset, len);
                if (base == NULL) {
                    return MOBI_MALLOC_FAILED;
                }
                MOBI_RET ret = mobi_trie_insert_reversed(root, base, inflected);
                free(base);
                if (ret != MOBI_SUCCESS) {
                    return ret;
                }
            }
        }
    }
    return MOBI_SUCCESS;
}
MOBI_RET mobi_trie_insert_infl(MOBITrie **root, const MOBIIndx *indx, size_t i) {
    MOBIIndexEntry e = indx->entries[i];
    char *inflected = e.label;
    for (size_t j = 0; j < e.tags_count; j++) {
        MOBIIndexTag t = e.tags[j];
        if (t.tagid == INDX_TAGARR_INFL_PARTS_V1) {
            for (size_t k = 0; k + 1 < t.tagvalues_count; k += 2) {
                uint32_t len = t.tagvalues[k];
                uint32_t offset = t.tagvalues[k + 1];
                char *base = mobi_get_cncx_string_flat(indx->cncx_record, offset, len);
                if (base == NULL) {
                    return MOBI_MALLOC_FAILED;
                }
                MOBI_RET ret = mobi_trie_insert_reversed(root, base, inflected);
                free(base);
                if (ret != MOBI_SUCCESS) {
                    return ret;
                }
            }
        }
    }
    return MOBI_SUCCESS;
}
{'added': [(1060, ' for (size_t k = 0; k + 1 < t.tagvalues_count; k += 2) {')], 'deleted': [(1060, ' for (size_t k = 0; k < t.tagvalues_count - 1; k += 2) {')]}
1
1
817
5,479
https://github.com/bfabiszewski/libmobi
CVE-2022-29788
['CWE-476']
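The one-line fix recorded in the diff of this record changes the pair-walking loop bound in mobi_trie_insert_infl from k < t.tagvalues_count - 1 to k + 1 < t.tagvalues_count. The count is an unsigned size_t in the surrounding code, so when a tag carries zero values the old expression wraps around to SIZE_MAX, the loop body runs, and t.tagvalues is read even though it can plausibly be unset for an empty tag, which is consistent with the CWE-476 classification above. The stand-alone C sketch below only illustrates that unsigned wrap-around; the struct and field names are invented for the example and are not libmobi code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the index tag; names are illustrative only. */
struct tag {
    uint32_t *tagvalues;    /* assumed NULL when the tag has no values */
    size_t tagvalues_count; /* unsigned count, as in the record above */
};

int main(void) {
    struct tag t = { NULL, 0 }; /* empty tag */
    size_t k = 0;

    /* Old bound: tagvalues_count - 1 wraps to SIZE_MAX when the count is 0,
     * so k = 0 passes the check and the (NULL) value array would be read. */
    if (k < t.tagvalues_count - 1) {
        printf("old bound admits k = 0 on an empty tag\n");
    }

    /* Fixed bound: the +1 stays on the loop index, so an empty tag never
     * enters the loop and no (len, offset) pair is read. */
    if (k + 1 < t.tagvalues_count) {
        printf("fixed bound enters the loop\n");
    } else {
        printf("fixed bound skips the empty tag\n");
    }
    return 0;
}

The general rule the patch applies: when a count is unsigned, compare k + 1 against the count instead of subtracting 1 from it.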
dns.c
PHP_FUNCTION
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2014 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: The typical suspects | | Pollita <pollita@php.net> | | Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ /* {{{ includes */ #include "php.h" #include "php_network.h" #if HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef PHP_WIN32 # include "win32/inet.h" # include <winsock2.h> # include <windows.h> # include <Ws2tcpip.h> #else /* This holds good for NetWare too, both for Winsock and Berkeley sockets */ #include <netinet/in.h> #if HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #include <netdb.h> #ifdef _OSD_POSIX #undef STATUS #undef T_UNSPEC #endif #if HAVE_ARPA_NAMESER_H #ifdef DARWIN # define BIND_8_COMPAT 1 #endif #include <arpa/nameser.h> #endif #if HAVE_RESOLV_H #include <resolv.h> #endif #ifdef HAVE_DNS_H #include <dns.h> #endif #endif /* Borrowed from SYS/SOCKET.H */ #if defined(NETWARE) && defined(USE_WINSOCK) #define AF_INET 2 /* internetwork: UDP, TCP, etc. */ #endif #ifndef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 255 #endif /* For the local hostname obtained via gethostname which is different from the dns-related MAXHOSTNAMELEN constant above */ #ifndef HOST_NAME_MAX #define HOST_NAME_MAX 255 #endif #include "php_dns.h" /* type compat */ #ifndef DNS_T_A #define DNS_T_A 1 #endif #ifndef DNS_T_NS #define DNS_T_NS 2 #endif #ifndef DNS_T_CNAME #define DNS_T_CNAME 5 #endif #ifndef DNS_T_SOA #define DNS_T_SOA 6 #endif #ifndef DNS_T_PTR #define DNS_T_PTR 12 #endif #ifndef DNS_T_HINFO #define DNS_T_HINFO 13 #endif #ifndef DNS_T_MINFO #define DNS_T_MINFO 14 #endif #ifndef DNS_T_MX #define DNS_T_MX 15 #endif #ifndef DNS_T_TXT #define DNS_T_TXT 16 #endif #ifndef DNS_T_AAAA #define DNS_T_AAAA 28 #endif #ifndef DNS_T_SRV #define DNS_T_SRV 33 #endif #ifndef DNS_T_NAPTR #define DNS_T_NAPTR 35 #endif #ifndef DNS_T_A6 #define DNS_T_A6 38 #endif #ifndef DNS_T_ANY #define DNS_T_ANY 255 #endif /* }}} */ static char *php_gethostbyaddr(char *ip); static char *php_gethostbyname(char *name); #ifdef HAVE_GETHOSTNAME /* {{{ proto string gethostname() Get the host name of the current machine */ PHP_FUNCTION(gethostname) { char buf[HOST_NAME_MAX]; if (zend_parse_parameters_none() == FAILURE) { return; } if (gethostname(buf, sizeof(buf) - 1)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "unable to fetch host [%d]: %s", errno, strerror(errno)); RETURN_FALSE; } RETURN_STRING(buf, 1); } /* }}} */ #endif /* TODO: Reimplement the gethostby* functions using the new winxp+ API, in dns_win32.c, then we can have a dns.c, dns_unix.c and dns_win32.c instead of a messy dns.c full of #ifdef */ /* {{{ proto string gethostbyaddr(string ip_address) Get the Internet host name corresponding to a given IP address */ PHP_FUNCTION(gethostbyaddr) { char *addr; int addr_len; 
char *hostname; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &addr, &addr_len) == FAILURE) { return; } hostname = php_gethostbyaddr(addr); if (hostname == NULL) { #if HAVE_IPV6 && HAVE_INET_PTON php_error_docref(NULL TSRMLS_CC, E_WARNING, "Address is not a valid IPv4 or IPv6 address"); #else php_error_docref(NULL TSRMLS_CC, E_WARNING, "Address is not in a.b.c.d form"); #endif RETVAL_FALSE; } else { RETVAL_STRING(hostname, 0); } } /* }}} */ /* {{{ php_gethostbyaddr */ static char *php_gethostbyaddr(char *ip) { #if HAVE_IPV6 && HAVE_INET_PTON struct in6_addr addr6; #endif struct in_addr addr; struct hostent *hp; #if HAVE_IPV6 && HAVE_INET_PTON if (inet_pton(AF_INET6, ip, &addr6)) { hp = gethostbyaddr((char *) &addr6, sizeof(addr6), AF_INET6); } else if (inet_pton(AF_INET, ip, &addr)) { hp = gethostbyaddr((char *) &addr, sizeof(addr), AF_INET); } else { return NULL; } #else addr.s_addr = inet_addr(ip); if (addr.s_addr == -1) { return NULL; } hp = gethostbyaddr((char *) &addr, sizeof(addr), AF_INET); #endif if (!hp || hp->h_name == NULL || hp->h_name[0] == '\0') { return estrdup(ip); } return estrdup(hp->h_name); } /* }}} */ /* {{{ proto string gethostbyname(string hostname) Get the IP address corresponding to a given Internet host name */ PHP_FUNCTION(gethostbyname) { char *hostname; int hostname_len; char *addr; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &hostname, &hostname_len) == FAILURE) { return; } addr = php_gethostbyname(hostname); RETVAL_STRING(addr, 0); } /* }}} */ /* {{{ proto array gethostbynamel(string hostname) Return a list of IP addresses that a given hostname resolves to. */ PHP_FUNCTION(gethostbynamel) { char *hostname; int hostname_len; struct hostent *hp; struct in_addr in; int i; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &hostname, &hostname_len) == FAILURE) { return; } hp = gethostbyname(hostname); if (hp == NULL || hp->h_addr_list == NULL) { RETURN_FALSE; } array_init(return_value); for (i = 0 ; hp->h_addr_list[i] != 0 ; i++) { in = *(struct in_addr *) hp->h_addr_list[i]; add_next_index_string(return_value, inet_ntoa(in), 1); } } /* }}} */ /* {{{ php_gethostbyname */ static char *php_gethostbyname(char *name) { struct hostent *hp; struct in_addr in; hp = gethostbyname(name); if (!hp || !*(hp->h_addr_list)) { return estrdup(name); } memcpy(&in.s_addr, *(hp->h_addr_list), sizeof(in.s_addr)); return estrdup(inet_ntoa(in)); } /* }}} */ #if HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) # define PHP_DNS_NUM_TYPES 12 /* Number of DNS Types Supported by PHP currently */ # define PHP_DNS_A 0x00000001 # define PHP_DNS_NS 0x00000002 # define PHP_DNS_CNAME 0x00000010 # define PHP_DNS_SOA 0x00000020 # define PHP_DNS_PTR 0x00000800 # define PHP_DNS_HINFO 0x00001000 # define PHP_DNS_MX 0x00004000 # define PHP_DNS_TXT 0x00008000 # define PHP_DNS_A6 0x01000000 # define PHP_DNS_SRV 0x02000000 # define PHP_DNS_NAPTR 0x04000000 # define PHP_DNS_AAAA 0x08000000 # define PHP_DNS_ANY 0x10000000 # define PHP_DNS_ALL (PHP_DNS_A|PHP_DNS_NS|PHP_DNS_CNAME|PHP_DNS_SOA|PHP_DNS_PTR|PHP_DNS_HINFO|PHP_DNS_MX|PHP_DNS_TXT|PHP_DNS_A6|PHP_DNS_SRV|PHP_DNS_NAPTR|PHP_DNS_AAAA) #endif /* HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) */ /* Note: These functions are defined in ext/standard/dns_win32.c for Windows! 
*/ #if !defined(PHP_WIN32) && (HAVE_DNS_SEARCH_FUNC && !(defined(__BEOS__) || defined(NETWARE))) #ifndef HFIXEDSZ #define HFIXEDSZ 12 /* fixed data in header <arpa/nameser.h> */ #endif /* HFIXEDSZ */ #ifndef QFIXEDSZ #define QFIXEDSZ 4 /* fixed data in query <arpa/nameser.h> */ #endif /* QFIXEDSZ */ #undef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 1024 #ifndef MAXRESOURCERECORDS #define MAXRESOURCERECORDS 64 #endif /* MAXRESOURCERECORDS */ typedef union { HEADER qb1; u_char qb2[65536]; } querybuf; /* just a hack to free resources allocated by glibc in __res_nsend() * See also: * res_thread_freeres() in glibc/resolv/res_init.c * __libc_res_nsend() in resolv/res_send.c * */ #if defined(__GLIBC__) && !defined(HAVE_DEPRECATED_DNS_FUNCS) #define php_dns_free_res(__res__) _php_dns_free_res(__res__) static void _php_dns_free_res(struct __res_state res) { /* {{{ */ int ns; for (ns = 0; ns < MAXNS; ns++) { if (res._u._ext.nsaddrs[ns] != NULL) { free (res._u._ext.nsaddrs[ns]); res._u._ext.nsaddrs[ns] = NULL; } } } /* }}} */ #else #define php_dns_free_res(__res__) #endif /* {{{ proto bool dns_check_record(string host [, string type]) Check DNS records corresponding to a given Internet host name or IP address */ PHP_FUNCTION(dns_check_record) { #ifndef MAXPACKET #define MAXPACKET 8192 /* max packet size used internally by BIND */ #endif u_char ans[MAXPACKET]; char *hostname, *rectype = NULL; int hostname_len, rectype_len = 0; int type = T_MX, i; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &hostname, &hostname_len, &rectype, &rectype_len) == FAILURE) { return; } if (hostname_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Host cannot be empty"); RETURN_FALSE; } if (rectype) { if (!strcasecmp("A", rectype)) type = T_A; else if (!strcasecmp("NS", rectype)) type = DNS_T_NS; else if (!strcasecmp("MX", rectype)) type = DNS_T_MX; else if (!strcasecmp("PTR", rectype)) type = DNS_T_PTR; else if (!strcasecmp("ANY", rectype)) type = DNS_T_ANY; else if (!strcasecmp("SOA", rectype)) type = DNS_T_SOA; else if (!strcasecmp("TXT", rectype)) type = DNS_T_TXT; else if (!strcasecmp("CNAME", rectype)) type = DNS_T_CNAME; else if (!strcasecmp("AAAA", rectype)) type = DNS_T_AAAA; else if (!strcasecmp("SRV", rectype)) type = DNS_T_SRV; else if (!strcasecmp("NAPTR", rectype)) type = DNS_T_NAPTR; else if (!strcasecmp("A6", rectype)) type = DNS_T_A6; else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%s' not supported", rectype); RETURN_FALSE; } } #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { RETURN_FALSE; } #else res_init(); #endif RETVAL_TRUE; i = php_dns_search(handle, hostname, C_IN, type, ans, sizeof(ans)); if (i < 0) { RETVAL_FALSE; } php_dns_free_handle(handle); } /* }}} */ #if HAVE_FULL_DNS_FUNCS /* {{{ php_parserr */ static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) { u_short type, class, dlen; u_long ttl; long n, i; u_short s; u_char *tp, *p; char name[MAXHOSTNAMELEN]; int have_v6_break = 0, in_v6_break = 0; *subarray = NULL; n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2); if (n < 0) { return NULL; } cp += n; GETSHORT(type, cp); GETSHORT(class, cp); 
GETLONG(ttl, cp); GETSHORT(dlen, cp); if (type_to_fetch != T_ANY && type != type_to_fetch) { cp += dlen; return cp; } if (!store) { cp += dlen; return cp; } ALLOC_INIT_ZVAL(*subarray); array_init(*subarray); add_assoc_string(*subarray, "host", name, 1); add_assoc_string(*subarray, "class", "IN", 1); add_assoc_long(*subarray, "ttl", ttl); if (raw) { add_assoc_long(*subarray, "type", type); add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1); cp += dlen; return cp; } switch (type) { case DNS_T_A: add_assoc_string(*subarray, "type", "A", 1); snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]); add_assoc_string(*subarray, "ip", name, 1); cp += dlen; break; case DNS_T_MX: add_assoc_string(*subarray, "type", "MX", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); /* no break; */ case DNS_T_CNAME: if (type == DNS_T_CNAME) { add_assoc_string(*subarray, "type", "CNAME", 1); } /* no break; */ case DNS_T_NS: if (type == DNS_T_NS) { add_assoc_string(*subarray, "type", "NS", 1); } /* no break; */ case DNS_T_PTR: if (type == DNS_T_PTR) { add_assoc_string(*subarray, "type", "PTR", 1); } n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_HINFO: /* See RFC 1010 for values */ add_assoc_string(*subarray, "type", "HINFO", 1); n = *cp & 0xFF; cp++; add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1); cp += n; n = *cp & 0xFF; cp++; add_assoc_stringl(*subarray, "os", (char*)cp, n, 1); cp += n; break; case DNS_T_TXT: { int ll = 0; zval *entries = NULL; add_assoc_string(*subarray, "type", "TXT", 1); tp = emalloc(dlen + 1); MAKE_STD_ZVAL(entries); array_init(entries); while (ll < dlen) { n = cp[ll]; if ((ll + n) >= dlen) { // Invalid chunk length, truncate n = dlen - (ll + 1); } memcpy(tp + ll , cp + ll + 1, n); add_next_index_stringl(entries, cp + ll + 1, n, 1); ll = ll + n + 1; } tp[dlen] = '\0'; cp += dlen; add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0); add_assoc_zval(*subarray, "entries", entries); } break; case DNS_T_SOA: add_assoc_string(*subarray, "type", "SOA", 1); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "mname", name, 1); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "rname", name, 1); GETLONG(n, cp); add_assoc_long(*subarray, "serial", n); GETLONG(n, cp); add_assoc_long(*subarray, "refresh", n); GETLONG(n, cp); add_assoc_long(*subarray, "retry", n); GETLONG(n, cp); add_assoc_long(*subarray, "expire", n); GETLONG(n, cp); add_assoc_long(*subarray, "minimum-ttl", n); break; case DNS_T_AAAA: tp = (u_char*)name; for(i=0; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "type", "AAAA", 1); add_assoc_string(*subarray, "ipv6", name, 1); break; case DNS_T_A6: p = cp; add_assoc_string(*subarray, "type", "A6", 1); n = ((int)cp[0]) & 0xFF; cp++; add_assoc_long(*subarray, "masklen", n); tp = (u_char*)name; if (n > 15) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } if (n % 16 > 8) { /* 
Partial short */ if (cp[0] != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } sprintf((char*)tp, "%x", cp[0] & 0xFF); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } cp++; } for (i = (n + 8) / 16; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "ipv6", name, 1); if (cp < p + dlen) { n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "chain", name, 1); } break; case DNS_T_SRV: add_assoc_string(*subarray, "type", "SRV", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); GETSHORT(n, cp); add_assoc_long(*subarray, "weight", n); GETSHORT(n, cp); add_assoc_long(*subarray, "port", n); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_NAPTR: add_assoc_string(*subarray, "type", "NAPTR", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "order", n); GETSHORT(n, cp); add_assoc_long(*subarray, "pref", n); n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1); cp += n; n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1); cp += n; n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1); cp += n; n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "replacement", name, 1); break; default: zval_ptr_dtor(subarray); *subarray = NULL; cp += dlen; break; } return cp; } /* }}} */ /* {{{ proto array|false dns_get_record(string hostname [, int type[, array authns, array addtl]]) Get any Resource Record corresponding to a given Internet host name */ PHP_FUNCTION(dns_get_record) { char *hostname; int hostname_len; long type_param = PHP_DNS_ANY; zval *authns = NULL, *addtl = NULL; int type_to_fetch; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif HEADER *hp; querybuf answer; u_char *cp = NULL, *end = NULL; int n, qd, an, ns = 0, ar = 0; int type, first_query = 1, store_results = 1; zend_bool raw = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lz!z!b", &hostname, &hostname_len, &type_param, &authns, &addtl, &raw) == FAILURE) { return; } if (authns) { zval_dtor(authns); array_init(authns); } if (addtl) { zval_dtor(addtl); array_init(addtl); } if (!raw) { if ((type_param & ~PHP_DNS_ALL) && (type_param != PHP_DNS_ANY)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%ld' not supported", type_param); RETURN_FALSE; } } else { if ((type_param < 1) || (type_param > 0xFFFF)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Numeric DNS record type must be between 1 and 65535, '%ld' given", type_param); RETURN_FALSE; } } /* Initialize the return array */ array_init(return_value); /* - We emulate an or'ed type mask by querying type by type. 
(Steps 0 - NUMTYPES-1 ) * If additional info is wanted we check again with DNS_T_ANY (step NUMTYPES / NUMTYPES+1 ) * store_results is used to skip storing the results retrieved in step * NUMTYPES+1 when results were already fetched. * - In case of PHP_DNS_ANY we use the directly fetch DNS_T_ANY. (step NUMTYPES+1 ) * - In case of raw mode, we query only the requestd type instead of looping type by type * before going with the additional info stuff. */ if (raw) { type = -1; } else if (type_param == PHP_DNS_ANY) { type = PHP_DNS_NUM_TYPES + 1; } else { type = 0; } for ( ; type < (addtl ? (PHP_DNS_NUM_TYPES + 2) : PHP_DNS_NUM_TYPES) || first_query; type++ ) { first_query = 0; switch (type) { case -1: /* raw */ type_to_fetch = type_param; /* skip over the rest and go directly to additional records */ type = PHP_DNS_NUM_TYPES - 1; break; case 0: type_to_fetch = type_param&PHP_DNS_A ? DNS_T_A : 0; break; case 1: type_to_fetch = type_param&PHP_DNS_NS ? DNS_T_NS : 0; break; case 2: type_to_fetch = type_param&PHP_DNS_CNAME ? DNS_T_CNAME : 0; break; case 3: type_to_fetch = type_param&PHP_DNS_SOA ? DNS_T_SOA : 0; break; case 4: type_to_fetch = type_param&PHP_DNS_PTR ? DNS_T_PTR : 0; break; case 5: type_to_fetch = type_param&PHP_DNS_HINFO ? DNS_T_HINFO : 0; break; case 6: type_to_fetch = type_param&PHP_DNS_MX ? DNS_T_MX : 0; break; case 7: type_to_fetch = type_param&PHP_DNS_TXT ? DNS_T_TXT : 0; break; case 8: type_to_fetch = type_param&PHP_DNS_AAAA ? DNS_T_AAAA : 0; break; case 9: type_to_fetch = type_param&PHP_DNS_SRV ? DNS_T_SRV : 0; break; case 10: type_to_fetch = type_param&PHP_DNS_NAPTR ? DNS_T_NAPTR : 0; break; case 11: type_to_fetch = type_param&PHP_DNS_A6 ? DNS_T_A6 : 0; break; case PHP_DNS_NUM_TYPES: store_results = 0; continue; default: case (PHP_DNS_NUM_TYPES + 1): type_to_fetch = DNS_T_ANY; break; } if (type_to_fetch) { #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { zval_dtor(return_value); RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { zval_dtor(return_value); RETURN_FALSE; } #else res_init(); #endif n = php_dns_search(handle, hostname, C_IN, type_to_fetch, answer.qb2, sizeof answer); if (n < 0) { php_dns_free_handle(handle); continue; } cp = answer.qb2 + HFIXEDSZ; end = answer.qb2 + n; hp = (HEADER *)&answer; qd = ntohs(hp->qdcount); an = ntohs(hp->ancount); ns = ntohs(hp->nscount); ar = ntohs(hp->arcount); /* Skip QD entries, they're only used by dn_expand later on */ while (qd-- > 0) { n = dn_skipname(cp, end); if (n < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to parse DNS data received"); zval_dtor(return_value); php_dns_free_handle(handle); RETURN_FALSE; } cp += n + QFIXEDSZ; } /* YAY! Our real answers! 
*/ while (an-- && cp && cp < end) { zval *retval; cp = php_parserr(cp, &answer, type_to_fetch, store_results, raw, &retval); if (retval != NULL && store_results) { add_next_index_zval(return_value, retval); } } if (authns || addtl) { /* List of Authoritative Name Servers * Process when only requesting addtl so that we can skip through the section */ while (ns-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, &answer, DNS_T_ANY, authns != NULL, raw, &retval); if (retval != NULL) { add_next_index_zval(authns, retval); } } } if (addtl) { /* Additional records associated with authoritative name servers */ while (ar-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, &answer, DNS_T_ANY, 1, raw, &retval); if (retval != NULL) { add_next_index_zval(addtl, retval); } } } php_dns_free_handle(handle); } } } /* }}} */ /* {{{ proto bool dns_get_mx(string hostname, array mxhosts [, array weight]) Get MX records corresponding to a given Internet host name */ PHP_FUNCTION(dns_get_mx) { char *hostname; int hostname_len; zval *mx_list, *weight_list = NULL; int count, qdc; u_short type, weight; u_char ans[MAXPACKET]; char buf[MAXHOSTNAMELEN]; HEADER *hp; u_char *cp, *end; int i; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sz|z", &hostname, &hostname_len, &mx_list, &weight_list) == FAILURE) { return; } zval_dtor(mx_list); array_init(mx_list); if (weight_list) { zval_dtor(weight_list); array_init(weight_list); } #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { RETURN_FALSE; } #else res_init(); #endif i = php_dns_search(handle, hostname, C_IN, DNS_T_MX, (u_char *)&ans, sizeof(ans)); if (i < 0) { RETURN_FALSE; } if (i > (int)sizeof(ans)) { i = sizeof(ans); } hp = (HEADER *)&ans; cp = (u_char *)&ans + HFIXEDSZ; end = (u_char *)&ans +i; for (qdc = ntohs((unsigned short)hp->qdcount); qdc--; cp += i + QFIXEDSZ) { if ((i = dn_skipname(cp, end)) < 0 ) { php_dns_free_handle(handle); RETURN_FALSE; } } count = ntohs((unsigned short)hp->ancount); while (--count >= 0 && cp < end) { if ((i = dn_skipname(cp, end)) < 0 ) { php_dns_free_handle(handle); RETURN_FALSE; } cp += i; GETSHORT(type, cp); cp += INT16SZ + INT32SZ; GETSHORT(i, cp); if (type != DNS_T_MX) { cp += i; continue; } GETSHORT(weight, cp); if ((i = dn_expand(ans, end, cp, buf, sizeof(buf)-1)) < 0) { php_dns_free_handle(handle); RETURN_FALSE; } cp += i; add_next_index_string(mx_list, buf, 1); if (weight_list) { add_next_index_long(weight_list, weight); } } php_dns_free_handle(handle); RETURN_TRUE; } /* }}} */ #endif /* HAVE_FULL_DNS_FUNCS */ #endif /* !defined(PHP_WIN32) && (HAVE_DNS_SEARCH_FUNC && !(defined(__BEOS__) || defined(NETWARE))) */ #if HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) PHP_MINIT_FUNCTION(dns) { REGISTER_LONG_CONSTANT("DNS_A", PHP_DNS_A, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_NS", PHP_DNS_NS, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_CNAME", PHP_DNS_CNAME, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_SOA", PHP_DNS_SOA, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_PTR", PHP_DNS_PTR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_HINFO", PHP_DNS_HINFO, CONST_CS | CONST_PERSISTENT); 
REGISTER_LONG_CONSTANT("DNS_MX", PHP_DNS_MX, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_TXT", PHP_DNS_TXT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_SRV", PHP_DNS_SRV, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_NAPTR", PHP_DNS_NAPTR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_AAAA", PHP_DNS_AAAA, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_A6", PHP_DNS_A6, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_ANY", PHP_DNS_ANY, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_ALL", PHP_DNS_ALL, CONST_CS | CONST_PERSISTENT); return SUCCESS; } #endif /* HAVE_FULL_DNS_FUNCS */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2014 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: The typical suspects | | Pollita <pollita@php.net> | | Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ /* {{{ includes */ #include "php.h" #include "php_network.h" #if HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef PHP_WIN32 # include "win32/inet.h" # include <winsock2.h> # include <windows.h> # include <Ws2tcpip.h> #else /* This holds good for NetWare too, both for Winsock and Berkeley sockets */ #include <netinet/in.h> #if HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #include <netdb.h> #ifdef _OSD_POSIX #undef STATUS #undef T_UNSPEC #endif #if HAVE_ARPA_NAMESER_H #ifdef DARWIN # define BIND_8_COMPAT 1 #endif #include <arpa/nameser.h> #endif #if HAVE_RESOLV_H #include <resolv.h> #endif #ifdef HAVE_DNS_H #include <dns.h> #endif #endif /* Borrowed from SYS/SOCKET.H */ #if defined(NETWARE) && defined(USE_WINSOCK) #define AF_INET 2 /* internetwork: UDP, TCP, etc. */ #endif #ifndef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 255 #endif /* For the local hostname obtained via gethostname which is different from the dns-related MAXHOSTNAMELEN constant above */ #ifndef HOST_NAME_MAX #define HOST_NAME_MAX 255 #endif #include "php_dns.h" /* type compat */ #ifndef DNS_T_A #define DNS_T_A 1 #endif #ifndef DNS_T_NS #define DNS_T_NS 2 #endif #ifndef DNS_T_CNAME #define DNS_T_CNAME 5 #endif #ifndef DNS_T_SOA #define DNS_T_SOA 6 #endif #ifndef DNS_T_PTR #define DNS_T_PTR 12 #endif #ifndef DNS_T_HINFO #define DNS_T_HINFO 13 #endif #ifndef DNS_T_MINFO #define DNS_T_MINFO 14 #endif #ifndef DNS_T_MX #define DNS_T_MX 15 #endif #ifndef DNS_T_TXT #define DNS_T_TXT 16 #endif #ifndef DNS_T_AAAA #define DNS_T_AAAA 28 #endif #ifndef DNS_T_SRV #define DNS_T_SRV 33 #endif #ifndef DNS_T_NAPTR #define DNS_T_NAPTR 35 #endif #ifndef DNS_T_A6 #define DNS_T_A6 38 #endif #ifndef DNS_T_ANY #define DNS_T_ANY 255 #endif /* }}} */ static char *php_gethostbyaddr(char *ip); static char *php_gethostbyname(char *name); #ifdef HAVE_GETHOSTNAME /* {{{ proto string gethostname() Get the host name of the current machine */ PHP_FUNCTION(gethostname) { char buf[HOST_NAME_MAX]; if (zend_parse_parameters_none() == FAILURE) { return; } if (gethostname(buf, sizeof(buf) - 1)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "unable to fetch host [%d]: %s", errno, strerror(errno)); RETURN_FALSE; } RETURN_STRING(buf, 1); } /* }}} */ #endif /* TODO: Reimplement the gethostby* functions using the new winxp+ API, in dns_win32.c, then we can have a dns.c, dns_unix.c and dns_win32.c instead of a messy dns.c full of #ifdef */ /* {{{ proto string gethostbyaddr(string ip_address) Get the Internet host name corresponding to a given IP address */ PHP_FUNCTION(gethostbyaddr) { char *addr; int addr_len; 
char *hostname; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &addr, &addr_len) == FAILURE) { return; } hostname = php_gethostbyaddr(addr); if (hostname == NULL) { #if HAVE_IPV6 && HAVE_INET_PTON php_error_docref(NULL TSRMLS_CC, E_WARNING, "Address is not a valid IPv4 or IPv6 address"); #else php_error_docref(NULL TSRMLS_CC, E_WARNING, "Address is not in a.b.c.d form"); #endif RETVAL_FALSE; } else { RETVAL_STRING(hostname, 0); } } /* }}} */ /* {{{ php_gethostbyaddr */ static char *php_gethostbyaddr(char *ip) { #if HAVE_IPV6 && HAVE_INET_PTON struct in6_addr addr6; #endif struct in_addr addr; struct hostent *hp; #if HAVE_IPV6 && HAVE_INET_PTON if (inet_pton(AF_INET6, ip, &addr6)) { hp = gethostbyaddr((char *) &addr6, sizeof(addr6), AF_INET6); } else if (inet_pton(AF_INET, ip, &addr)) { hp = gethostbyaddr((char *) &addr, sizeof(addr), AF_INET); } else { return NULL; } #else addr.s_addr = inet_addr(ip); if (addr.s_addr == -1) { return NULL; } hp = gethostbyaddr((char *) &addr, sizeof(addr), AF_INET); #endif if (!hp || hp->h_name == NULL || hp->h_name[0] == '\0') { return estrdup(ip); } return estrdup(hp->h_name); } /* }}} */ /* {{{ proto string gethostbyname(string hostname) Get the IP address corresponding to a given Internet host name */ PHP_FUNCTION(gethostbyname) { char *hostname; int hostname_len; char *addr; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &hostname, &hostname_len) == FAILURE) { return; } addr = php_gethostbyname(hostname); RETVAL_STRING(addr, 0); } /* }}} */ /* {{{ proto array gethostbynamel(string hostname) Return a list of IP addresses that a given hostname resolves to. */ PHP_FUNCTION(gethostbynamel) { char *hostname; int hostname_len; struct hostent *hp; struct in_addr in; int i; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &hostname, &hostname_len) == FAILURE) { return; } hp = gethostbyname(hostname); if (hp == NULL || hp->h_addr_list == NULL) { RETURN_FALSE; } array_init(return_value); for (i = 0 ; hp->h_addr_list[i] != 0 ; i++) { in = *(struct in_addr *) hp->h_addr_list[i]; add_next_index_string(return_value, inet_ntoa(in), 1); } } /* }}} */ /* {{{ php_gethostbyname */ static char *php_gethostbyname(char *name) { struct hostent *hp; struct in_addr in; hp = gethostbyname(name); if (!hp || !*(hp->h_addr_list)) { return estrdup(name); } memcpy(&in.s_addr, *(hp->h_addr_list), sizeof(in.s_addr)); return estrdup(inet_ntoa(in)); } /* }}} */ #if HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) # define PHP_DNS_NUM_TYPES 12 /* Number of DNS Types Supported by PHP currently */ # define PHP_DNS_A 0x00000001 # define PHP_DNS_NS 0x00000002 # define PHP_DNS_CNAME 0x00000010 # define PHP_DNS_SOA 0x00000020 # define PHP_DNS_PTR 0x00000800 # define PHP_DNS_HINFO 0x00001000 # define PHP_DNS_MX 0x00004000 # define PHP_DNS_TXT 0x00008000 # define PHP_DNS_A6 0x01000000 # define PHP_DNS_SRV 0x02000000 # define PHP_DNS_NAPTR 0x04000000 # define PHP_DNS_AAAA 0x08000000 # define PHP_DNS_ANY 0x10000000 # define PHP_DNS_ALL (PHP_DNS_A|PHP_DNS_NS|PHP_DNS_CNAME|PHP_DNS_SOA|PHP_DNS_PTR|PHP_DNS_HINFO|PHP_DNS_MX|PHP_DNS_TXT|PHP_DNS_A6|PHP_DNS_SRV|PHP_DNS_NAPTR|PHP_DNS_AAAA) #endif /* HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) */ /* Note: These functions are defined in ext/standard/dns_win32.c for Windows! 
*/ #if !defined(PHP_WIN32) && (HAVE_DNS_SEARCH_FUNC && !(defined(__BEOS__) || defined(NETWARE))) #ifndef HFIXEDSZ #define HFIXEDSZ 12 /* fixed data in header <arpa/nameser.h> */ #endif /* HFIXEDSZ */ #ifndef QFIXEDSZ #define QFIXEDSZ 4 /* fixed data in query <arpa/nameser.h> */ #endif /* QFIXEDSZ */ #undef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 1024 #ifndef MAXRESOURCERECORDS #define MAXRESOURCERECORDS 64 #endif /* MAXRESOURCERECORDS */ typedef union { HEADER qb1; u_char qb2[65536]; } querybuf; /* just a hack to free resources allocated by glibc in __res_nsend() * See also: * res_thread_freeres() in glibc/resolv/res_init.c * __libc_res_nsend() in resolv/res_send.c * */ #if defined(__GLIBC__) && !defined(HAVE_DEPRECATED_DNS_FUNCS) #define php_dns_free_res(__res__) _php_dns_free_res(__res__) static void _php_dns_free_res(struct __res_state res) { /* {{{ */ int ns; for (ns = 0; ns < MAXNS; ns++) { if (res._u._ext.nsaddrs[ns] != NULL) { free (res._u._ext.nsaddrs[ns]); res._u._ext.nsaddrs[ns] = NULL; } } } /* }}} */ #else #define php_dns_free_res(__res__) #endif /* {{{ proto bool dns_check_record(string host [, string type]) Check DNS records corresponding to a given Internet host name or IP address */ PHP_FUNCTION(dns_check_record) { #ifndef MAXPACKET #define MAXPACKET 8192 /* max packet size used internally by BIND */ #endif u_char ans[MAXPACKET]; char *hostname, *rectype = NULL; int hostname_len, rectype_len = 0; int type = T_MX, i; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &hostname, &hostname_len, &rectype, &rectype_len) == FAILURE) { return; } if (hostname_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Host cannot be empty"); RETURN_FALSE; } if (rectype) { if (!strcasecmp("A", rectype)) type = T_A; else if (!strcasecmp("NS", rectype)) type = DNS_T_NS; else if (!strcasecmp("MX", rectype)) type = DNS_T_MX; else if (!strcasecmp("PTR", rectype)) type = DNS_T_PTR; else if (!strcasecmp("ANY", rectype)) type = DNS_T_ANY; else if (!strcasecmp("SOA", rectype)) type = DNS_T_SOA; else if (!strcasecmp("TXT", rectype)) type = DNS_T_TXT; else if (!strcasecmp("CNAME", rectype)) type = DNS_T_CNAME; else if (!strcasecmp("AAAA", rectype)) type = DNS_T_AAAA; else if (!strcasecmp("SRV", rectype)) type = DNS_T_SRV; else if (!strcasecmp("NAPTR", rectype)) type = DNS_T_NAPTR; else if (!strcasecmp("A6", rectype)) type = DNS_T_A6; else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%s' not supported", rectype); RETURN_FALSE; } } #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { RETURN_FALSE; } #else res_init(); #endif RETVAL_TRUE; i = php_dns_search(handle, hostname, C_IN, type, ans, sizeof(ans)); if (i < 0) { RETVAL_FALSE; } php_dns_free_handle(handle); } /* }}} */ #if HAVE_FULL_DNS_FUNCS #define CHECKCP(n) do { \ if (cp + n > end) { \ return NULL; \ } \ } while (0) /* {{{ php_parserr */ static u_char *php_parserr(u_char *cp, u_char *end, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) { u_short type, class, dlen; u_long ttl; long n, i; u_short s; u_char *tp, *p; char name[MAXHOSTNAMELEN]; int have_v6_break = 0, in_v6_break = 0; *subarray = NULL; n = dn_expand(answer->qb2, end, cp, name, sizeof(name) - 2); if (n 
< 0) { return NULL; } cp += n; CHECKCP(10); GETSHORT(type, cp); GETSHORT(class, cp); GETLONG(ttl, cp); GETSHORT(dlen, cp); CHECKCP(dlen); if (type_to_fetch != T_ANY && type != type_to_fetch) { cp += dlen; return cp; } if (!store) { cp += dlen; return cp; } ALLOC_INIT_ZVAL(*subarray); array_init(*subarray); add_assoc_string(*subarray, "host", name, 1); add_assoc_string(*subarray, "class", "IN", 1); add_assoc_long(*subarray, "ttl", ttl); if (raw) { add_assoc_long(*subarray, "type", type); add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1); cp += dlen; return cp; } switch (type) { case DNS_T_A: CHECKCP(4); add_assoc_string(*subarray, "type", "A", 1); snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]); add_assoc_string(*subarray, "ip", name, 1); cp += dlen; break; case DNS_T_MX: CHECKCP(2); add_assoc_string(*subarray, "type", "MX", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); /* no break; */ case DNS_T_CNAME: if (type == DNS_T_CNAME) { add_assoc_string(*subarray, "type", "CNAME", 1); } /* no break; */ case DNS_T_NS: if (type == DNS_T_NS) { add_assoc_string(*subarray, "type", "NS", 1); } /* no break; */ case DNS_T_PTR: if (type == DNS_T_PTR) { add_assoc_string(*subarray, "type", "PTR", 1); } n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_HINFO: /* See RFC 1010 for values */ add_assoc_string(*subarray, "type", "HINFO", 1); CHECKCP(1); n = *cp & 0xFF; cp++; CHECKCP(n); add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1); cp += n; CHECKCP(1); n = *cp & 0xFF; cp++; CHECKCP(n); add_assoc_stringl(*subarray, "os", (char*)cp, n, 1); cp += n; break; case DNS_T_TXT: { int l1 = 0, l2 = 0; zval *entries = NULL; add_assoc_string(*subarray, "type", "TXT", 1); tp = emalloc(dlen + 1); MAKE_STD_ZVAL(entries); array_init(entries); while (l1 < dlen) { n = cp[l1]; if ((l1 + n) >= dlen) { // Invalid chunk length, truncate n = dlen - (l1 + 1); } if (n) { memcpy(tp + l2 , cp + l1 + 1, n); add_next_index_stringl(entries, cp + l1 + 1, n, 1); } l1 = l1 + n + 1; l2 = l2 + n; } tp[l2] = '\0'; cp += dlen; add_assoc_stringl(*subarray, "txt", tp, l2, 0); add_assoc_zval(*subarray, "entries", entries); } break; case DNS_T_SOA: add_assoc_string(*subarray, "type", "SOA", 1); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "mname", name, 1); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "rname", name, 1); CHECKCP(5*4); GETLONG(n, cp); add_assoc_long(*subarray, "serial", n); GETLONG(n, cp); add_assoc_long(*subarray, "refresh", n); GETLONG(n, cp); add_assoc_long(*subarray, "retry", n); GETLONG(n, cp); add_assoc_long(*subarray, "expire", n); GETLONG(n, cp); add_assoc_long(*subarray, "minimum-ttl", n); break; case DNS_T_AAAA: tp = (u_char*)name; CHECKCP(8*2); for(i=0; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "type", "AAAA", 1); add_assoc_string(*subarray, "ipv6", name, 1); break; case DNS_T_A6: p = cp; add_assoc_string(*subarray, "type", "A6", 1); CHECKCP(1); n = 
((int)cp[0]) & 0xFF; cp++; add_assoc_long(*subarray, "masklen", n); tp = (u_char*)name; if (n > 15) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } if (n % 16 > 8) { /* Partial short */ if (cp[0] != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } sprintf((char*)tp, "%x", cp[0] & 0xFF); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } cp++; } for (i = (n + 8) / 16; i < 8; i++) { CHECKCP(2); GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "ipv6", name, 1); if (cp < p + dlen) { n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "chain", name, 1); } break; case DNS_T_SRV: CHECKCP(3*2); add_assoc_string(*subarray, "type", "SRV", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); GETSHORT(n, cp); add_assoc_long(*subarray, "weight", n); GETSHORT(n, cp); add_assoc_long(*subarray, "port", n); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_NAPTR: CHECKCP(2*2); add_assoc_string(*subarray, "type", "NAPTR", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "order", n); GETSHORT(n, cp); add_assoc_long(*subarray, "pref", n); CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "flags", (char*)cp, n, 1); cp += n; CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "services", (char*)cp, n, 1); cp += n; CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "regex", (char*)cp, n, 1); cp += n; n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "replacement", name, 1); break; default: zval_ptr_dtor(subarray); *subarray = NULL; cp += dlen; break; } return cp; } /* }}} */ /* {{{ proto array|false dns_get_record(string hostname [, int type[, array authns, array addtl]]) Get any Resource Record corresponding to a given Internet host name */ PHP_FUNCTION(dns_get_record) { char *hostname; int hostname_len; long type_param = PHP_DNS_ANY; zval *authns = NULL, *addtl = NULL; int type_to_fetch; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif HEADER *hp; querybuf answer; u_char *cp = NULL, *end = NULL; int n, qd, an, ns = 0, ar = 0; int type, first_query = 1, store_results = 1; zend_bool raw = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lz!z!b", &hostname, &hostname_len, &type_param, &authns, &addtl, &raw) == FAILURE) { return; } if (authns) { zval_dtor(authns); array_init(authns); } if (addtl) { zval_dtor(addtl); array_init(addtl); } if (!raw) { if ((type_param & ~PHP_DNS_ALL) && (type_param != PHP_DNS_ANY)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%ld' not supported", type_param); RETURN_FALSE; } } else { if ((type_param < 1) || (type_param > 0xFFFF)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Numeric DNS 
record type must be between 1 and 65535, '%ld' given", type_param); RETURN_FALSE; } } /* Initialize the return array */ array_init(return_value); /* - We emulate an or'ed type mask by querying type by type. (Steps 0 - NUMTYPES-1 ) * If additional info is wanted we check again with DNS_T_ANY (step NUMTYPES / NUMTYPES+1 ) * store_results is used to skip storing the results retrieved in step * NUMTYPES+1 when results were already fetched. * - In case of PHP_DNS_ANY we use the directly fetch DNS_T_ANY. (step NUMTYPES+1 ) * - In case of raw mode, we query only the requestd type instead of looping type by type * before going with the additional info stuff. */ if (raw) { type = -1; } else if (type_param == PHP_DNS_ANY) { type = PHP_DNS_NUM_TYPES + 1; } else { type = 0; } for ( ; type < (addtl ? (PHP_DNS_NUM_TYPES + 2) : PHP_DNS_NUM_TYPES) || first_query; type++ ) { first_query = 0; switch (type) { case -1: /* raw */ type_to_fetch = type_param; /* skip over the rest and go directly to additional records */ type = PHP_DNS_NUM_TYPES - 1; break; case 0: type_to_fetch = type_param&PHP_DNS_A ? DNS_T_A : 0; break; case 1: type_to_fetch = type_param&PHP_DNS_NS ? DNS_T_NS : 0; break; case 2: type_to_fetch = type_param&PHP_DNS_CNAME ? DNS_T_CNAME : 0; break; case 3: type_to_fetch = type_param&PHP_DNS_SOA ? DNS_T_SOA : 0; break; case 4: type_to_fetch = type_param&PHP_DNS_PTR ? DNS_T_PTR : 0; break; case 5: type_to_fetch = type_param&PHP_DNS_HINFO ? DNS_T_HINFO : 0; break; case 6: type_to_fetch = type_param&PHP_DNS_MX ? DNS_T_MX : 0; break; case 7: type_to_fetch = type_param&PHP_DNS_TXT ? DNS_T_TXT : 0; break; case 8: type_to_fetch = type_param&PHP_DNS_AAAA ? DNS_T_AAAA : 0; break; case 9: type_to_fetch = type_param&PHP_DNS_SRV ? DNS_T_SRV : 0; break; case 10: type_to_fetch = type_param&PHP_DNS_NAPTR ? DNS_T_NAPTR : 0; break; case 11: type_to_fetch = type_param&PHP_DNS_A6 ? DNS_T_A6 : 0; break; case PHP_DNS_NUM_TYPES: store_results = 0; continue; default: case (PHP_DNS_NUM_TYPES + 1): type_to_fetch = DNS_T_ANY; break; } if (type_to_fetch) { #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { zval_dtor(return_value); RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { zval_dtor(return_value); RETURN_FALSE; } #else res_init(); #endif n = php_dns_search(handle, hostname, C_IN, type_to_fetch, answer.qb2, sizeof answer); if (n < 0) { php_dns_free_handle(handle); continue; } cp = answer.qb2 + HFIXEDSZ; end = answer.qb2 + n; hp = (HEADER *)&answer; qd = ntohs(hp->qdcount); an = ntohs(hp->ancount); ns = ntohs(hp->nscount); ar = ntohs(hp->arcount); /* Skip QD entries, they're only used by dn_expand later on */ while (qd-- > 0) { n = dn_skipname(cp, end); if (n < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to parse DNS data received"); zval_dtor(return_value); php_dns_free_handle(handle); RETURN_FALSE; } cp += n + QFIXEDSZ; } /* YAY! Our real answers! 
*/ while (an-- && cp && cp < end) { zval *retval; cp = php_parserr(cp, end, &answer, type_to_fetch, store_results, raw, &retval); if (retval != NULL && store_results) { add_next_index_zval(return_value, retval); } } if (authns || addtl) { /* List of Authoritative Name Servers * Process when only requesting addtl so that we can skip through the section */ while (ns-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, end, &answer, DNS_T_ANY, authns != NULL, raw, &retval); if (retval != NULL) { add_next_index_zval(authns, retval); } } } if (addtl) { /* Additional records associated with authoritative name servers */ while (ar-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, end, &answer, DNS_T_ANY, 1, raw, &retval); if (retval != NULL) { add_next_index_zval(addtl, retval); } } } php_dns_free_handle(handle); } } } /* }}} */ /* {{{ proto bool dns_get_mx(string hostname, array mxhosts [, array weight]) Get MX records corresponding to a given Internet host name */ PHP_FUNCTION(dns_get_mx) { char *hostname; int hostname_len; zval *mx_list, *weight_list = NULL; int count, qdc; u_short type, weight; u_char ans[MAXPACKET]; char buf[MAXHOSTNAMELEN]; HEADER *hp; u_char *cp, *end; int i; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sz|z", &hostname, &hostname_len, &mx_list, &weight_list) == FAILURE) { return; } zval_dtor(mx_list); array_init(mx_list); if (weight_list) { zval_dtor(weight_list); array_init(weight_list); } #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { RETURN_FALSE; } #else res_init(); #endif i = php_dns_search(handle, hostname, C_IN, DNS_T_MX, (u_char *)&ans, sizeof(ans)); if (i < 0) { RETURN_FALSE; } if (i > (int)sizeof(ans)) { i = sizeof(ans); } hp = (HEADER *)&ans; cp = (u_char *)&ans + HFIXEDSZ; end = (u_char *)&ans +i; for (qdc = ntohs((unsigned short)hp->qdcount); qdc--; cp += i + QFIXEDSZ) { if ((i = dn_skipname(cp, end)) < 0 ) { php_dns_free_handle(handle); RETURN_FALSE; } } count = ntohs((unsigned short)hp->ancount); while (--count >= 0 && cp < end) { if ((i = dn_skipname(cp, end)) < 0 ) { php_dns_free_handle(handle); RETURN_FALSE; } cp += i; GETSHORT(type, cp); cp += INT16SZ + INT32SZ; GETSHORT(i, cp); if (type != DNS_T_MX) { cp += i; continue; } GETSHORT(weight, cp); if ((i = dn_expand(ans, end, cp, buf, sizeof(buf)-1)) < 0) { php_dns_free_handle(handle); RETURN_FALSE; } cp += i; add_next_index_string(mx_list, buf, 1); if (weight_list) { add_next_index_long(weight_list, weight); } } php_dns_free_handle(handle); RETURN_TRUE; } /* }}} */ #endif /* HAVE_FULL_DNS_FUNCS */ #endif /* !defined(PHP_WIN32) && (HAVE_DNS_SEARCH_FUNC && !(defined(__BEOS__) || defined(NETWARE))) */ #if HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) PHP_MINIT_FUNCTION(dns) { REGISTER_LONG_CONSTANT("DNS_A", PHP_DNS_A, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_NS", PHP_DNS_NS, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_CNAME", PHP_DNS_CNAME, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_SOA", PHP_DNS_SOA, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_PTR", PHP_DNS_PTR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_HINFO", PHP_DNS_HINFO, CONST_CS | 
CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_MX", PHP_DNS_MX, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_TXT", PHP_DNS_TXT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_SRV", PHP_DNS_SRV, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_NAPTR", PHP_DNS_NAPTR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_AAAA", PHP_DNS_AAAA, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_A6", PHP_DNS_A6, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_ANY", PHP_DNS_ANY, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_ALL", PHP_DNS_ALL, CONST_CS | CONST_PERSISTENT); return SUCCESS; } #endif /* HAVE_FULL_DNS_FUNCS */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
PHP_FUNCTION(dns_get_record) { char *hostname; int hostname_len; long type_param = PHP_DNS_ANY; zval *authns = NULL, *addtl = NULL; int type_to_fetch; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif HEADER *hp; querybuf answer; u_char *cp = NULL, *end = NULL; int n, qd, an, ns = 0, ar = 0; int type, first_query = 1, store_results = 1; zend_bool raw = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lz!z!b", &hostname, &hostname_len, &type_param, &authns, &addtl, &raw) == FAILURE) { return; } if (authns) { zval_dtor(authns); array_init(authns); } if (addtl) { zval_dtor(addtl); array_init(addtl); } if (!raw) { if ((type_param & ~PHP_DNS_ALL) && (type_param != PHP_DNS_ANY)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%ld' not supported", type_param); RETURN_FALSE; } } else { if ((type_param < 1) || (type_param > 0xFFFF)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Numeric DNS record type must be between 1 and 65535, '%ld' given", type_param); RETURN_FALSE; } } /* Initialize the return array */ array_init(return_value); /* - We emulate an or'ed type mask by querying type by type. (Steps 0 - NUMTYPES-1 ) * If additional info is wanted we check again with DNS_T_ANY (step NUMTYPES / NUMTYPES+1 ) * store_results is used to skip storing the results retrieved in step * NUMTYPES+1 when results were already fetched. * - In case of PHP_DNS_ANY we use the directly fetch DNS_T_ANY. (step NUMTYPES+1 ) * - In case of raw mode, we query only the requestd type instead of looping type by type * before going with the additional info stuff. */ if (raw) { type = -1; } else if (type_param == PHP_DNS_ANY) { type = PHP_DNS_NUM_TYPES + 1; } else { type = 0; } for ( ; type < (addtl ? (PHP_DNS_NUM_TYPES + 2) : PHP_DNS_NUM_TYPES) || first_query; type++ ) { first_query = 0; switch (type) { case -1: /* raw */ type_to_fetch = type_param; /* skip over the rest and go directly to additional records */ type = PHP_DNS_NUM_TYPES - 1; break; case 0: type_to_fetch = type_param&PHP_DNS_A ? DNS_T_A : 0; break; case 1: type_to_fetch = type_param&PHP_DNS_NS ? DNS_T_NS : 0; break; case 2: type_to_fetch = type_param&PHP_DNS_CNAME ? DNS_T_CNAME : 0; break; case 3: type_to_fetch = type_param&PHP_DNS_SOA ? DNS_T_SOA : 0; break; case 4: type_to_fetch = type_param&PHP_DNS_PTR ? DNS_T_PTR : 0; break; case 5: type_to_fetch = type_param&PHP_DNS_HINFO ? DNS_T_HINFO : 0; break; case 6: type_to_fetch = type_param&PHP_DNS_MX ? DNS_T_MX : 0; break; case 7: type_to_fetch = type_param&PHP_DNS_TXT ? DNS_T_TXT : 0; break; case 8: type_to_fetch = type_param&PHP_DNS_AAAA ? DNS_T_AAAA : 0; break; case 9: type_to_fetch = type_param&PHP_DNS_SRV ? DNS_T_SRV : 0; break; case 10: type_to_fetch = type_param&PHP_DNS_NAPTR ? DNS_T_NAPTR : 0; break; case 11: type_to_fetch = type_param&PHP_DNS_A6 ? 
DNS_T_A6 : 0; break; case PHP_DNS_NUM_TYPES: store_results = 0; continue; default: case (PHP_DNS_NUM_TYPES + 1): type_to_fetch = DNS_T_ANY; break; } if (type_to_fetch) { #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { zval_dtor(return_value); RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { zval_dtor(return_value); RETURN_FALSE; } #else res_init(); #endif n = php_dns_search(handle, hostname, C_IN, type_to_fetch, answer.qb2, sizeof answer); if (n < 0) { php_dns_free_handle(handle); continue; } cp = answer.qb2 + HFIXEDSZ; end = answer.qb2 + n; hp = (HEADER *)&answer; qd = ntohs(hp->qdcount); an = ntohs(hp->ancount); ns = ntohs(hp->nscount); ar = ntohs(hp->arcount); /* Skip QD entries, they're only used by dn_expand later on */ while (qd-- > 0) { n = dn_skipname(cp, end); if (n < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to parse DNS data received"); zval_dtor(return_value); php_dns_free_handle(handle); RETURN_FALSE; } cp += n + QFIXEDSZ; } /* YAY! Our real answers! */ while (an-- && cp && cp < end) { zval *retval; cp = php_parserr(cp, &answer, type_to_fetch, store_results, raw, &retval); if (retval != NULL && store_results) { add_next_index_zval(return_value, retval); } } if (authns || addtl) { /* List of Authoritative Name Servers * Process when only requesting addtl so that we can skip through the section */ while (ns-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, &answer, DNS_T_ANY, authns != NULL, raw, &retval); if (retval != NULL) { add_next_index_zval(authns, retval); } } } if (addtl) { /* Additional records associated with authoritative name servers */ while (ar-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, &answer, DNS_T_ANY, 1, raw, &retval); if (retval != NULL) { add_next_index_zval(addtl, retval); } } } php_dns_free_handle(handle); } } }
PHP_FUNCTION(dns_get_record) { char *hostname; int hostname_len; long type_param = PHP_DNS_ANY; zval *authns = NULL, *addtl = NULL; int type_to_fetch; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif HEADER *hp; querybuf answer; u_char *cp = NULL, *end = NULL; int n, qd, an, ns = 0, ar = 0; int type, first_query = 1, store_results = 1; zend_bool raw = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lz!z!b", &hostname, &hostname_len, &type_param, &authns, &addtl, &raw) == FAILURE) { return; } if (authns) { zval_dtor(authns); array_init(authns); } if (addtl) { zval_dtor(addtl); array_init(addtl); } if (!raw) { if ((type_param & ~PHP_DNS_ALL) && (type_param != PHP_DNS_ANY)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%ld' not supported", type_param); RETURN_FALSE; } } else { if ((type_param < 1) || (type_param > 0xFFFF)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Numeric DNS record type must be between 1 and 65535, '%ld' given", type_param); RETURN_FALSE; } } /* Initialize the return array */ array_init(return_value); /* - We emulate an or'ed type mask by querying type by type. (Steps 0 - NUMTYPES-1 ) * If additional info is wanted we check again with DNS_T_ANY (step NUMTYPES / NUMTYPES+1 ) * store_results is used to skip storing the results retrieved in step * NUMTYPES+1 when results were already fetched. * - In case of PHP_DNS_ANY we use the directly fetch DNS_T_ANY. (step NUMTYPES+1 ) * - In case of raw mode, we query only the requestd type instead of looping type by type * before going with the additional info stuff. */ if (raw) { type = -1; } else if (type_param == PHP_DNS_ANY) { type = PHP_DNS_NUM_TYPES + 1; } else { type = 0; } for ( ; type < (addtl ? (PHP_DNS_NUM_TYPES + 2) : PHP_DNS_NUM_TYPES) || first_query; type++ ) { first_query = 0; switch (type) { case -1: /* raw */ type_to_fetch = type_param; /* skip over the rest and go directly to additional records */ type = PHP_DNS_NUM_TYPES - 1; break; case 0: type_to_fetch = type_param&PHP_DNS_A ? DNS_T_A : 0; break; case 1: type_to_fetch = type_param&PHP_DNS_NS ? DNS_T_NS : 0; break; case 2: type_to_fetch = type_param&PHP_DNS_CNAME ? DNS_T_CNAME : 0; break; case 3: type_to_fetch = type_param&PHP_DNS_SOA ? DNS_T_SOA : 0; break; case 4: type_to_fetch = type_param&PHP_DNS_PTR ? DNS_T_PTR : 0; break; case 5: type_to_fetch = type_param&PHP_DNS_HINFO ? DNS_T_HINFO : 0; break; case 6: type_to_fetch = type_param&PHP_DNS_MX ? DNS_T_MX : 0; break; case 7: type_to_fetch = type_param&PHP_DNS_TXT ? DNS_T_TXT : 0; break; case 8: type_to_fetch = type_param&PHP_DNS_AAAA ? DNS_T_AAAA : 0; break; case 9: type_to_fetch = type_param&PHP_DNS_SRV ? DNS_T_SRV : 0; break; case 10: type_to_fetch = type_param&PHP_DNS_NAPTR ? DNS_T_NAPTR : 0; break; case 11: type_to_fetch = type_param&PHP_DNS_A6 ? 
DNS_T_A6 : 0; break; case PHP_DNS_NUM_TYPES: store_results = 0; continue; default: case (PHP_DNS_NUM_TYPES + 1): type_to_fetch = DNS_T_ANY; break; } if (type_to_fetch) { #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { zval_dtor(return_value); RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { zval_dtor(return_value); RETURN_FALSE; } #else res_init(); #endif n = php_dns_search(handle, hostname, C_IN, type_to_fetch, answer.qb2, sizeof answer); if (n < 0) { php_dns_free_handle(handle); continue; } cp = answer.qb2 + HFIXEDSZ; end = answer.qb2 + n; hp = (HEADER *)&answer; qd = ntohs(hp->qdcount); an = ntohs(hp->ancount); ns = ntohs(hp->nscount); ar = ntohs(hp->arcount); /* Skip QD entries, they're only used by dn_expand later on */ while (qd-- > 0) { n = dn_skipname(cp, end); if (n < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to parse DNS data received"); zval_dtor(return_value); php_dns_free_handle(handle); RETURN_FALSE; } cp += n + QFIXEDSZ; } /* YAY! Our real answers! */ while (an-- && cp && cp < end) { zval *retval; cp = php_parserr(cp, end, &answer, type_to_fetch, store_results, raw, &retval); if (retval != NULL && store_results) { add_next_index_zval(return_value, retval); } } if (authns || addtl) { /* List of Authoritative Name Servers * Process when only requesting addtl so that we can skip through the section */ while (ns-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, end, &answer, DNS_T_ANY, authns != NULL, raw, &retval); if (retval != NULL) { add_next_index_zval(authns, retval); } } } if (addtl) { /* Additional records associated with authoritative name servers */ while (ar-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, end, &answer, DNS_T_ANY, 1, raw, &retval); if (retval != NULL) { add_next_index_zval(addtl, retval); } } } php_dns_free_handle(handle); } } }
{'added': [(415, '#define CHECKCP(n) do { \\'), (416, '\tif (cp + n > end) { \\'), (417, '\t\treturn NULL; \\'), (418, '\t} \\'), (419, '} while (0)'), (420, ''), (422, 'static u_char *php_parserr(u_char *cp, u_char *end, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)'), (434, '\tn = dn_expand(answer->qb2, end, cp, name, sizeof(name) - 2);'), (440, '\tCHECKCP(10);'), (445, '\tCHECKCP(dlen);'), (472, '\t\t\tCHECKCP(4);'), (479, '\t\t\tCHECKCP(2);'), (498, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);'), (508, '\t\t\tCHECKCP(1);'), (511, '\t\t\tCHECKCP(n);'), (514, '\t\t\tCHECKCP(1);'), (517, '\t\t\tCHECKCP(n);'), (523, '\t\t\t\tint l1 = 0, l2 = 0;'), (532, '\t\t\t\twhile (l1 < dlen) {'), (533, '\t\t\t\t\tn = cp[l1];'), (534, '\t\t\t\t\tif ((l1 + n) >= dlen) {'), (536, '\t\t\t\t\t\tn = dlen - (l1 + 1);'), (537, '\t\t\t\t\t}'), (538, '\t\t\t\t\tif (n) {'), (539, '\t\t\t\t\t\tmemcpy(tp + l2 , cp + l1 + 1, n);'), (540, '\t\t\t\t\t\tadd_next_index_stringl(entries, cp + l1 + 1, n, 1);'), (542, '\t\t\t\t\tl1 = l1 + n + 1;'), (543, '\t\t\t\t\tl2 = l2 + n;'), (545, "\t\t\t\ttp[l2] = '\\0';"), (548, '\t\t\t\tadd_assoc_stringl(*subarray, "txt", tp, l2, 0);'), (554, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2);'), (560, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2);'), (566, '\t\t\tCHECKCP(5*4);'), (580, '\t\t\tCHECKCP(8*2);'), (615, '\t\t\tCHECKCP(1);'), (651, '\t\t\t\tCHECKCP(2);'), (681, '\t\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);'), (690, '\t\t\tCHECKCP(3*2);'), (698, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);'), (706, '\t\t\tCHECKCP(2*2);'), (712, ''), (713, '\t\t\tCHECKCP(1);'), (715, '\t\t\tcp++;'), (716, '\t\t\tCHECKCP(n);'), (717, '\t\t\tadd_assoc_stringl(*subarray, "flags", (char*)cp, n, 1);'), (719, ''), (720, '\t\t\tCHECKCP(1);'), (722, '\t\t\tcp++;'), (723, '\t\t\tCHECKCP(n);'), (724, '\t\t\tadd_assoc_stringl(*subarray, "services", (char*)cp, n, 1);'), (726, ''), (727, '\t\t\tCHECKCP(1);'), (729, '\t\t\tcp++;'), (730, '\t\t\tCHECKCP(n);'), (731, '\t\t\tadd_assoc_stringl(*subarray, "regex", (char*)cp, n, 1);'), (733, ''), (734, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);'), (927, '\t\t\t\tcp = php_parserr(cp, end, &answer, type_to_fetch, store_results, raw, &retval);'), (940, '\t\t\t\t\tcp = php_parserr(cp, end, &answer, DNS_T_ANY, authns != NULL, raw, &retval);'), (952, '\t\t\t\t\tcp = php_parserr(cp, end, &answer, DNS_T_ANY, 1, raw, &retval);')], 'deleted': [(416, 'static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)'), (428, '\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2);'), (488, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);'), (509, '\t\t\t\tint ll = 0;'), (518, '\t\t\t\twhile (ll < dlen) {'), (519, '\t\t\t\t\tn = cp[ll];'), (520, '\t\t\t\t\tif ((ll + n) >= dlen) {'), (522, '\t\t\t\t\t\tn = dlen - (ll + 1);'), (524, '\t\t\t\t\tmemcpy(tp + ll , cp + ll + 1, n);'), (525, '\t\t\t\t\tadd_next_index_stringl(entries, cp + ll + 1, n, 1);'), (526, '\t\t\t\t\tll = ll + n + 1;'), (528, "\t\t\t\ttp[dlen] = '\\0';"), (531, '\t\t\t\tadd_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0);'), (537, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);'), (543, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);'), (660, '\t\t\t\tn = 
dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);'), (676, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);'), (690, '\t\t\tadd_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1);'), (693, '\t\t\tadd_assoc_stringl(*subarray, "services", (char*)++cp, n, 1);'), (696, '\t\t\tadd_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1);'), (698, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);'), (891, '\t\t\t\tcp = php_parserr(cp, &answer, type_to_fetch, store_results, raw, &retval);'), (904, '\t\t\t\t\tcp = php_parserr(cp, &answer, DNS_T_ANY, authns != NULL, raw, &retval);'), (916, '\t\t\t\t\tcp = php_parserr(cp, &answer, DNS_T_ANY, 1, raw, &retval);')]}
60
24
742
4728
https://github.com/php/php-src
CVE-2014-3597
['CWE-119']
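The patch summarized in the diff above addresses CVE-2014-3597 (CWE-119) by validating every read against the end of the DNS reply before it happens: a CHECKCP guard plus an explicit end pointer threaded through php_parserr. Below is a minimal, self-contained sketch of that pattern applied to a length-prefixed chunk such as a TXT segment. The names parse_char_string, msg, cp, and end are illustrative, not PHP internals, and the guard is written as a subtraction so it cannot overflow (the actual patch uses the equivalent cp + n > end test).

/* Sketch: bounds-checked parsing of "<len><bytes>" chunks, in the spirit of
 * the CHECKCP guard added by the fix. */
#include <stddef.h>
#include <stdio.h>

/* Fail out of the parser if fewer than n bytes remain before end. */
#define CHECKCP(n) do { \
        if ((size_t)(end - cp) < (size_t)(n)) { \
            return NULL; \
        } \
    } while (0)

static const unsigned char *parse_char_string(const unsigned char *cp,
                                              const unsigned char *end)
{
    size_t n;

    CHECKCP(1);                 /* the length octet itself            */
    n = *cp++;
    CHECKCP(n);                 /* the n payload bytes that follow it */
    printf("chunk: %.*s\n", (int)n, (const char *)cp);
    return cp + n;
}

int main(void)
{
    /* One valid chunk, then one that claims 10 bytes but only has 2. */
    const unsigned char msg[] = { 5, 'h', 'e', 'l', 'l', 'o', 10, 'x', 'y' };
    const unsigned char *cp  = msg;
    const unsigned char *end = msg + sizeof(msg);

    while (cp && cp < end) {
        cp = parse_char_string(cp, end);   /* NULL signals malformed data */
    }
    return cp ? 0 : 1;                     /* non-zero: truncated input detected */
}

Compiled with any C compiler, this prints the first chunk and rejects the truncated one instead of reading past the buffer, which is exactly the failure mode the unpatched TXT/HINFO/NAPTR handling allowed.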
dns.c
php_parserr
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2014 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: The typical suspects | | Pollita <pollita@php.net> | | Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ /* {{{ includes */ #include "php.h" #include "php_network.h" #if HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef PHP_WIN32 # include "win32/inet.h" # include <winsock2.h> # include <windows.h> # include <Ws2tcpip.h> #else /* This holds good for NetWare too, both for Winsock and Berkeley sockets */ #include <netinet/in.h> #if HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #include <netdb.h> #ifdef _OSD_POSIX #undef STATUS #undef T_UNSPEC #endif #if HAVE_ARPA_NAMESER_H #ifdef DARWIN # define BIND_8_COMPAT 1 #endif #include <arpa/nameser.h> #endif #if HAVE_RESOLV_H #include <resolv.h> #endif #ifdef HAVE_DNS_H #include <dns.h> #endif #endif /* Borrowed from SYS/SOCKET.H */ #if defined(NETWARE) && defined(USE_WINSOCK) #define AF_INET 2 /* internetwork: UDP, TCP, etc. */ #endif #ifndef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 255 #endif /* For the local hostname obtained via gethostname which is different from the dns-related MAXHOSTNAMELEN constant above */ #ifndef HOST_NAME_MAX #define HOST_NAME_MAX 255 #endif #include "php_dns.h" /* type compat */ #ifndef DNS_T_A #define DNS_T_A 1 #endif #ifndef DNS_T_NS #define DNS_T_NS 2 #endif #ifndef DNS_T_CNAME #define DNS_T_CNAME 5 #endif #ifndef DNS_T_SOA #define DNS_T_SOA 6 #endif #ifndef DNS_T_PTR #define DNS_T_PTR 12 #endif #ifndef DNS_T_HINFO #define DNS_T_HINFO 13 #endif #ifndef DNS_T_MINFO #define DNS_T_MINFO 14 #endif #ifndef DNS_T_MX #define DNS_T_MX 15 #endif #ifndef DNS_T_TXT #define DNS_T_TXT 16 #endif #ifndef DNS_T_AAAA #define DNS_T_AAAA 28 #endif #ifndef DNS_T_SRV #define DNS_T_SRV 33 #endif #ifndef DNS_T_NAPTR #define DNS_T_NAPTR 35 #endif #ifndef DNS_T_A6 #define DNS_T_A6 38 #endif #ifndef DNS_T_ANY #define DNS_T_ANY 255 #endif /* }}} */ static char *php_gethostbyaddr(char *ip); static char *php_gethostbyname(char *name); #ifdef HAVE_GETHOSTNAME /* {{{ proto string gethostname() Get the host name of the current machine */ PHP_FUNCTION(gethostname) { char buf[HOST_NAME_MAX]; if (zend_parse_parameters_none() == FAILURE) { return; } if (gethostname(buf, sizeof(buf) - 1)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "unable to fetch host [%d]: %s", errno, strerror(errno)); RETURN_FALSE; } RETURN_STRING(buf, 1); } /* }}} */ #endif /* TODO: Reimplement the gethostby* functions using the new winxp+ API, in dns_win32.c, then we can have a dns.c, dns_unix.c and dns_win32.c instead of a messy dns.c full of #ifdef */ /* {{{ proto string gethostbyaddr(string ip_address) Get the Internet host name corresponding to a given IP address */ PHP_FUNCTION(gethostbyaddr) { char *addr; int addr_len; 
char *hostname; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &addr, &addr_len) == FAILURE) { return; } hostname = php_gethostbyaddr(addr); if (hostname == NULL) { #if HAVE_IPV6 && HAVE_INET_PTON php_error_docref(NULL TSRMLS_CC, E_WARNING, "Address is not a valid IPv4 or IPv6 address"); #else php_error_docref(NULL TSRMLS_CC, E_WARNING, "Address is not in a.b.c.d form"); #endif RETVAL_FALSE; } else { RETVAL_STRING(hostname, 0); } } /* }}} */ /* {{{ php_gethostbyaddr */ static char *php_gethostbyaddr(char *ip) { #if HAVE_IPV6 && HAVE_INET_PTON struct in6_addr addr6; #endif struct in_addr addr; struct hostent *hp; #if HAVE_IPV6 && HAVE_INET_PTON if (inet_pton(AF_INET6, ip, &addr6)) { hp = gethostbyaddr((char *) &addr6, sizeof(addr6), AF_INET6); } else if (inet_pton(AF_INET, ip, &addr)) { hp = gethostbyaddr((char *) &addr, sizeof(addr), AF_INET); } else { return NULL; } #else addr.s_addr = inet_addr(ip); if (addr.s_addr == -1) { return NULL; } hp = gethostbyaddr((char *) &addr, sizeof(addr), AF_INET); #endif if (!hp || hp->h_name == NULL || hp->h_name[0] == '\0') { return estrdup(ip); } return estrdup(hp->h_name); } /* }}} */ /* {{{ proto string gethostbyname(string hostname) Get the IP address corresponding to a given Internet host name */ PHP_FUNCTION(gethostbyname) { char *hostname; int hostname_len; char *addr; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &hostname, &hostname_len) == FAILURE) { return; } addr = php_gethostbyname(hostname); RETVAL_STRING(addr, 0); } /* }}} */ /* {{{ proto array gethostbynamel(string hostname) Return a list of IP addresses that a given hostname resolves to. */ PHP_FUNCTION(gethostbynamel) { char *hostname; int hostname_len; struct hostent *hp; struct in_addr in; int i; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &hostname, &hostname_len) == FAILURE) { return; } hp = gethostbyname(hostname); if (hp == NULL || hp->h_addr_list == NULL) { RETURN_FALSE; } array_init(return_value); for (i = 0 ; hp->h_addr_list[i] != 0 ; i++) { in = *(struct in_addr *) hp->h_addr_list[i]; add_next_index_string(return_value, inet_ntoa(in), 1); } } /* }}} */ /* {{{ php_gethostbyname */ static char *php_gethostbyname(char *name) { struct hostent *hp; struct in_addr in; hp = gethostbyname(name); if (!hp || !*(hp->h_addr_list)) { return estrdup(name); } memcpy(&in.s_addr, *(hp->h_addr_list), sizeof(in.s_addr)); return estrdup(inet_ntoa(in)); } /* }}} */ #if HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) # define PHP_DNS_NUM_TYPES 12 /* Number of DNS Types Supported by PHP currently */ # define PHP_DNS_A 0x00000001 # define PHP_DNS_NS 0x00000002 # define PHP_DNS_CNAME 0x00000010 # define PHP_DNS_SOA 0x00000020 # define PHP_DNS_PTR 0x00000800 # define PHP_DNS_HINFO 0x00001000 # define PHP_DNS_MX 0x00004000 # define PHP_DNS_TXT 0x00008000 # define PHP_DNS_A6 0x01000000 # define PHP_DNS_SRV 0x02000000 # define PHP_DNS_NAPTR 0x04000000 # define PHP_DNS_AAAA 0x08000000 # define PHP_DNS_ANY 0x10000000 # define PHP_DNS_ALL (PHP_DNS_A|PHP_DNS_NS|PHP_DNS_CNAME|PHP_DNS_SOA|PHP_DNS_PTR|PHP_DNS_HINFO|PHP_DNS_MX|PHP_DNS_TXT|PHP_DNS_A6|PHP_DNS_SRV|PHP_DNS_NAPTR|PHP_DNS_AAAA) #endif /* HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) */ /* Note: These functions are defined in ext/standard/dns_win32.c for Windows! 
*/ #if !defined(PHP_WIN32) && (HAVE_DNS_SEARCH_FUNC && !(defined(__BEOS__) || defined(NETWARE))) #ifndef HFIXEDSZ #define HFIXEDSZ 12 /* fixed data in header <arpa/nameser.h> */ #endif /* HFIXEDSZ */ #ifndef QFIXEDSZ #define QFIXEDSZ 4 /* fixed data in query <arpa/nameser.h> */ #endif /* QFIXEDSZ */ #undef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 1024 #ifndef MAXRESOURCERECORDS #define MAXRESOURCERECORDS 64 #endif /* MAXRESOURCERECORDS */ typedef union { HEADER qb1; u_char qb2[65536]; } querybuf; /* just a hack to free resources allocated by glibc in __res_nsend() * See also: * res_thread_freeres() in glibc/resolv/res_init.c * __libc_res_nsend() in resolv/res_send.c * */ #if defined(__GLIBC__) && !defined(HAVE_DEPRECATED_DNS_FUNCS) #define php_dns_free_res(__res__) _php_dns_free_res(__res__) static void _php_dns_free_res(struct __res_state res) { /* {{{ */ int ns; for (ns = 0; ns < MAXNS; ns++) { if (res._u._ext.nsaddrs[ns] != NULL) { free (res._u._ext.nsaddrs[ns]); res._u._ext.nsaddrs[ns] = NULL; } } } /* }}} */ #else #define php_dns_free_res(__res__) #endif /* {{{ proto bool dns_check_record(string host [, string type]) Check DNS records corresponding to a given Internet host name or IP address */ PHP_FUNCTION(dns_check_record) { #ifndef MAXPACKET #define MAXPACKET 8192 /* max packet size used internally by BIND */ #endif u_char ans[MAXPACKET]; char *hostname, *rectype = NULL; int hostname_len, rectype_len = 0; int type = T_MX, i; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &hostname, &hostname_len, &rectype, &rectype_len) == FAILURE) { return; } if (hostname_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Host cannot be empty"); RETURN_FALSE; } if (rectype) { if (!strcasecmp("A", rectype)) type = T_A; else if (!strcasecmp("NS", rectype)) type = DNS_T_NS; else if (!strcasecmp("MX", rectype)) type = DNS_T_MX; else if (!strcasecmp("PTR", rectype)) type = DNS_T_PTR; else if (!strcasecmp("ANY", rectype)) type = DNS_T_ANY; else if (!strcasecmp("SOA", rectype)) type = DNS_T_SOA; else if (!strcasecmp("TXT", rectype)) type = DNS_T_TXT; else if (!strcasecmp("CNAME", rectype)) type = DNS_T_CNAME; else if (!strcasecmp("AAAA", rectype)) type = DNS_T_AAAA; else if (!strcasecmp("SRV", rectype)) type = DNS_T_SRV; else if (!strcasecmp("NAPTR", rectype)) type = DNS_T_NAPTR; else if (!strcasecmp("A6", rectype)) type = DNS_T_A6; else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%s' not supported", rectype); RETURN_FALSE; } } #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { RETURN_FALSE; } #else res_init(); #endif RETVAL_TRUE; i = php_dns_search(handle, hostname, C_IN, type, ans, sizeof(ans)); if (i < 0) { RETVAL_FALSE; } php_dns_free_handle(handle); } /* }}} */ #if HAVE_FULL_DNS_FUNCS /* {{{ php_parserr */ static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) { u_short type, class, dlen; u_long ttl; long n, i; u_short s; u_char *tp, *p; char name[MAXHOSTNAMELEN]; int have_v6_break = 0, in_v6_break = 0; *subarray = NULL; n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2); if (n < 0) { return NULL; } cp += n; GETSHORT(type, cp); GETSHORT(class, cp); 
GETLONG(ttl, cp); GETSHORT(dlen, cp); if (type_to_fetch != T_ANY && type != type_to_fetch) { cp += dlen; return cp; } if (!store) { cp += dlen; return cp; } ALLOC_INIT_ZVAL(*subarray); array_init(*subarray); add_assoc_string(*subarray, "host", name, 1); add_assoc_string(*subarray, "class", "IN", 1); add_assoc_long(*subarray, "ttl", ttl); if (raw) { add_assoc_long(*subarray, "type", type); add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1); cp += dlen; return cp; } switch (type) { case DNS_T_A: add_assoc_string(*subarray, "type", "A", 1); snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]); add_assoc_string(*subarray, "ip", name, 1); cp += dlen; break; case DNS_T_MX: add_assoc_string(*subarray, "type", "MX", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); /* no break; */ case DNS_T_CNAME: if (type == DNS_T_CNAME) { add_assoc_string(*subarray, "type", "CNAME", 1); } /* no break; */ case DNS_T_NS: if (type == DNS_T_NS) { add_assoc_string(*subarray, "type", "NS", 1); } /* no break; */ case DNS_T_PTR: if (type == DNS_T_PTR) { add_assoc_string(*subarray, "type", "PTR", 1); } n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_HINFO: /* See RFC 1010 for values */ add_assoc_string(*subarray, "type", "HINFO", 1); n = *cp & 0xFF; cp++; add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1); cp += n; n = *cp & 0xFF; cp++; add_assoc_stringl(*subarray, "os", (char*)cp, n, 1); cp += n; break; case DNS_T_TXT: { int ll = 0; zval *entries = NULL; add_assoc_string(*subarray, "type", "TXT", 1); tp = emalloc(dlen + 1); MAKE_STD_ZVAL(entries); array_init(entries); while (ll < dlen) { n = cp[ll]; if ((ll + n) >= dlen) { // Invalid chunk length, truncate n = dlen - (ll + 1); } memcpy(tp + ll , cp + ll + 1, n); add_next_index_stringl(entries, cp + ll + 1, n, 1); ll = ll + n + 1; } tp[dlen] = '\0'; cp += dlen; add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0); add_assoc_zval(*subarray, "entries", entries); } break; case DNS_T_SOA: add_assoc_string(*subarray, "type", "SOA", 1); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "mname", name, 1); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "rname", name, 1); GETLONG(n, cp); add_assoc_long(*subarray, "serial", n); GETLONG(n, cp); add_assoc_long(*subarray, "refresh", n); GETLONG(n, cp); add_assoc_long(*subarray, "retry", n); GETLONG(n, cp); add_assoc_long(*subarray, "expire", n); GETLONG(n, cp); add_assoc_long(*subarray, "minimum-ttl", n); break; case DNS_T_AAAA: tp = (u_char*)name; for(i=0; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "type", "AAAA", 1); add_assoc_string(*subarray, "ipv6", name, 1); break; case DNS_T_A6: p = cp; add_assoc_string(*subarray, "type", "A6", 1); n = ((int)cp[0]) & 0xFF; cp++; add_assoc_long(*subarray, "masklen", n); tp = (u_char*)name; if (n > 15) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } if (n % 16 > 8) { /* 
Partial short */ if (cp[0] != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } sprintf((char*)tp, "%x", cp[0] & 0xFF); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } cp++; } for (i = (n + 8) / 16; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "ipv6", name, 1); if (cp < p + dlen) { n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "chain", name, 1); } break; case DNS_T_SRV: add_assoc_string(*subarray, "type", "SRV", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); GETSHORT(n, cp); add_assoc_long(*subarray, "weight", n); GETSHORT(n, cp); add_assoc_long(*subarray, "port", n); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_NAPTR: add_assoc_string(*subarray, "type", "NAPTR", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "order", n); GETSHORT(n, cp); add_assoc_long(*subarray, "pref", n); n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1); cp += n; n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1); cp += n; n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1); cp += n; n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "replacement", name, 1); break; default: zval_ptr_dtor(subarray); *subarray = NULL; cp += dlen; break; } return cp; } /* }}} */ /* {{{ proto array|false dns_get_record(string hostname [, int type[, array authns, array addtl]]) Get any Resource Record corresponding to a given Internet host name */ PHP_FUNCTION(dns_get_record) { char *hostname; int hostname_len; long type_param = PHP_DNS_ANY; zval *authns = NULL, *addtl = NULL; int type_to_fetch; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif HEADER *hp; querybuf answer; u_char *cp = NULL, *end = NULL; int n, qd, an, ns = 0, ar = 0; int type, first_query = 1, store_results = 1; zend_bool raw = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lz!z!b", &hostname, &hostname_len, &type_param, &authns, &addtl, &raw) == FAILURE) { return; } if (authns) { zval_dtor(authns); array_init(authns); } if (addtl) { zval_dtor(addtl); array_init(addtl); } if (!raw) { if ((type_param & ~PHP_DNS_ALL) && (type_param != PHP_DNS_ANY)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%ld' not supported", type_param); RETURN_FALSE; } } else { if ((type_param < 1) || (type_param > 0xFFFF)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Numeric DNS record type must be between 1 and 65535, '%ld' given", type_param); RETURN_FALSE; } } /* Initialize the return array */ array_init(return_value); /* - We emulate an or'ed type mask by querying type by type. 
(Steps 0 - NUMTYPES-1 ) * If additional info is wanted we check again with DNS_T_ANY (step NUMTYPES / NUMTYPES+1 ) * store_results is used to skip storing the results retrieved in step * NUMTYPES+1 when results were already fetched. * - In case of PHP_DNS_ANY we use the directly fetch DNS_T_ANY. (step NUMTYPES+1 ) * - In case of raw mode, we query only the requestd type instead of looping type by type * before going with the additional info stuff. */ if (raw) { type = -1; } else if (type_param == PHP_DNS_ANY) { type = PHP_DNS_NUM_TYPES + 1; } else { type = 0; } for ( ; type < (addtl ? (PHP_DNS_NUM_TYPES + 2) : PHP_DNS_NUM_TYPES) || first_query; type++ ) { first_query = 0; switch (type) { case -1: /* raw */ type_to_fetch = type_param; /* skip over the rest and go directly to additional records */ type = PHP_DNS_NUM_TYPES - 1; break; case 0: type_to_fetch = type_param&PHP_DNS_A ? DNS_T_A : 0; break; case 1: type_to_fetch = type_param&PHP_DNS_NS ? DNS_T_NS : 0; break; case 2: type_to_fetch = type_param&PHP_DNS_CNAME ? DNS_T_CNAME : 0; break; case 3: type_to_fetch = type_param&PHP_DNS_SOA ? DNS_T_SOA : 0; break; case 4: type_to_fetch = type_param&PHP_DNS_PTR ? DNS_T_PTR : 0; break; case 5: type_to_fetch = type_param&PHP_DNS_HINFO ? DNS_T_HINFO : 0; break; case 6: type_to_fetch = type_param&PHP_DNS_MX ? DNS_T_MX : 0; break; case 7: type_to_fetch = type_param&PHP_DNS_TXT ? DNS_T_TXT : 0; break; case 8: type_to_fetch = type_param&PHP_DNS_AAAA ? DNS_T_AAAA : 0; break; case 9: type_to_fetch = type_param&PHP_DNS_SRV ? DNS_T_SRV : 0; break; case 10: type_to_fetch = type_param&PHP_DNS_NAPTR ? DNS_T_NAPTR : 0; break; case 11: type_to_fetch = type_param&PHP_DNS_A6 ? DNS_T_A6 : 0; break; case PHP_DNS_NUM_TYPES: store_results = 0; continue; default: case (PHP_DNS_NUM_TYPES + 1): type_to_fetch = DNS_T_ANY; break; } if (type_to_fetch) { #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { zval_dtor(return_value); RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { zval_dtor(return_value); RETURN_FALSE; } #else res_init(); #endif n = php_dns_search(handle, hostname, C_IN, type_to_fetch, answer.qb2, sizeof answer); if (n < 0) { php_dns_free_handle(handle); continue; } cp = answer.qb2 + HFIXEDSZ; end = answer.qb2 + n; hp = (HEADER *)&answer; qd = ntohs(hp->qdcount); an = ntohs(hp->ancount); ns = ntohs(hp->nscount); ar = ntohs(hp->arcount); /* Skip QD entries, they're only used by dn_expand later on */ while (qd-- > 0) { n = dn_skipname(cp, end); if (n < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to parse DNS data received"); zval_dtor(return_value); php_dns_free_handle(handle); RETURN_FALSE; } cp += n + QFIXEDSZ; } /* YAY! Our real answers! 
*/ while (an-- && cp && cp < end) { zval *retval; cp = php_parserr(cp, &answer, type_to_fetch, store_results, raw, &retval); if (retval != NULL && store_results) { add_next_index_zval(return_value, retval); } } if (authns || addtl) { /* List of Authoritative Name Servers * Process when only requesting addtl so that we can skip through the section */ while (ns-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, &answer, DNS_T_ANY, authns != NULL, raw, &retval); if (retval != NULL) { add_next_index_zval(authns, retval); } } } if (addtl) { /* Additional records associated with authoritative name servers */ while (ar-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, &answer, DNS_T_ANY, 1, raw, &retval); if (retval != NULL) { add_next_index_zval(addtl, retval); } } } php_dns_free_handle(handle); } } } /* }}} */ /* {{{ proto bool dns_get_mx(string hostname, array mxhosts [, array weight]) Get MX records corresponding to a given Internet host name */ PHP_FUNCTION(dns_get_mx) { char *hostname; int hostname_len; zval *mx_list, *weight_list = NULL; int count, qdc; u_short type, weight; u_char ans[MAXPACKET]; char buf[MAXHOSTNAMELEN]; HEADER *hp; u_char *cp, *end; int i; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sz|z", &hostname, &hostname_len, &mx_list, &weight_list) == FAILURE) { return; } zval_dtor(mx_list); array_init(mx_list); if (weight_list) { zval_dtor(weight_list); array_init(weight_list); } #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { RETURN_FALSE; } #else res_init(); #endif i = php_dns_search(handle, hostname, C_IN, DNS_T_MX, (u_char *)&ans, sizeof(ans)); if (i < 0) { RETURN_FALSE; } if (i > (int)sizeof(ans)) { i = sizeof(ans); } hp = (HEADER *)&ans; cp = (u_char *)&ans + HFIXEDSZ; end = (u_char *)&ans +i; for (qdc = ntohs((unsigned short)hp->qdcount); qdc--; cp += i + QFIXEDSZ) { if ((i = dn_skipname(cp, end)) < 0 ) { php_dns_free_handle(handle); RETURN_FALSE; } } count = ntohs((unsigned short)hp->ancount); while (--count >= 0 && cp < end) { if ((i = dn_skipname(cp, end)) < 0 ) { php_dns_free_handle(handle); RETURN_FALSE; } cp += i; GETSHORT(type, cp); cp += INT16SZ + INT32SZ; GETSHORT(i, cp); if (type != DNS_T_MX) { cp += i; continue; } GETSHORT(weight, cp); if ((i = dn_expand(ans, end, cp, buf, sizeof(buf)-1)) < 0) { php_dns_free_handle(handle); RETURN_FALSE; } cp += i; add_next_index_string(mx_list, buf, 1); if (weight_list) { add_next_index_long(weight_list, weight); } } php_dns_free_handle(handle); RETURN_TRUE; } /* }}} */ #endif /* HAVE_FULL_DNS_FUNCS */ #endif /* !defined(PHP_WIN32) && (HAVE_DNS_SEARCH_FUNC && !(defined(__BEOS__) || defined(NETWARE))) */ #if HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) PHP_MINIT_FUNCTION(dns) { REGISTER_LONG_CONSTANT("DNS_A", PHP_DNS_A, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_NS", PHP_DNS_NS, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_CNAME", PHP_DNS_CNAME, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_SOA", PHP_DNS_SOA, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_PTR", PHP_DNS_PTR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_HINFO", PHP_DNS_HINFO, CONST_CS | CONST_PERSISTENT); 
REGISTER_LONG_CONSTANT("DNS_MX", PHP_DNS_MX, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_TXT", PHP_DNS_TXT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_SRV", PHP_DNS_SRV, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_NAPTR", PHP_DNS_NAPTR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_AAAA", PHP_DNS_AAAA, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_A6", PHP_DNS_A6, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_ANY", PHP_DNS_ANY, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_ALL", PHP_DNS_ALL, CONST_CS | CONST_PERSISTENT); return SUCCESS; } #endif /* HAVE_FULL_DNS_FUNCS */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2014 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: The typical suspects | | Pollita <pollita@php.net> | | Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ /* {{{ includes */ #include "php.h" #include "php_network.h" #if HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #ifdef PHP_WIN32 # include "win32/inet.h" # include <winsock2.h> # include <windows.h> # include <Ws2tcpip.h> #else /* This holds good for NetWare too, both for Winsock and Berkeley sockets */ #include <netinet/in.h> #if HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #include <netdb.h> #ifdef _OSD_POSIX #undef STATUS #undef T_UNSPEC #endif #if HAVE_ARPA_NAMESER_H #ifdef DARWIN # define BIND_8_COMPAT 1 #endif #include <arpa/nameser.h> #endif #if HAVE_RESOLV_H #include <resolv.h> #endif #ifdef HAVE_DNS_H #include <dns.h> #endif #endif /* Borrowed from SYS/SOCKET.H */ #if defined(NETWARE) && defined(USE_WINSOCK) #define AF_INET 2 /* internetwork: UDP, TCP, etc. */ #endif #ifndef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 255 #endif /* For the local hostname obtained via gethostname which is different from the dns-related MAXHOSTNAMELEN constant above */ #ifndef HOST_NAME_MAX #define HOST_NAME_MAX 255 #endif #include "php_dns.h" /* type compat */ #ifndef DNS_T_A #define DNS_T_A 1 #endif #ifndef DNS_T_NS #define DNS_T_NS 2 #endif #ifndef DNS_T_CNAME #define DNS_T_CNAME 5 #endif #ifndef DNS_T_SOA #define DNS_T_SOA 6 #endif #ifndef DNS_T_PTR #define DNS_T_PTR 12 #endif #ifndef DNS_T_HINFO #define DNS_T_HINFO 13 #endif #ifndef DNS_T_MINFO #define DNS_T_MINFO 14 #endif #ifndef DNS_T_MX #define DNS_T_MX 15 #endif #ifndef DNS_T_TXT #define DNS_T_TXT 16 #endif #ifndef DNS_T_AAAA #define DNS_T_AAAA 28 #endif #ifndef DNS_T_SRV #define DNS_T_SRV 33 #endif #ifndef DNS_T_NAPTR #define DNS_T_NAPTR 35 #endif #ifndef DNS_T_A6 #define DNS_T_A6 38 #endif #ifndef DNS_T_ANY #define DNS_T_ANY 255 #endif /* }}} */ static char *php_gethostbyaddr(char *ip); static char *php_gethostbyname(char *name); #ifdef HAVE_GETHOSTNAME /* {{{ proto string gethostname() Get the host name of the current machine */ PHP_FUNCTION(gethostname) { char buf[HOST_NAME_MAX]; if (zend_parse_parameters_none() == FAILURE) { return; } if (gethostname(buf, sizeof(buf) - 1)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "unable to fetch host [%d]: %s", errno, strerror(errno)); RETURN_FALSE; } RETURN_STRING(buf, 1); } /* }}} */ #endif /* TODO: Reimplement the gethostby* functions using the new winxp+ API, in dns_win32.c, then we can have a dns.c, dns_unix.c and dns_win32.c instead of a messy dns.c full of #ifdef */ /* {{{ proto string gethostbyaddr(string ip_address) Get the Internet host name corresponding to a given IP address */ PHP_FUNCTION(gethostbyaddr) { char *addr; int addr_len; 
char *hostname; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &addr, &addr_len) == FAILURE) { return; } hostname = php_gethostbyaddr(addr); if (hostname == NULL) { #if HAVE_IPV6 && HAVE_INET_PTON php_error_docref(NULL TSRMLS_CC, E_WARNING, "Address is not a valid IPv4 or IPv6 address"); #else php_error_docref(NULL TSRMLS_CC, E_WARNING, "Address is not in a.b.c.d form"); #endif RETVAL_FALSE; } else { RETVAL_STRING(hostname, 0); } } /* }}} */ /* {{{ php_gethostbyaddr */ static char *php_gethostbyaddr(char *ip) { #if HAVE_IPV6 && HAVE_INET_PTON struct in6_addr addr6; #endif struct in_addr addr; struct hostent *hp; #if HAVE_IPV6 && HAVE_INET_PTON if (inet_pton(AF_INET6, ip, &addr6)) { hp = gethostbyaddr((char *) &addr6, sizeof(addr6), AF_INET6); } else if (inet_pton(AF_INET, ip, &addr)) { hp = gethostbyaddr((char *) &addr, sizeof(addr), AF_INET); } else { return NULL; } #else addr.s_addr = inet_addr(ip); if (addr.s_addr == -1) { return NULL; } hp = gethostbyaddr((char *) &addr, sizeof(addr), AF_INET); #endif if (!hp || hp->h_name == NULL || hp->h_name[0] == '\0') { return estrdup(ip); } return estrdup(hp->h_name); } /* }}} */ /* {{{ proto string gethostbyname(string hostname) Get the IP address corresponding to a given Internet host name */ PHP_FUNCTION(gethostbyname) { char *hostname; int hostname_len; char *addr; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &hostname, &hostname_len) == FAILURE) { return; } addr = php_gethostbyname(hostname); RETVAL_STRING(addr, 0); } /* }}} */ /* {{{ proto array gethostbynamel(string hostname) Return a list of IP addresses that a given hostname resolves to. */ PHP_FUNCTION(gethostbynamel) { char *hostname; int hostname_len; struct hostent *hp; struct in_addr in; int i; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &hostname, &hostname_len) == FAILURE) { return; } hp = gethostbyname(hostname); if (hp == NULL || hp->h_addr_list == NULL) { RETURN_FALSE; } array_init(return_value); for (i = 0 ; hp->h_addr_list[i] != 0 ; i++) { in = *(struct in_addr *) hp->h_addr_list[i]; add_next_index_string(return_value, inet_ntoa(in), 1); } } /* }}} */ /* {{{ php_gethostbyname */ static char *php_gethostbyname(char *name) { struct hostent *hp; struct in_addr in; hp = gethostbyname(name); if (!hp || !*(hp->h_addr_list)) { return estrdup(name); } memcpy(&in.s_addr, *(hp->h_addr_list), sizeof(in.s_addr)); return estrdup(inet_ntoa(in)); } /* }}} */ #if HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) # define PHP_DNS_NUM_TYPES 12 /* Number of DNS Types Supported by PHP currently */ # define PHP_DNS_A 0x00000001 # define PHP_DNS_NS 0x00000002 # define PHP_DNS_CNAME 0x00000010 # define PHP_DNS_SOA 0x00000020 # define PHP_DNS_PTR 0x00000800 # define PHP_DNS_HINFO 0x00001000 # define PHP_DNS_MX 0x00004000 # define PHP_DNS_TXT 0x00008000 # define PHP_DNS_A6 0x01000000 # define PHP_DNS_SRV 0x02000000 # define PHP_DNS_NAPTR 0x04000000 # define PHP_DNS_AAAA 0x08000000 # define PHP_DNS_ANY 0x10000000 # define PHP_DNS_ALL (PHP_DNS_A|PHP_DNS_NS|PHP_DNS_CNAME|PHP_DNS_SOA|PHP_DNS_PTR|PHP_DNS_HINFO|PHP_DNS_MX|PHP_DNS_TXT|PHP_DNS_A6|PHP_DNS_SRV|PHP_DNS_NAPTR|PHP_DNS_AAAA) #endif /* HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) */ /* Note: These functions are defined in ext/standard/dns_win32.c for Windows! 
*/ #if !defined(PHP_WIN32) && (HAVE_DNS_SEARCH_FUNC && !(defined(__BEOS__) || defined(NETWARE))) #ifndef HFIXEDSZ #define HFIXEDSZ 12 /* fixed data in header <arpa/nameser.h> */ #endif /* HFIXEDSZ */ #ifndef QFIXEDSZ #define QFIXEDSZ 4 /* fixed data in query <arpa/nameser.h> */ #endif /* QFIXEDSZ */ #undef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 1024 #ifndef MAXRESOURCERECORDS #define MAXRESOURCERECORDS 64 #endif /* MAXRESOURCERECORDS */ typedef union { HEADER qb1; u_char qb2[65536]; } querybuf; /* just a hack to free resources allocated by glibc in __res_nsend() * See also: * res_thread_freeres() in glibc/resolv/res_init.c * __libc_res_nsend() in resolv/res_send.c * */ #if defined(__GLIBC__) && !defined(HAVE_DEPRECATED_DNS_FUNCS) #define php_dns_free_res(__res__) _php_dns_free_res(__res__) static void _php_dns_free_res(struct __res_state res) { /* {{{ */ int ns; for (ns = 0; ns < MAXNS; ns++) { if (res._u._ext.nsaddrs[ns] != NULL) { free (res._u._ext.nsaddrs[ns]); res._u._ext.nsaddrs[ns] = NULL; } } } /* }}} */ #else #define php_dns_free_res(__res__) #endif /* {{{ proto bool dns_check_record(string host [, string type]) Check DNS records corresponding to a given Internet host name or IP address */ PHP_FUNCTION(dns_check_record) { #ifndef MAXPACKET #define MAXPACKET 8192 /* max packet size used internally by BIND */ #endif u_char ans[MAXPACKET]; char *hostname, *rectype = NULL; int hostname_len, rectype_len = 0; int type = T_MX, i; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &hostname, &hostname_len, &rectype, &rectype_len) == FAILURE) { return; } if (hostname_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Host cannot be empty"); RETURN_FALSE; } if (rectype) { if (!strcasecmp("A", rectype)) type = T_A; else if (!strcasecmp("NS", rectype)) type = DNS_T_NS; else if (!strcasecmp("MX", rectype)) type = DNS_T_MX; else if (!strcasecmp("PTR", rectype)) type = DNS_T_PTR; else if (!strcasecmp("ANY", rectype)) type = DNS_T_ANY; else if (!strcasecmp("SOA", rectype)) type = DNS_T_SOA; else if (!strcasecmp("TXT", rectype)) type = DNS_T_TXT; else if (!strcasecmp("CNAME", rectype)) type = DNS_T_CNAME; else if (!strcasecmp("AAAA", rectype)) type = DNS_T_AAAA; else if (!strcasecmp("SRV", rectype)) type = DNS_T_SRV; else if (!strcasecmp("NAPTR", rectype)) type = DNS_T_NAPTR; else if (!strcasecmp("A6", rectype)) type = DNS_T_A6; else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%s' not supported", rectype); RETURN_FALSE; } } #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { RETURN_FALSE; } #else res_init(); #endif RETVAL_TRUE; i = php_dns_search(handle, hostname, C_IN, type, ans, sizeof(ans)); if (i < 0) { RETVAL_FALSE; } php_dns_free_handle(handle); } /* }}} */ #if HAVE_FULL_DNS_FUNCS #define CHECKCP(n) do { \ if (cp + n > end) { \ return NULL; \ } \ } while (0) /* {{{ php_parserr */ static u_char *php_parserr(u_char *cp, u_char *end, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) { u_short type, class, dlen; u_long ttl; long n, i; u_short s; u_char *tp, *p; char name[MAXHOSTNAMELEN]; int have_v6_break = 0, in_v6_break = 0; *subarray = NULL; n = dn_expand(answer->qb2, end, cp, name, sizeof(name) - 2); if (n 
< 0) { return NULL; } cp += n; CHECKCP(10); GETSHORT(type, cp); GETSHORT(class, cp); GETLONG(ttl, cp); GETSHORT(dlen, cp); CHECKCP(dlen); if (type_to_fetch != T_ANY && type != type_to_fetch) { cp += dlen; return cp; } if (!store) { cp += dlen; return cp; } ALLOC_INIT_ZVAL(*subarray); array_init(*subarray); add_assoc_string(*subarray, "host", name, 1); add_assoc_string(*subarray, "class", "IN", 1); add_assoc_long(*subarray, "ttl", ttl); if (raw) { add_assoc_long(*subarray, "type", type); add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1); cp += dlen; return cp; } switch (type) { case DNS_T_A: CHECKCP(4); add_assoc_string(*subarray, "type", "A", 1); snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]); add_assoc_string(*subarray, "ip", name, 1); cp += dlen; break; case DNS_T_MX: CHECKCP(2); add_assoc_string(*subarray, "type", "MX", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); /* no break; */ case DNS_T_CNAME: if (type == DNS_T_CNAME) { add_assoc_string(*subarray, "type", "CNAME", 1); } /* no break; */ case DNS_T_NS: if (type == DNS_T_NS) { add_assoc_string(*subarray, "type", "NS", 1); } /* no break; */ case DNS_T_PTR: if (type == DNS_T_PTR) { add_assoc_string(*subarray, "type", "PTR", 1); } n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_HINFO: /* See RFC 1010 for values */ add_assoc_string(*subarray, "type", "HINFO", 1); CHECKCP(1); n = *cp & 0xFF; cp++; CHECKCP(n); add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1); cp += n; CHECKCP(1); n = *cp & 0xFF; cp++; CHECKCP(n); add_assoc_stringl(*subarray, "os", (char*)cp, n, 1); cp += n; break; case DNS_T_TXT: { int l1 = 0, l2 = 0; zval *entries = NULL; add_assoc_string(*subarray, "type", "TXT", 1); tp = emalloc(dlen + 1); MAKE_STD_ZVAL(entries); array_init(entries); while (l1 < dlen) { n = cp[l1]; if ((l1 + n) >= dlen) { // Invalid chunk length, truncate n = dlen - (l1 + 1); } if (n) { memcpy(tp + l2 , cp + l1 + 1, n); add_next_index_stringl(entries, cp + l1 + 1, n, 1); } l1 = l1 + n + 1; l2 = l2 + n; } tp[l2] = '\0'; cp += dlen; add_assoc_stringl(*subarray, "txt", tp, l2, 0); add_assoc_zval(*subarray, "entries", entries); } break; case DNS_T_SOA: add_assoc_string(*subarray, "type", "SOA", 1); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "mname", name, 1); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "rname", name, 1); CHECKCP(5*4); GETLONG(n, cp); add_assoc_long(*subarray, "serial", n); GETLONG(n, cp); add_assoc_long(*subarray, "refresh", n); GETLONG(n, cp); add_assoc_long(*subarray, "retry", n); GETLONG(n, cp); add_assoc_long(*subarray, "expire", n); GETLONG(n, cp); add_assoc_long(*subarray, "minimum-ttl", n); break; case DNS_T_AAAA: tp = (u_char*)name; CHECKCP(8*2); for(i=0; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "type", "AAAA", 1); add_assoc_string(*subarray, "ipv6", name, 1); break; case DNS_T_A6: p = cp; add_assoc_string(*subarray, "type", "A6", 1); CHECKCP(1); n = 
((int)cp[0]) & 0xFF; cp++; add_assoc_long(*subarray, "masklen", n); tp = (u_char*)name; if (n > 15) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } if (n % 16 > 8) { /* Partial short */ if (cp[0] != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } sprintf((char*)tp, "%x", cp[0] & 0xFF); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } cp++; } for (i = (n + 8) / 16; i < 8; i++) { CHECKCP(2); GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "ipv6", name, 1); if (cp < p + dlen) { n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "chain", name, 1); } break; case DNS_T_SRV: CHECKCP(3*2); add_assoc_string(*subarray, "type", "SRV", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); GETSHORT(n, cp); add_assoc_long(*subarray, "weight", n); GETSHORT(n, cp); add_assoc_long(*subarray, "port", n); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_NAPTR: CHECKCP(2*2); add_assoc_string(*subarray, "type", "NAPTR", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "order", n); GETSHORT(n, cp); add_assoc_long(*subarray, "pref", n); CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "flags", (char*)cp, n, 1); cp += n; CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "services", (char*)cp, n, 1); cp += n; CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "regex", (char*)cp, n, 1); cp += n; n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "replacement", name, 1); break; default: zval_ptr_dtor(subarray); *subarray = NULL; cp += dlen; break; } return cp; } /* }}} */ /* {{{ proto array|false dns_get_record(string hostname [, int type[, array authns, array addtl]]) Get any Resource Record corresponding to a given Internet host name */ PHP_FUNCTION(dns_get_record) { char *hostname; int hostname_len; long type_param = PHP_DNS_ANY; zval *authns = NULL, *addtl = NULL; int type_to_fetch; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif HEADER *hp; querybuf answer; u_char *cp = NULL, *end = NULL; int n, qd, an, ns = 0, ar = 0; int type, first_query = 1, store_results = 1; zend_bool raw = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lz!z!b", &hostname, &hostname_len, &type_param, &authns, &addtl, &raw) == FAILURE) { return; } if (authns) { zval_dtor(authns); array_init(authns); } if (addtl) { zval_dtor(addtl); array_init(addtl); } if (!raw) { if ((type_param & ~PHP_DNS_ALL) && (type_param != PHP_DNS_ANY)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Type '%ld' not supported", type_param); RETURN_FALSE; } } else { if ((type_param < 1) || (type_param > 0xFFFF)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Numeric DNS 
record type must be between 1 and 65535, '%ld' given", type_param); RETURN_FALSE; } } /* Initialize the return array */ array_init(return_value); /* - We emulate an or'ed type mask by querying type by type. (Steps 0 - NUMTYPES-1 ) * If additional info is wanted we check again with DNS_T_ANY (step NUMTYPES / NUMTYPES+1 ) * store_results is used to skip storing the results retrieved in step * NUMTYPES+1 when results were already fetched. * - In case of PHP_DNS_ANY we use the directly fetch DNS_T_ANY. (step NUMTYPES+1 ) * - In case of raw mode, we query only the requestd type instead of looping type by type * before going with the additional info stuff. */ if (raw) { type = -1; } else if (type_param == PHP_DNS_ANY) { type = PHP_DNS_NUM_TYPES + 1; } else { type = 0; } for ( ; type < (addtl ? (PHP_DNS_NUM_TYPES + 2) : PHP_DNS_NUM_TYPES) || first_query; type++ ) { first_query = 0; switch (type) { case -1: /* raw */ type_to_fetch = type_param; /* skip over the rest and go directly to additional records */ type = PHP_DNS_NUM_TYPES - 1; break; case 0: type_to_fetch = type_param&PHP_DNS_A ? DNS_T_A : 0; break; case 1: type_to_fetch = type_param&PHP_DNS_NS ? DNS_T_NS : 0; break; case 2: type_to_fetch = type_param&PHP_DNS_CNAME ? DNS_T_CNAME : 0; break; case 3: type_to_fetch = type_param&PHP_DNS_SOA ? DNS_T_SOA : 0; break; case 4: type_to_fetch = type_param&PHP_DNS_PTR ? DNS_T_PTR : 0; break; case 5: type_to_fetch = type_param&PHP_DNS_HINFO ? DNS_T_HINFO : 0; break; case 6: type_to_fetch = type_param&PHP_DNS_MX ? DNS_T_MX : 0; break; case 7: type_to_fetch = type_param&PHP_DNS_TXT ? DNS_T_TXT : 0; break; case 8: type_to_fetch = type_param&PHP_DNS_AAAA ? DNS_T_AAAA : 0; break; case 9: type_to_fetch = type_param&PHP_DNS_SRV ? DNS_T_SRV : 0; break; case 10: type_to_fetch = type_param&PHP_DNS_NAPTR ? DNS_T_NAPTR : 0; break; case 11: type_to_fetch = type_param&PHP_DNS_A6 ? DNS_T_A6 : 0; break; case PHP_DNS_NUM_TYPES: store_results = 0; continue; default: case (PHP_DNS_NUM_TYPES + 1): type_to_fetch = DNS_T_ANY; break; } if (type_to_fetch) { #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { zval_dtor(return_value); RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { zval_dtor(return_value); RETURN_FALSE; } #else res_init(); #endif n = php_dns_search(handle, hostname, C_IN, type_to_fetch, answer.qb2, sizeof answer); if (n < 0) { php_dns_free_handle(handle); continue; } cp = answer.qb2 + HFIXEDSZ; end = answer.qb2 + n; hp = (HEADER *)&answer; qd = ntohs(hp->qdcount); an = ntohs(hp->ancount); ns = ntohs(hp->nscount); ar = ntohs(hp->arcount); /* Skip QD entries, they're only used by dn_expand later on */ while (qd-- > 0) { n = dn_skipname(cp, end); if (n < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to parse DNS data received"); zval_dtor(return_value); php_dns_free_handle(handle); RETURN_FALSE; } cp += n + QFIXEDSZ; } /* YAY! Our real answers! 
*/ while (an-- && cp && cp < end) { zval *retval; cp = php_parserr(cp, end, &answer, type_to_fetch, store_results, raw, &retval); if (retval != NULL && store_results) { add_next_index_zval(return_value, retval); } } if (authns || addtl) { /* List of Authoritative Name Servers * Process when only requesting addtl so that we can skip through the section */ while (ns-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, end, &answer, DNS_T_ANY, authns != NULL, raw, &retval); if (retval != NULL) { add_next_index_zval(authns, retval); } } } if (addtl) { /* Additional records associated with authoritative name servers */ while (ar-- > 0 && cp && cp < end) { zval *retval = NULL; cp = php_parserr(cp, end, &answer, DNS_T_ANY, 1, raw, &retval); if (retval != NULL) { add_next_index_zval(addtl, retval); } } } php_dns_free_handle(handle); } } } /* }}} */ /* {{{ proto bool dns_get_mx(string hostname, array mxhosts [, array weight]) Get MX records corresponding to a given Internet host name */ PHP_FUNCTION(dns_get_mx) { char *hostname; int hostname_len; zval *mx_list, *weight_list = NULL; int count, qdc; u_short type, weight; u_char ans[MAXPACKET]; char buf[MAXHOSTNAMELEN]; HEADER *hp; u_char *cp, *end; int i; #if defined(HAVE_DNS_SEARCH) struct sockaddr_storage from; uint32_t fromsize = sizeof(from); dns_handle_t handle; #elif defined(HAVE_RES_NSEARCH) struct __res_state state; struct __res_state *handle = &state; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sz|z", &hostname, &hostname_len, &mx_list, &weight_list) == FAILURE) { return; } zval_dtor(mx_list); array_init(mx_list); if (weight_list) { zval_dtor(weight_list); array_init(weight_list); } #if defined(HAVE_DNS_SEARCH) handle = dns_open(NULL); if (handle == NULL) { RETURN_FALSE; } #elif defined(HAVE_RES_NSEARCH) memset(&state, 0, sizeof(state)); if (res_ninit(handle)) { RETURN_FALSE; } #else res_init(); #endif i = php_dns_search(handle, hostname, C_IN, DNS_T_MX, (u_char *)&ans, sizeof(ans)); if (i < 0) { RETURN_FALSE; } if (i > (int)sizeof(ans)) { i = sizeof(ans); } hp = (HEADER *)&ans; cp = (u_char *)&ans + HFIXEDSZ; end = (u_char *)&ans +i; for (qdc = ntohs((unsigned short)hp->qdcount); qdc--; cp += i + QFIXEDSZ) { if ((i = dn_skipname(cp, end)) < 0 ) { php_dns_free_handle(handle); RETURN_FALSE; } } count = ntohs((unsigned short)hp->ancount); while (--count >= 0 && cp < end) { if ((i = dn_skipname(cp, end)) < 0 ) { php_dns_free_handle(handle); RETURN_FALSE; } cp += i; GETSHORT(type, cp); cp += INT16SZ + INT32SZ; GETSHORT(i, cp); if (type != DNS_T_MX) { cp += i; continue; } GETSHORT(weight, cp); if ((i = dn_expand(ans, end, cp, buf, sizeof(buf)-1)) < 0) { php_dns_free_handle(handle); RETURN_FALSE; } cp += i; add_next_index_string(mx_list, buf, 1); if (weight_list) { add_next_index_long(weight_list, weight); } } php_dns_free_handle(handle); RETURN_TRUE; } /* }}} */ #endif /* HAVE_FULL_DNS_FUNCS */ #endif /* !defined(PHP_WIN32) && (HAVE_DNS_SEARCH_FUNC && !(defined(__BEOS__) || defined(NETWARE))) */ #if HAVE_FULL_DNS_FUNCS || defined(PHP_WIN32) PHP_MINIT_FUNCTION(dns) { REGISTER_LONG_CONSTANT("DNS_A", PHP_DNS_A, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_NS", PHP_DNS_NS, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_CNAME", PHP_DNS_CNAME, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_SOA", PHP_DNS_SOA, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_PTR", PHP_DNS_PTR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_HINFO", PHP_DNS_HINFO, CONST_CS | 
CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_MX", PHP_DNS_MX, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_TXT", PHP_DNS_TXT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_SRV", PHP_DNS_SRV, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_NAPTR", PHP_DNS_NAPTR, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_AAAA", PHP_DNS_AAAA, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_A6", PHP_DNS_A6, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_ANY", PHP_DNS_ANY, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT("DNS_ALL", PHP_DNS_ALL, CONST_CS | CONST_PERSISTENT); return SUCCESS; } #endif /* HAVE_FULL_DNS_FUNCS */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) { u_short type, class, dlen; u_long ttl; long n, i; u_short s; u_char *tp, *p; char name[MAXHOSTNAMELEN]; int have_v6_break = 0, in_v6_break = 0; *subarray = NULL; n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2); if (n < 0) { return NULL; } cp += n; GETSHORT(type, cp); GETSHORT(class, cp); GETLONG(ttl, cp); GETSHORT(dlen, cp); if (type_to_fetch != T_ANY && type != type_to_fetch) { cp += dlen; return cp; } if (!store) { cp += dlen; return cp; } ALLOC_INIT_ZVAL(*subarray); array_init(*subarray); add_assoc_string(*subarray, "host", name, 1); add_assoc_string(*subarray, "class", "IN", 1); add_assoc_long(*subarray, "ttl", ttl); if (raw) { add_assoc_long(*subarray, "type", type); add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1); cp += dlen; return cp; } switch (type) { case DNS_T_A: add_assoc_string(*subarray, "type", "A", 1); snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]); add_assoc_string(*subarray, "ip", name, 1); cp += dlen; break; case DNS_T_MX: add_assoc_string(*subarray, "type", "MX", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); /* no break; */ case DNS_T_CNAME: if (type == DNS_T_CNAME) { add_assoc_string(*subarray, "type", "CNAME", 1); } /* no break; */ case DNS_T_NS: if (type == DNS_T_NS) { add_assoc_string(*subarray, "type", "NS", 1); } /* no break; */ case DNS_T_PTR: if (type == DNS_T_PTR) { add_assoc_string(*subarray, "type", "PTR", 1); } n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_HINFO: /* See RFC 1010 for values */ add_assoc_string(*subarray, "type", "HINFO", 1); n = *cp & 0xFF; cp++; add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1); cp += n; n = *cp & 0xFF; cp++; add_assoc_stringl(*subarray, "os", (char*)cp, n, 1); cp += n; break; case DNS_T_TXT: { int ll = 0; zval *entries = NULL; add_assoc_string(*subarray, "type", "TXT", 1); tp = emalloc(dlen + 1); MAKE_STD_ZVAL(entries); array_init(entries); while (ll < dlen) { n = cp[ll]; if ((ll + n) >= dlen) { // Invalid chunk length, truncate n = dlen - (ll + 1); } memcpy(tp + ll , cp + ll + 1, n); add_next_index_stringl(entries, cp + ll + 1, n, 1); ll = ll + n + 1; } tp[dlen] = '\0'; cp += dlen; add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0); add_assoc_zval(*subarray, "entries", entries); } break; case DNS_T_SOA: add_assoc_string(*subarray, "type", "SOA", 1); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "mname", name, 1); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "rname", name, 1); GETLONG(n, cp); add_assoc_long(*subarray, "serial", n); GETLONG(n, cp); add_assoc_long(*subarray, "refresh", n); GETLONG(n, cp); add_assoc_long(*subarray, "retry", n); GETLONG(n, cp); add_assoc_long(*subarray, "expire", n); GETLONG(n, cp); add_assoc_long(*subarray, "minimum-ttl", n); break; case DNS_T_AAAA: tp = (u_char*)name; for(i=0; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } 
} if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "type", "AAAA", 1); add_assoc_string(*subarray, "ipv6", name, 1); break; case DNS_T_A6: p = cp; add_assoc_string(*subarray, "type", "A6", 1); n = ((int)cp[0]) & 0xFF; cp++; add_assoc_long(*subarray, "masklen", n); tp = (u_char*)name; if (n > 15) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } if (n % 16 > 8) { /* Partial short */ if (cp[0] != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } sprintf((char*)tp, "%x", cp[0] & 0xFF); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } cp++; } for (i = (n + 8) / 16; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "ipv6", name, 1); if (cp < p + dlen) { n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "chain", name, 1); } break; case DNS_T_SRV: add_assoc_string(*subarray, "type", "SRV", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); GETSHORT(n, cp); add_assoc_long(*subarray, "weight", n); GETSHORT(n, cp); add_assoc_long(*subarray, "port", n); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_NAPTR: add_assoc_string(*subarray, "type", "NAPTR", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "order", n); GETSHORT(n, cp); add_assoc_long(*subarray, "pref", n); n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1); cp += n; n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1); cp += n; n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1); cp += n; n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "replacement", name, 1); break; default: zval_ptr_dtor(subarray); *subarray = NULL; cp += dlen; break; } return cp; }
static u_char *php_parserr(u_char *cp, u_char *end, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) { u_short type, class, dlen; u_long ttl; long n, i; u_short s; u_char *tp, *p; char name[MAXHOSTNAMELEN]; int have_v6_break = 0, in_v6_break = 0; *subarray = NULL; n = dn_expand(answer->qb2, end, cp, name, sizeof(name) - 2); if (n < 0) { return NULL; } cp += n; CHECKCP(10); GETSHORT(type, cp); GETSHORT(class, cp); GETLONG(ttl, cp); GETSHORT(dlen, cp); CHECKCP(dlen); if (type_to_fetch != T_ANY && type != type_to_fetch) { cp += dlen; return cp; } if (!store) { cp += dlen; return cp; } ALLOC_INIT_ZVAL(*subarray); array_init(*subarray); add_assoc_string(*subarray, "host", name, 1); add_assoc_string(*subarray, "class", "IN", 1); add_assoc_long(*subarray, "ttl", ttl); if (raw) { add_assoc_long(*subarray, "type", type); add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1); cp += dlen; return cp; } switch (type) { case DNS_T_A: CHECKCP(4); add_assoc_string(*subarray, "type", "A", 1); snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]); add_assoc_string(*subarray, "ip", name, 1); cp += dlen; break; case DNS_T_MX: CHECKCP(2); add_assoc_string(*subarray, "type", "MX", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); /* no break; */ case DNS_T_CNAME: if (type == DNS_T_CNAME) { add_assoc_string(*subarray, "type", "CNAME", 1); } /* no break; */ case DNS_T_NS: if (type == DNS_T_NS) { add_assoc_string(*subarray, "type", "NS", 1); } /* no break; */ case DNS_T_PTR: if (type == DNS_T_PTR) { add_assoc_string(*subarray, "type", "PTR", 1); } n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_HINFO: /* See RFC 1010 for values */ add_assoc_string(*subarray, "type", "HINFO", 1); CHECKCP(1); n = *cp & 0xFF; cp++; CHECKCP(n); add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1); cp += n; CHECKCP(1); n = *cp & 0xFF; cp++; CHECKCP(n); add_assoc_stringl(*subarray, "os", (char*)cp, n, 1); cp += n; break; case DNS_T_TXT: { int l1 = 0, l2 = 0; zval *entries = NULL; add_assoc_string(*subarray, "type", "TXT", 1); tp = emalloc(dlen + 1); MAKE_STD_ZVAL(entries); array_init(entries); while (l1 < dlen) { n = cp[l1]; if ((l1 + n) >= dlen) { // Invalid chunk length, truncate n = dlen - (l1 + 1); } if (n) { memcpy(tp + l2 , cp + l1 + 1, n); add_next_index_stringl(entries, cp + l1 + 1, n, 1); } l1 = l1 + n + 1; l2 = l2 + n; } tp[l2] = '\0'; cp += dlen; add_assoc_stringl(*subarray, "txt", tp, l2, 0); add_assoc_zval(*subarray, "entries", entries); } break; case DNS_T_SOA: add_assoc_string(*subarray, "type", "SOA", 1); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "mname", name, 1); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "rname", name, 1); CHECKCP(5*4); GETLONG(n, cp); add_assoc_long(*subarray, "serial", n); GETLONG(n, cp); add_assoc_long(*subarray, "refresh", n); GETLONG(n, cp); add_assoc_long(*subarray, "retry", n); GETLONG(n, cp); add_assoc_long(*subarray, "expire", n); GETLONG(n, cp); add_assoc_long(*subarray, "minimum-ttl", n); break; case DNS_T_AAAA: tp = (u_char*)name; CHECKCP(8*2); for(i=0; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; 
in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "type", "AAAA", 1); add_assoc_string(*subarray, "ipv6", name, 1); break; case DNS_T_A6: p = cp; add_assoc_string(*subarray, "type", "A6", 1); CHECKCP(1); n = ((int)cp[0]) & 0xFF; cp++; add_assoc_long(*subarray, "masklen", n); tp = (u_char*)name; if (n > 15) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } if (n % 16 > 8) { /* Partial short */ if (cp[0] != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } sprintf((char*)tp, "%x", cp[0] & 0xFF); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } cp++; } for (i = (n + 8) / 16; i < 8; i++) { CHECKCP(2); GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "ipv6", name, 1); if (cp < p + dlen) { n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "chain", name, 1); } break; case DNS_T_SRV: CHECKCP(3*2); add_assoc_string(*subarray, "type", "SRV", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); GETSHORT(n, cp); add_assoc_long(*subarray, "weight", n); GETSHORT(n, cp); add_assoc_long(*subarray, "port", n); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_NAPTR: CHECKCP(2*2); add_assoc_string(*subarray, "type", "NAPTR", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "order", n); GETSHORT(n, cp); add_assoc_long(*subarray, "pref", n); CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "flags", (char*)cp, n, 1); cp += n; CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "services", (char*)cp, n, 1); cp += n; CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "regex", (char*)cp, n, 1); cp += n; n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "replacement", name, 1); break; default: zval_ptr_dtor(subarray); *subarray = NULL; cp += dlen; break; } return cp; }
{'added': [(415, '#define CHECKCP(n) do { \\'), (416, '\tif (cp + n > end) { \\'), (417, '\t\treturn NULL; \\'), (418, '\t} \\'), (419, '} while (0)'), (420, ''), (422, 'static u_char *php_parserr(u_char *cp, u_char *end, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)'), (434, '\tn = dn_expand(answer->qb2, end, cp, name, sizeof(name) - 2);'), (440, '\tCHECKCP(10);'), (445, '\tCHECKCP(dlen);'), (472, '\t\t\tCHECKCP(4);'), (479, '\t\t\tCHECKCP(2);'), (498, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);'), (508, '\t\t\tCHECKCP(1);'), (511, '\t\t\tCHECKCP(n);'), (514, '\t\t\tCHECKCP(1);'), (517, '\t\t\tCHECKCP(n);'), (523, '\t\t\t\tint l1 = 0, l2 = 0;'), (532, '\t\t\t\twhile (l1 < dlen) {'), (533, '\t\t\t\t\tn = cp[l1];'), (534, '\t\t\t\t\tif ((l1 + n) >= dlen) {'), (536, '\t\t\t\t\t\tn = dlen - (l1 + 1);'), (537, '\t\t\t\t\t}'), (538, '\t\t\t\t\tif (n) {'), (539, '\t\t\t\t\t\tmemcpy(tp + l2 , cp + l1 + 1, n);'), (540, '\t\t\t\t\t\tadd_next_index_stringl(entries, cp + l1 + 1, n, 1);'), (542, '\t\t\t\t\tl1 = l1 + n + 1;'), (543, '\t\t\t\t\tl2 = l2 + n;'), (545, "\t\t\t\ttp[l2] = '\\0';"), (548, '\t\t\t\tadd_assoc_stringl(*subarray, "txt", tp, l2, 0);'), (554, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2);'), (560, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2);'), (566, '\t\t\tCHECKCP(5*4);'), (580, '\t\t\tCHECKCP(8*2);'), (615, '\t\t\tCHECKCP(1);'), (651, '\t\t\t\tCHECKCP(2);'), (681, '\t\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);'), (690, '\t\t\tCHECKCP(3*2);'), (698, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);'), (706, '\t\t\tCHECKCP(2*2);'), (712, ''), (713, '\t\t\tCHECKCP(1);'), (715, '\t\t\tcp++;'), (716, '\t\t\tCHECKCP(n);'), (717, '\t\t\tadd_assoc_stringl(*subarray, "flags", (char*)cp, n, 1);'), (719, ''), (720, '\t\t\tCHECKCP(1);'), (722, '\t\t\tcp++;'), (723, '\t\t\tCHECKCP(n);'), (724, '\t\t\tadd_assoc_stringl(*subarray, "services", (char*)cp, n, 1);'), (726, ''), (727, '\t\t\tCHECKCP(1);'), (729, '\t\t\tcp++;'), (730, '\t\t\tCHECKCP(n);'), (731, '\t\t\tadd_assoc_stringl(*subarray, "regex", (char*)cp, n, 1);'), (733, ''), (734, '\t\t\tn = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);'), (927, '\t\t\t\tcp = php_parserr(cp, end, &answer, type_to_fetch, store_results, raw, &retval);'), (940, '\t\t\t\t\tcp = php_parserr(cp, end, &answer, DNS_T_ANY, authns != NULL, raw, &retval);'), (952, '\t\t\t\t\tcp = php_parserr(cp, end, &answer, DNS_T_ANY, 1, raw, &retval);')], 'deleted': [(416, 'static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)'), (428, '\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2);'), (488, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);'), (509, '\t\t\t\tint ll = 0;'), (518, '\t\t\t\twhile (ll < dlen) {'), (519, '\t\t\t\t\tn = cp[ll];'), (520, '\t\t\t\t\tif ((ll + n) >= dlen) {'), (522, '\t\t\t\t\t\tn = dlen - (ll + 1);'), (524, '\t\t\t\t\tmemcpy(tp + ll , cp + ll + 1, n);'), (525, '\t\t\t\t\tadd_next_index_stringl(entries, cp + ll + 1, n, 1);'), (526, '\t\t\t\t\tll = ll + n + 1;'), (528, "\t\t\t\ttp[dlen] = '\\0';"), (531, '\t\t\t\tadd_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0);'), (537, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);'), (543, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);'), (660, '\t\t\t\tn = 
dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);'), (676, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);'), (690, '\t\t\tadd_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1);'), (693, '\t\t\tadd_assoc_stringl(*subarray, "services", (char*)++cp, n, 1);'), (696, '\t\t\tadd_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1);'), (698, '\t\t\tn = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);'), (891, '\t\t\t\tcp = php_parserr(cp, &answer, type_to_fetch, store_results, raw, &retval);'), (904, '\t\t\t\t\tcp = php_parserr(cp, &answer, DNS_T_ANY, authns != NULL, raw, &retval);'), (916, '\t\t\t\t\tcp = php_parserr(cp, &answer, DNS_T_ANY, 1, raw, &retval);')]}
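The essence of the fix captured in this diff is that php_parserr() now receives the true end of the received answer and checks, via the CHECKCP macro, that every fixed-size read stays inside it, instead of trusting answer->qb2+65536 as the upper bound. A minimal standalone sketch of that pattern follows; the read_u16 helper and its names are illustrative only and are not part of the PHP source:

#include <stddef.h>
#include <stdint.h>

/* Same shape as the macro added by the patch: refuse to read n bytes
 * unless the cursor cp plus n still fits inside the received packet. */
#define CHECKCP(n) do { \
	if (cp + (n) > end) { \
		return NULL; \
	} \
} while (0)

/* Illustrative helper: read one 16-bit big-endian field from a DNS
 * answer, returning NULL instead of running past the buffer. */
const unsigned char *read_u16(const unsigned char *cp,
                              const unsigned char *end,
                              uint16_t *out)
{
	CHECKCP(2);
	*out = (uint16_t) ((cp[0] << 8) | cp[1]);
	return cp + 2;
}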
60
24
742
4728
https://github.com/php/php-src
CVE-2014-3597
['CWE-119']
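Beyond the bounds checks, the CVE-2014-3597 / CWE-119 patch recorded above also rewrites the DNS_T_TXT loop so that the offset walking the wire-format chunks (l1, which counts the per-chunk length bytes) is kept separate from the offset filling the output buffer (l2, data bytes only). The standalone sketch below mirrors that l1/l2 split; txt_concat is a hypothetical name, and, like the original emalloc(dlen + 1), it assumes the caller supplies an output buffer of at least dlen + 1 bytes:

#include <stddef.h>
#include <string.h>

/* Concatenate the character-strings of a TXT RDATA section of length
 * dlen into out. Returns the number of data bytes written. */
size_t txt_concat(const unsigned char *rdata, size_t dlen, char *out)
{
	size_t l1 = 0;   /* read offset: one length byte plus data per chunk */
	size_t l2 = 0;   /* write offset: data bytes only */

	while (l1 < dlen) {
		size_t n = rdata[l1];
		if (l1 + n >= dlen)          /* truncated chunk: clamp to what is left */
			n = dlen - (l1 + 1);
		if (n)
			memcpy(out + l2, rdata + l1 + 1, n);
		l1 += n + 1;
		l2 += n;
	}
	out[l2] = '\0';
	return l2;
}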
mod_auth_openidc.c
oidc_request_post_preserved_restore
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*************************************************************************** * Copyright (C) 2017-2021 ZmartZone Holding BV * Copyright (C) 2013-2017 Ping Identity Corporation * All rights reserved. * * DISCLAIMER OF WARRANTIES: * * THE SOFTWARE PROVIDED HEREUNDER IS PROVIDED ON AN "AS IS" BASIS, WITHOUT * ANY WARRANTIES OR REPRESENTATIONS EXPRESS, IMPLIED OR STATUTORY; INCLUDING, * WITHOUT LIMITATION, WARRANTIES OF QUALITY, PERFORMANCE, NONINFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. NOR ARE THERE ANY * WARRANTIES CREATED BY A COURSE OR DEALING, COURSE OF PERFORMANCE OR TRADE * USAGE. FURTHERMORE, THERE ARE NO WARRANTIES THAT THE SOFTWARE WILL MEET * YOUR NEEDS OR BE FREE FROM ERRORS, OR THAT THE OPERATION OF THE SOFTWARE * WILL BE UNINTERRUPTED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Initially based on mod_auth_cas.c: * https://github.com/Jasig/mod_auth_cas * * Other code copied/borrowed/adapted: * shared memory caching: mod_auth_mellon * * @Author: Hans Zandbelt - hans.zandbelt@zmartzone.eu * **************************************************************************/ #include "apr_hash.h" #include "apr_strings.h" #include "ap_config.h" #include "ap_provider.h" #include "apr_lib.h" #include "apr_file_io.h" #include "apr_sha1.h" #include "apr_base64.h" #include "httpd.h" #include "http_core.h" #include "http_config.h" #include "http_log.h" #include "http_protocol.h" #include "http_request.h" #include "mod_auth_openidc.h" #define OIDC_REFRESH_ERROR 2 static int oidc_handle_logout_request(request_rec *r, oidc_cfg *c, oidc_session_t *session, const char *url); // TODO: // - sort out oidc_cfg vs. oidc_dir_cfg stuff // - rigid input checking on discovery responses // - check self-issued support // - README.quickstart // - refresh metadata once-per too? (for non-signing key changes) extern module AP_MODULE_DECLARE_DATA auth_openidc_module; /* * clean any suspicious headers in the HTTP request sent by the user agent */ static void oidc_scrub_request_headers(request_rec *r, const char *claim_prefix, apr_hash_t *scrub) { const int prefix_len = claim_prefix ? 
strlen(claim_prefix) : 0; /* get an array representation of the incoming HTTP headers */ const apr_array_header_t *const h = apr_table_elts(r->headers_in); /* table to keep the non-suspicious headers */ apr_table_t *clean_headers = apr_table_make(r->pool, h->nelts); /* loop over the incoming HTTP headers */ const apr_table_entry_t *const e = (const apr_table_entry_t*) h->elts; int i; for (i = 0; i < h->nelts; i++) { const char *const k = e[i].key; /* is this header's name equivalent to a header that needs scrubbing? */ const char *hdr = (k != NULL) && (scrub != NULL) ? apr_hash_get(scrub, k, APR_HASH_KEY_STRING) : NULL; const int header_matches = (hdr != NULL) && (oidc_strnenvcmp(k, hdr, -1) == 0); /* * would this header be interpreted as a mod_auth_openidc attribute? Note * that prefix_len will be zero if no attr_prefix is defined, * so this will always be false. Also note that we do not * scrub headers if the prefix is empty because every header * would match. */ const int prefix_matches = (k != NULL) && prefix_len && (oidc_strnenvcmp(k, claim_prefix, prefix_len) == 0); /* add to the clean_headers if non-suspicious, skip and report otherwise */ if (!prefix_matches && !header_matches) { apr_table_addn(clean_headers, k, e[i].val); } else { oidc_warn(r, "scrubbed suspicious request header (%s: %.32s)", k, e[i].val); } } /* overwrite the incoming headers with the cleaned result */ r->headers_in = clean_headers; } /* * scrub all mod_auth_openidc related headers */ void oidc_scrub_headers(request_rec *r) { oidc_cfg *cfg = ap_get_module_config(r->server->module_config, &auth_openidc_module); const char *prefix = oidc_cfg_claim_prefix(r); apr_hash_t *hdrs = apr_hash_make(r->pool); if (apr_strnatcmp(prefix, "") == 0) { if ((cfg->white_listed_claims != NULL) && (apr_hash_count(cfg->white_listed_claims) > 0)) hdrs = apr_hash_overlay(r->pool, cfg->white_listed_claims, hdrs); else oidc_warn(r, "both " OIDCClaimPrefix " and " OIDCWhiteListedClaims " are empty: this renders an insecure setup!"); } char *authn_hdr = oidc_cfg_dir_authn_header(r); if (authn_hdr != NULL) apr_hash_set(hdrs, authn_hdr, APR_HASH_KEY_STRING, authn_hdr); /* * scrub all headers starting with OIDC_ first */ oidc_scrub_request_headers(r, OIDC_DEFAULT_HEADER_PREFIX, hdrs); /* * then see if the claim headers need to be removed on top of that * (i.e. the prefix does not start with the default OIDC_) */ if ((strstr(prefix, OIDC_DEFAULT_HEADER_PREFIX) != prefix)) { oidc_scrub_request_headers(r, prefix, NULL); } } /* * strip the session cookie from the headers sent to the application/backend */ void oidc_strip_cookies(request_rec *r) { char *cookie, *ctx, *result = NULL; const char *name = NULL; int i; apr_array_header_t *strip = oidc_dir_cfg_strip_cookies(r); char *cookies = apr_pstrdup(r->pool, oidc_util_hdr_in_cookie_get(r)); if ((cookies != NULL) && (strip != NULL)) { oidc_debug(r, "looking for the following cookies to strip from cookie header: %s", apr_array_pstrcat(r->pool, strip, OIDC_CHAR_COMMA)); cookie = apr_strtok(cookies, OIDC_STR_SEMI_COLON, &ctx); do { while (cookie != NULL && *cookie == OIDC_CHAR_SPACE) cookie++; for (i = 0; i < strip->nelts; i++) { name = ((const char**) strip->elts)[i]; if ((strncmp(cookie, name, strlen(name)) == 0) && (cookie[strlen(name)] == OIDC_CHAR_EQUAL)) { oidc_debug(r, "stripping: %s", name); break; } } if (i == strip->nelts) { result = result ? 
apr_psprintf(r->pool, "%s%s %s", result, OIDC_STR_SEMI_COLON, cookie) : cookie; } cookie = apr_strtok(NULL, OIDC_STR_SEMI_COLON, &ctx); } while (cookie != NULL); oidc_util_hdr_in_cookie_set(r, result); } } #define OIDC_SHA1_LEN 20 /* * calculates a hash value based on request fingerprint plus a provided nonce string. */ static char* oidc_get_browser_state_hash(request_rec *r, oidc_cfg *c, const char *nonce) { oidc_debug(r, "enter"); /* helper to hold to header values */ const char *value = NULL; /* the hash context */ apr_sha1_ctx_t sha1; /* Initialize the hash context */ apr_sha1_init(&sha1); if (c->state_input_headers & OIDC_STATE_INPUT_HEADERS_X_FORWARDED_FOR) { /* get the X-FORWARDED-FOR header value */ value = oidc_util_hdr_in_x_forwarded_for_get(r); /* if we have a value for this header, concat it to the hash input */ if (value != NULL) apr_sha1_update(&sha1, value, strlen(value)); } if (c->state_input_headers & OIDC_STATE_INPUT_HEADERS_USER_AGENT) { /* get the USER-AGENT header value */ value = oidc_util_hdr_in_user_agent_get(r); /* if we have a value for this header, concat it to the hash input */ if (value != NULL) apr_sha1_update(&sha1, value, strlen(value)); } /* get the remote client IP address or host name */ /* int remotehost_is_ip; value = ap_get_remote_host(r->connection, r->per_dir_config, REMOTE_NOLOOKUP, &remotehost_is_ip); apr_sha1_update(&sha1, value, strlen(value)); */ /* concat the nonce parameter to the hash input */ apr_sha1_update(&sha1, nonce, strlen(nonce)); /* concat the token binding ID if present */ value = oidc_util_get_provided_token_binding_id(r); if (value != NULL) { oidc_debug(r, "Provided Token Binding ID environment variable found; adding its value to the state"); apr_sha1_update(&sha1, value, strlen(value)); } /* finalize the hash input and calculate the resulting hash output */ unsigned char hash[OIDC_SHA1_LEN]; apr_sha1_final(hash, &sha1); /* base64url-encode the resulting hash and return it */ char *result = NULL; oidc_base64url_encode(r, &result, (const char*) hash, OIDC_SHA1_LEN, TRUE); return result; } /* * return the name for the state cookie */ static char* oidc_get_state_cookie_name(request_rec *r, const char *state) { return apr_psprintf(r->pool, "%s%s", oidc_cfg_dir_state_cookie_prefix(r), state); } /* * return the static provider configuration, i.e. from a metadata URL or configuration primitives */ static apr_byte_t oidc_provider_static_config(request_rec *r, oidc_cfg *c, oidc_provider_t **provider) { json_t *j_provider = NULL; char *s_json = NULL; /* see if we should configure a static provider based on external (cached) metadata */ if ((c->metadata_dir != NULL) || (c->provider.metadata_url == NULL)) { *provider = &c->provider; return TRUE; } oidc_cache_get_provider(r, c->provider.metadata_url, &s_json); if (s_json == NULL) { if (oidc_metadata_provider_retrieve(r, c, NULL, c->provider.metadata_url, &j_provider, &s_json) == FALSE) { oidc_error(r, "could not retrieve metadata from url: %s", c->provider.metadata_url); return FALSE; } oidc_cache_set_provider(r, c->provider.metadata_url, s_json, apr_time_now() + (c->provider_metadata_refresh_interval <= 0 ? 
apr_time_from_sec( OIDC_CACHE_PROVIDER_METADATA_EXPIRY_DEFAULT) : c->provider_metadata_refresh_interval)); } else { oidc_util_decode_json_object(r, s_json, &j_provider); /* check to see if it is valid metadata */ if (oidc_metadata_provider_is_valid(r, c, j_provider, NULL) == FALSE) { oidc_error(r, "cache corruption detected: invalid metadata from url: %s", c->provider.metadata_url); return FALSE; } } *provider = apr_pcalloc(r->pool, sizeof(oidc_provider_t)); memcpy(*provider, &c->provider, sizeof(oidc_provider_t)); if (oidc_metadata_provider_parse(r, c, j_provider, *provider) == FALSE) { oidc_error(r, "could not parse metadata from url: %s", c->provider.metadata_url); if (j_provider) json_decref(j_provider); return FALSE; } json_decref(j_provider); return TRUE; } /* * return the oidc_provider_t struct for the specified issuer */ static oidc_provider_t* oidc_get_provider_for_issuer(request_rec *r, oidc_cfg *c, const char *issuer, apr_byte_t allow_discovery) { /* by default we'll assume that we're dealing with a single statically configured OP */ oidc_provider_t *provider = NULL; if (oidc_provider_static_config(r, c, &provider) == FALSE) return NULL; /* unless a metadata directory was configured, so we'll try and get the provider settings from there */ if (c->metadata_dir != NULL) { /* try and get metadata from the metadata directory for the OP that sent this response */ if ((oidc_metadata_get(r, c, issuer, &provider, allow_discovery) == FALSE) || (provider == NULL)) { /* don't know nothing about this OP/issuer */ oidc_error(r, "no provider metadata found for issuer \"%s\"", issuer); return NULL; } } return provider; } /* * find out whether the request is a response from an IDP discovery page */ static apr_byte_t oidc_is_discovery_response(request_rec *r, oidc_cfg *cfg) { /* * prereq: this is a call to the configured redirect_uri, now see if: * the OIDC_DISC_OP_PARAM is present */ return oidc_util_request_has_parameter(r, OIDC_DISC_OP_PARAM) || oidc_util_request_has_parameter(r, OIDC_DISC_USER_PARAM); } /* * return the HTTP method being called: only for POST data persistence purposes */ static const char* oidc_original_request_method(request_rec *r, oidc_cfg *cfg, apr_byte_t handle_discovery_response) { const char *method = OIDC_METHOD_GET; char *m = NULL; if ((handle_discovery_response == TRUE) && (oidc_util_request_matches_url(r, oidc_get_redirect_uri(r, cfg))) && (oidc_is_discovery_response(r, cfg))) { oidc_util_get_request_parameter(r, OIDC_DISC_RM_PARAM, &m); if (m != NULL) method = apr_pstrdup(r->pool, m); } else { /* * if POST preserve is not enabled for this location, there's no point in preserving * the method either which would result in POSTing empty data on return; * so we revert to legacy behavior */ if (oidc_cfg_dir_preserve_post(r) == 0) return OIDC_METHOD_GET; const char *content_type = oidc_util_hdr_in_content_type_get(r); if ((r->method_number == M_POST) && (apr_strnatcmp(content_type, OIDC_CONTENT_TYPE_FORM_ENCODED) == 0)) method = OIDC_METHOD_FORM_POST; } oidc_debug(r, "return: %s", method); return method; } /* * send an OpenID Connect authorization request to the specified provider preserving POST parameters using HTML5 storage */ apr_byte_t oidc_post_preserve_javascript(request_rec *r, const char *location, char **javascript, char **javascript_method) { if (oidc_cfg_dir_preserve_post(r) == 0) return FALSE; oidc_debug(r, "enter"); oidc_cfg *cfg = ap_get_module_config(r->server->module_config, &auth_openidc_module); const char *method = oidc_original_request_method(r, 
cfg, FALSE); if (apr_strnatcmp(method, OIDC_METHOD_FORM_POST) != 0) return FALSE; /* read the parameters that are POST-ed to us */ apr_table_t *params = apr_table_make(r->pool, 8); if (oidc_util_read_post_params(r, params, FALSE, NULL) == FALSE) { oidc_error(r, "something went wrong when reading the POST parameters"); return FALSE; } const apr_array_header_t *arr = apr_table_elts(params); const apr_table_entry_t *elts = (const apr_table_entry_t*) arr->elts; int i; char *json = ""; for (i = 0; i < arr->nelts; i++) { json = apr_psprintf(r->pool, "%s'%s': '%s'%s", json, oidc_util_escape_string(r, elts[i].key), oidc_util_escape_string(r, elts[i].val), i < arr->nelts - 1 ? "," : ""); } json = apr_psprintf(r->pool, "{ %s }", json); const char *jmethod = "preserveOnLoad"; const char *jscript = apr_psprintf(r->pool, " <script type=\"text/javascript\">\n" " function %s() {\n" " sessionStorage.setItem('mod_auth_openidc_preserve_post_params', JSON.stringify(%s));\n" " %s" " }\n" " </script>\n", jmethod, json, location ? apr_psprintf(r->pool, "window.location='%s';\n", location) : ""); if (location == NULL) { if (javascript_method) *javascript_method = apr_pstrdup(r->pool, jmethod); if (javascript) *javascript = apr_pstrdup(r->pool, jscript); } else { oidc_util_html_send(r, "Preserving...", jscript, jmethod, "<p>Preserving...</p>", OK); } return TRUE; } /* * restore POST parameters on original_url from HTML5 local storage */ static int oidc_request_post_preserved_restore(request_rec *r, const char *original_url) { oidc_debug(r, "enter: original_url=%s", original_url); const char *method = "postOnLoad"; const char *script = apr_psprintf(r->pool, " <script type=\"text/javascript\">\n" " function str_decode(string) {\n" " try {\n" " result = decodeURIComponent(string);\n" " } catch (e) {\n" " result = unescape(string);\n" " }\n" " return result;\n" " }\n" " function %s() {\n" " var mod_auth_openidc_preserve_post_params = JSON.parse(sessionStorage.getItem('mod_auth_openidc_preserve_post_params'));\n" " sessionStorage.removeItem('mod_auth_openidc_preserve_post_params');\n" " for (var key in mod_auth_openidc_preserve_post_params) {\n" " var input = document.createElement(\"input\");\n" " input.name = str_decode(key);\n" " input.value = str_decode(mod_auth_openidc_preserve_post_params[key]);\n" " input.type = \"hidden\";\n" " document.forms[0].appendChild(input);\n" " }\n" " document.forms[0].action = '%s';\n" " document.forms[0].submit();\n" " }\n" " </script>\n", method, original_url); const char *body = " <p>Restoring...</p>\n" " <form method=\"post\"></form>\n"; return oidc_util_html_send(r, "Restoring...", script, method, body, OK); } typedef struct oidc_state_cookies_t { char *name; apr_time_t timestamp; struct oidc_state_cookies_t *next; } oidc_state_cookies_t; static int oidc_delete_oldest_state_cookies(request_rec *r, int number_of_valid_state_cookies, int max_number_of_state_cookies, oidc_state_cookies_t *first) { oidc_state_cookies_t *cur = NULL, *prev = NULL, *prev_oldest = NULL, *oldest = NULL; while (number_of_valid_state_cookies >= max_number_of_state_cookies) { oldest = first; prev_oldest = NULL; prev = first; cur = first->next; while (cur) { if ((cur->timestamp < oldest->timestamp)) { oldest = cur; prev_oldest = prev; } prev = cur; cur = cur->next; } oidc_warn(r, "deleting oldest state cookie: %s (time until expiry %" APR_TIME_T_FMT " seconds)", oldest->name, apr_time_sec(oldest->timestamp - apr_time_now())); oidc_util_set_cookie(r, oldest->name, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); if 
(prev_oldest) prev_oldest->next = oldest->next; else first = first->next; number_of_valid_state_cookies--; } return number_of_valid_state_cookies; } /* * clean state cookies that have expired i.e. for outstanding requests that will never return * successfully and return the number of remaining valid cookies/outstanding-requests while * doing so */ static int oidc_clean_expired_state_cookies(request_rec *r, oidc_cfg *c, const char *currentCookieName, int delete_oldest) { int number_of_valid_state_cookies = 0; oidc_state_cookies_t *first = NULL, *last = NULL; char *cookie, *tokenizerCtx = NULL; char *cookies = apr_pstrdup(r->pool, oidc_util_hdr_in_cookie_get(r)); if (cookies != NULL) { cookie = apr_strtok(cookies, OIDC_STR_SEMI_COLON, &tokenizerCtx); while (cookie != NULL) { while (*cookie == OIDC_CHAR_SPACE) cookie++; if (strstr(cookie, oidc_cfg_dir_state_cookie_prefix(r)) == cookie) { char *cookieName = cookie; while (cookie != NULL && *cookie != OIDC_CHAR_EQUAL) cookie++; if (*cookie == OIDC_CHAR_EQUAL) { *cookie = '\0'; cookie++; if ((currentCookieName == NULL) || (apr_strnatcmp(cookieName, currentCookieName) != 0)) { oidc_proto_state_t *proto_state = oidc_proto_state_from_cookie(r, c, cookie); if (proto_state != NULL) { json_int_t ts = oidc_proto_state_get_timestamp( proto_state); if (apr_time_now() > ts + apr_time_from_sec(c->state_timeout)) { oidc_warn(r, "state (%s) has expired (original_url=%s)", cookieName, oidc_proto_state_get_original_url( proto_state)); oidc_util_set_cookie(r, cookieName, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); } else { if (first == NULL) { first = apr_pcalloc(r->pool, sizeof(oidc_state_cookies_t)); last = first; } else { last->next = apr_pcalloc(r->pool, sizeof(oidc_state_cookies_t)); last = last->next; } last->name = cookieName; last->timestamp = ts; last->next = NULL; number_of_valid_state_cookies++; } oidc_proto_state_destroy(proto_state); } else { oidc_warn(r, "state cookie could not be retrieved/decoded, deleting: %s", cookieName); oidc_util_set_cookie(r, cookieName, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); } } } } cookie = apr_strtok(NULL, OIDC_STR_SEMI_COLON, &tokenizerCtx); } } if (delete_oldest > 0) number_of_valid_state_cookies = oidc_delete_oldest_state_cookies(r, number_of_valid_state_cookies, c->max_number_of_state_cookies, first); return number_of_valid_state_cookies; } /* * restore the state that was maintained between authorization request and response in an encrypted cookie */ static apr_byte_t oidc_restore_proto_state(request_rec *r, oidc_cfg *c, const char *state, oidc_proto_state_t **proto_state) { oidc_debug(r, "enter"); const char *cookieName = oidc_get_state_cookie_name(r, state); /* clean expired state cookies to avoid pollution */ oidc_clean_expired_state_cookies(r, c, cookieName, FALSE); /* get the state cookie value first */ char *cookieValue = oidc_util_get_cookie(r, cookieName); if (cookieValue == NULL) { oidc_error(r, "no \"%s\" state cookie found: check domain and samesite cookie settings", cookieName); return FALSE; } /* clear state cookie because we don't need it anymore */ oidc_util_set_cookie(r, cookieName, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); *proto_state = oidc_proto_state_from_cookie(r, c, cookieValue); if (*proto_state == NULL) return FALSE; const char *nonce = oidc_proto_state_get_nonce(*proto_state); /* calculate the hash of the browser fingerprint concatenated with the nonce */ char *calc = oidc_get_browser_state_hash(r, c, nonce); /* compare the calculated hash with the value provided in the authorization 
response */ if (apr_strnatcmp(calc, state) != 0) { oidc_error(r, "calculated state from cookie does not match state parameter passed back in URL: \"%s\" != \"%s\"", state, calc); oidc_proto_state_destroy(*proto_state); return FALSE; } apr_time_t ts = oidc_proto_state_get_timestamp(*proto_state); /* check that the timestamp is not beyond the valid interval */ if (apr_time_now() > ts + apr_time_from_sec(c->state_timeout)) { oidc_error(r, "state has expired"); if ((c->default_sso_url == NULL) || (apr_table_get(r->subprocess_env, "OIDC_NO_DEFAULT_URL_ON_STATE_TIMEOUT") != NULL)) { oidc_util_html_send_error(r, c->error_template, "Invalid Authentication Response", apr_psprintf(r->pool, "This is due to a timeout; please restart your authentication session by re-entering the URL/bookmark you originally wanted to access: %s", oidc_proto_state_get_original_url(*proto_state)), OK); /* * a hack for Apache 2.4 to prevent it from writing its own 500/400/302 HTML document * text by making ap_send_error_response in http_protocol.c return early... */ r->header_only = 1; } oidc_proto_state_destroy(*proto_state); return FALSE; } /* add the state */ oidc_proto_state_set_state(*proto_state, state); /* log the restored state object */ oidc_debug(r, "restored state: %s", oidc_proto_state_to_string(r, *proto_state)); /* we've made it */ return TRUE; } /* * set the state that is maintained between an authorization request and an authorization response * in a cookie in the browser that is cryptographically bound to that state */ static int oidc_authorization_request_set_cookie(request_rec *r, oidc_cfg *c, const char *state, oidc_proto_state_t *proto_state) { /* * create a cookie consisting of 8 elements: * random value, original URL, original method, issuer, response_type, response_mod, prompt and timestamp * encoded as JSON, encrypting the resulting JSON value */ char *cookieValue = oidc_proto_state_to_cookie(r, c, proto_state); if (cookieValue == NULL) return HTTP_INTERNAL_SERVER_ERROR; /* * clean expired state cookies to avoid pollution and optionally * try to avoid the number of state cookies exceeding a max */ int number_of_cookies = oidc_clean_expired_state_cookies(r, c, NULL, oidc_cfg_delete_oldest_state_cookies(c)); int max_number_of_cookies = oidc_cfg_max_number_of_state_cookies(c); if ((max_number_of_cookies > 0) && (number_of_cookies >= max_number_of_cookies)) { oidc_warn(r, "the number of existing, valid state cookies (%d) has exceeded the limit (%d), no additional authorization request + state cookie can be generated, aborting the request", number_of_cookies, max_number_of_cookies); /* * TODO: the html_send code below caters for the case that there's a user behind a * browser generating this request, rather than a piece of XHR code; how would an * XHR client handle this? 
*/ /* * it appears that sending content with a 503 turns the HTTP status code * into a 200 so we'll avoid that for now: the user will see Apache specific * readable text anyway * return oidc_util_html_send_error(r, c->error_template, "Too Many Outstanding Requests", apr_psprintf(r->pool, "No authentication request could be generated since there are too many outstanding authentication requests already; you may have to wait up to %d seconds to be able to create a new request", c->state_timeout), HTTP_SERVICE_UNAVAILABLE); */ return HTTP_SERVICE_UNAVAILABLE; } /* assemble the cookie name for the state cookie */ const char *cookieName = oidc_get_state_cookie_name(r, state); /* set it as a cookie */ oidc_util_set_cookie(r, cookieName, cookieValue, -1, OIDC_COOKIE_SAMESITE_LAX(c, r)); return OK; } /* * get the mod_auth_openidc related context from the (userdata in the) request * (used for passing state between various Apache request processing stages and hook callbacks) */ static apr_table_t* oidc_request_state(request_rec *rr) { /* our state is always stored in the main request */ request_rec *r = (rr->main != NULL) ? rr->main : rr; /* our state is a table, get it */ apr_table_t *state = NULL; apr_pool_userdata_get((void**) &state, OIDC_USERDATA_KEY, r->pool); /* if it does not exist, we'll create a new table */ if (state == NULL) { state = apr_table_make(r->pool, 5); apr_pool_userdata_set(state, OIDC_USERDATA_KEY, NULL, r->pool); } /* return the resulting table, always non-null now */ return state; } /* * set a name/value pair in the mod_auth_openidc-specific request context * (used for passing state between various Apache request processing stages and hook callbacks) */ void oidc_request_state_set(request_rec *r, const char *key, const char *value) { /* get a handle to the global state, which is a table */ apr_table_t *state = oidc_request_state(r); /* put the name/value pair in that table */ apr_table_set(state, key, value); } /* * get a name/value pair from the mod_auth_openidc-specific request context * (used for passing state between various Apache request processing stages and hook callbacks) */ const char* oidc_request_state_get(request_rec *r, const char *key) { /* get a handle to the global state, which is a table */ apr_table_t *state = oidc_request_state(r); /* return the value from the table */ return apr_table_get(state, key); } /* * set the claims from a JSON object (c.q. 
id_token or user_info response) stored * in the session in to HTTP headers passed on to the application */ static apr_byte_t oidc_set_app_claims(request_rec *r, const oidc_cfg *const cfg, oidc_session_t *session, const char *s_claims) { json_t *j_claims = NULL; /* decode the string-encoded attributes in to a JSON structure */ if (s_claims != NULL) { if (oidc_util_decode_json_object(r, s_claims, &j_claims) == FALSE) return FALSE; } /* set the resolved claims a HTTP headers for the application */ if (j_claims != NULL) { oidc_util_set_app_infos(r, j_claims, oidc_cfg_claim_prefix(r), cfg->claim_delimiter, oidc_cfg_dir_pass_info_in_headers(r), oidc_cfg_dir_pass_info_in_envvars(r), oidc_cfg_dir_pass_info_base64url(r)); /* release resources */ json_decref(j_claims); } return TRUE; } static int oidc_authenticate_user(request_rec *r, oidc_cfg *c, oidc_provider_t *provider, const char *original_url, const char *login_hint, const char *id_token_hint, const char *prompt, const char *auth_request_params, const char *path_scope); /* * log message about max session duration */ static void oidc_log_session_expires(request_rec *r, const char *msg, apr_time_t session_expires) { char buf[APR_RFC822_DATE_LEN + 1]; apr_rfc822_date(buf, session_expires); oidc_debug(r, "%s: %s (in %" APR_TIME_T_FMT " secs from now)", msg, buf, apr_time_sec(session_expires - apr_time_now())); } /* * see if this is a non-browser request */ static apr_byte_t oidc_is_xml_http_request(request_rec *r) { if ((oidc_util_hdr_in_x_requested_with_get(r) != NULL) && (apr_strnatcasecmp(oidc_util_hdr_in_x_requested_with_get(r), OIDC_HTTP_HDR_VAL_XML_HTTP_REQUEST) == 0)) return TRUE; if ((oidc_util_hdr_in_accept_contains(r, OIDC_CONTENT_TYPE_TEXT_HTML) == FALSE) && (oidc_util_hdr_in_accept_contains(r, OIDC_CONTENT_TYPE_APP_XHTML_XML) == FALSE) && (oidc_util_hdr_in_accept_contains(r, OIDC_CONTENT_TYPE_ANY) == FALSE)) return TRUE; return FALSE; } /* * find out which action we need to take when encountering an unauthenticated request */ static int oidc_handle_unauthenticated_user(request_rec *r, oidc_cfg *c) { /* see if we've configured OIDCUnAuthAction for this path */ switch (oidc_dir_cfg_unauth_action(r)) { case OIDC_UNAUTH_RETURN410: return HTTP_GONE; case OIDC_UNAUTH_RETURN407: return HTTP_PROXY_AUTHENTICATION_REQUIRED; case OIDC_UNAUTH_RETURN401: return HTTP_UNAUTHORIZED; case OIDC_UNAUTH_PASS: r->user = ""; /* * we're not going to pass information about an authenticated user to the application, * but we do need to scrub the headers that mod_auth_openidc would set for security reasons */ oidc_scrub_headers(r); return OK; case OIDC_UNAUTH_AUTHENTICATE: /* * exception handling: if this looks like a XMLHttpRequest call we * won't redirect the user and thus avoid creating a state cookie * for a non-browser (= Javascript) call that will never return from the OP */ if ((oidc_dir_cfg_unauth_expr_is_set(r) == FALSE) && (oidc_is_xml_http_request(r) == TRUE)) return HTTP_UNAUTHORIZED; } /* * else: no session (regardless of whether it is main or sub-request), * and we need to authenticate the user */ return oidc_authenticate_user(r, c, NULL, oidc_get_current_url(r), NULL, NULL, NULL, oidc_dir_cfg_path_auth_request_params(r), oidc_dir_cfg_path_scope(r)); } /* * check if maximum session duration was exceeded */ static int oidc_check_max_session_duration(request_rec *r, oidc_cfg *cfg, oidc_session_t *session) { /* get the session expiry from the session data */ apr_time_t session_expires = oidc_session_get_session_expires(r, session); /* check the expire 
timestamp against the current time */ if (apr_time_now() > session_expires) { oidc_warn(r, "maximum session duration exceeded for user: %s", session->remote_user); oidc_session_kill(r, session); return oidc_handle_unauthenticated_user(r, cfg); } /* log message about max session duration */ oidc_log_session_expires(r, "session max lifetime", session_expires); return OK; } /* * validate received session cookie against the domain it was issued for: * * this handles the case where the cache configured is a the same single memcache, Redis, or file * backend for different (virtual) hosts, or a client-side cookie protected with the same secret * * it also handles the case that a cookie is unexpectedly shared across multiple hosts in * name-based virtual hosting even though the OP(s) would be the same */ static apr_byte_t oidc_check_cookie_domain(request_rec *r, oidc_cfg *cfg, oidc_session_t *session) { const char *c_cookie_domain = cfg->cookie_domain ? cfg->cookie_domain : oidc_get_current_url_host(r); const char *s_cookie_domain = oidc_session_get_cookie_domain(r, session); if ((s_cookie_domain == NULL) || (apr_strnatcmp(c_cookie_domain, s_cookie_domain) != 0)) { oidc_warn(r, "aborting: detected attempt to play cookie against a different domain/host than issued for! (issued=%s, current=%s)", s_cookie_domain, c_cookie_domain); return FALSE; } return TRUE; } /* * get a handle to the provider configuration via the "issuer" stored in the session */ apr_byte_t oidc_get_provider_from_session(request_rec *r, oidc_cfg *c, oidc_session_t *session, oidc_provider_t **provider) { oidc_debug(r, "enter"); /* get the issuer value from the session state */ const char *issuer = oidc_session_get_issuer(r, session); if (issuer == NULL) { oidc_warn(r, "empty or invalid session: no issuer found"); return FALSE; } /* get the provider info associated with the issuer value */ oidc_provider_t *p = oidc_get_provider_for_issuer(r, c, issuer, FALSE); if (p == NULL) { oidc_error(r, "session corrupted: no provider found for issuer: %s", issuer); return FALSE; } *provider = p; return TRUE; } /* * store claims resolved from the userinfo endpoint in the session */ static void oidc_store_userinfo_claims(request_rec *r, oidc_cfg *c, oidc_session_t *session, oidc_provider_t *provider, const char *claims, const char *userinfo_jwt) { oidc_debug(r, "enter"); /* see if we've resolved any claims */ if (claims != NULL) { /* * Successfully decoded a set claims from the response so we can store them * (well actually the stringified representation in the response) * in the session context safely now */ oidc_session_set_userinfo_claims(r, session, claims); if (c->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { /* this will also clear the entry if a JWT was not returned at this point */ oidc_session_set_userinfo_jwt(r, session, userinfo_jwt); } } else { /* * clear the existing claims because we could not refresh them */ oidc_session_set_userinfo_claims(r, session, NULL); oidc_session_set_userinfo_jwt(r, session, NULL); } /* store the last refresh time if we've configured a userinfo refresh interval */ if (provider->userinfo_refresh_interval > 0) oidc_session_reset_userinfo_last_refresh(r, session); } /* * execute refresh token grant to refresh the existing access token */ static apr_byte_t oidc_refresh_access_token(request_rec *r, oidc_cfg *c, oidc_session_t *session, oidc_provider_t *provider, char **new_access_token) { oidc_debug(r, "enter"); /* get the refresh token that was stored in the session */ const char *refresh_token = 
oidc_session_get_refresh_token(r, session); if (refresh_token == NULL) { oidc_warn(r, "refresh token routine called but no refresh_token found in the session"); return FALSE; } /* elements returned in the refresh response */ char *s_id_token = NULL; int expires_in = -1; char *s_token_type = NULL; char *s_access_token = NULL; char *s_refresh_token = NULL; /* refresh the tokens by calling the token endpoint */ if (oidc_proto_refresh_request(r, c, provider, refresh_token, &s_id_token, &s_access_token, &s_token_type, &expires_in, &s_refresh_token) == FALSE) { oidc_error(r, "access_token could not be refreshed"); return FALSE; } /* store the new access_token in the session and discard the old one */ oidc_session_set_access_token(r, session, s_access_token); oidc_session_set_access_token_expires(r, session, expires_in); /* reset the access token refresh timestamp */ oidc_session_reset_access_token_last_refresh(r, session); /* see if we need to return it as a parameter */ if (new_access_token != NULL) *new_access_token = s_access_token; /* if we have a new refresh token (rolling refresh), store it in the session and overwrite the old one */ if (s_refresh_token != NULL) oidc_session_set_refresh_token(r, session, s_refresh_token); return TRUE; } /* * retrieve claims from the userinfo endpoint and return the stringified response */ static const char* oidc_retrieve_claims_from_userinfo_endpoint(request_rec *r, oidc_cfg *c, oidc_provider_t *provider, const char *access_token, oidc_session_t *session, char *id_token_sub, char **userinfo_jwt) { oidc_debug(r, "enter"); char *result = NULL; char *refreshed_access_token = NULL; /* see if a userinfo endpoint is set, otherwise there's nothing to do for us */ if (provider->userinfo_endpoint_url == NULL) { oidc_debug(r, "not retrieving userinfo claims because userinfo_endpoint is not set"); return NULL; } /* see if there's an access token, otherwise we can't call the userinfo endpoint at all */ if (access_token == NULL) { oidc_debug(r, "not retrieving userinfo claims because access_token is not provided"); return NULL; } if ((id_token_sub == NULL) && (session != NULL)) { // when refreshing claims from the userinfo endpoint json_t *id_token_claims = oidc_session_get_idtoken_claims_json(r, session); if (id_token_claims == NULL) { oidc_error(r, "no id_token_claims found in session"); return NULL; } oidc_jose_get_string(r->pool, id_token_claims, OIDC_CLAIM_SUB, FALSE, &id_token_sub, NULL); } // TODO: return code should indicate whether the token expired or some other error occurred // TODO: long-term: session storage should be JSON (with explicit types and less conversion, using standard routines) /* try to get claims from the userinfo endpoint using the provided access token */ if (oidc_proto_resolve_userinfo(r, c, provider, id_token_sub, access_token, &result, userinfo_jwt) == FALSE) { /* see if we have an existing session and we are refreshing the user info claims */ if (session != NULL) { /* first call to user info endpoint failed, but the access token may have just expired, so refresh it */ if (oidc_refresh_access_token(r, c, session, provider, &refreshed_access_token) == TRUE) { /* try again with the new access token */ if (oidc_proto_resolve_userinfo(r, c, provider, id_token_sub, refreshed_access_token, &result, userinfo_jwt) == FALSE) { oidc_error(r, "resolving user info claims with the refreshed access token failed, nothing will be stored in the session"); result = NULL; } } else { oidc_warn(r, "refreshing access token failed, claims will not be 
retrieved/refreshed from the userinfo endpoint"); result = NULL; } } else { oidc_error(r, "resolving user info claims with the existing/provided access token failed, nothing will be stored in the session"); result = NULL; } } return result; } /* * get (new) claims from the userinfo endpoint */ static apr_byte_t oidc_refresh_claims_from_userinfo_endpoint(request_rec *r, oidc_cfg *cfg, oidc_session_t *session) { oidc_provider_t *provider = NULL; const char *claims = NULL; const char *access_token = NULL; char *userinfo_jwt = NULL; /* get the current provider info */ if (oidc_get_provider_from_session(r, cfg, session, &provider) == FALSE) return FALSE; /* see if we can do anything here, i.e. we have a userinfo endpoint and a refresh interval is configured */ apr_time_t interval = apr_time_from_sec( provider->userinfo_refresh_interval); oidc_debug(r, "userinfo_endpoint=%s, interval=%d", provider->userinfo_endpoint_url, provider->userinfo_refresh_interval); if ((provider->userinfo_endpoint_url != NULL) && (interval > 0)) { /* get the last refresh timestamp from the session info */ apr_time_t last_refresh = oidc_session_get_userinfo_last_refresh(r, session); oidc_debug(r, "refresh needed in: %" APR_TIME_T_FMT " seconds", apr_time_sec(last_refresh + interval - apr_time_now())); /* see if we need to refresh again */ if (last_refresh + interval < apr_time_now()) { /* get the current access token */ access_token = oidc_session_get_access_token(r, session); /* retrieve the current claims */ claims = oidc_retrieve_claims_from_userinfo_endpoint(r, cfg, provider, access_token, session, NULL, &userinfo_jwt); /* store claims resolved from userinfo endpoint */ oidc_store_userinfo_claims(r, cfg, session, provider, claims, userinfo_jwt); /* indicated something changed */ return TRUE; } } return FALSE; } /* * copy the claims and id_token from the session to the request state and optionally return them */ static void oidc_copy_tokens_to_request_state(request_rec *r, oidc_session_t *session, const char **s_id_token, const char **s_claims) { const char *id_token = oidc_session_get_idtoken_claims(r, session); const char *claims = oidc_session_get_userinfo_claims(r, session); oidc_debug(r, "id_token=%s claims=%s", id_token, claims); if (id_token != NULL) { oidc_request_state_set(r, OIDC_REQUEST_STATE_KEY_IDTOKEN, id_token); if (s_id_token != NULL) *s_id_token = id_token; } if (claims != NULL) { oidc_request_state_set(r, OIDC_REQUEST_STATE_KEY_CLAIMS, claims); if (s_claims != NULL) *s_claims = claims; } } /* * pass refresh_token, access_token and access_token_expires as headers/environment variables to the application */ static apr_byte_t oidc_session_pass_tokens(request_rec *r, oidc_cfg *cfg, oidc_session_t *session, apr_byte_t *needs_save) { apr_byte_t pass_headers = oidc_cfg_dir_pass_info_in_headers(r); apr_byte_t pass_envvars = oidc_cfg_dir_pass_info_in_envvars(r); apr_byte_t pass_base64url = oidc_cfg_dir_pass_info_base64url(r); /* set the refresh_token in the app headers/variables, if enabled for this location/directory */ const char *refresh_token = oidc_session_get_refresh_token(r, session); if ((oidc_cfg_dir_pass_refresh_token(r) != 0) && (refresh_token != NULL)) { /* pass it to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_REFRESH_TOKEN, refresh_token, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } /* set the access_token in the app headers/variables */ const char *access_token = oidc_session_get_access_token(r, session); if (access_token 
!= NULL) { /* pass it to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_ACCESS_TOKEN, access_token, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } /* set the expiry timestamp in the app headers/variables */ const char *access_token_expires = oidc_session_get_access_token_expires(r, session); if (access_token_expires != NULL) { /* pass it to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_ACCESS_TOKEN_EXP, access_token_expires, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } /* * reset the session inactivity timer * but only do this once per 10% of the inactivity timeout interval (with a max to 60 seconds) * for performance reasons * * now there's a small chance that the session ends 10% (or a minute) earlier than configured/expected * cq. when there's a request after a recent save (so no update) and then no activity happens until * a request comes in just before the session should expire * ("recent" and "just before" refer to 10%-with-a-max-of-60-seconds of the inactivity interval after * the start/last-update and before the expiry of the session respectively) * * this is be deemed acceptable here because of performance gain */ apr_time_t interval = apr_time_from_sec(cfg->session_inactivity_timeout); apr_time_t now = apr_time_now(); apr_time_t slack = interval / 10; if (slack > apr_time_from_sec(60)) slack = apr_time_from_sec(60); if (session->expiry - now < interval - slack) { session->expiry = now + interval; *needs_save = TRUE; } /* log message about session expiry */ oidc_log_session_expires(r, "session inactivity timeout", session->expiry); return TRUE; } static apr_byte_t oidc_refresh_access_token_before_expiry(request_rec *r, oidc_cfg *cfg, oidc_session_t *session, int ttl_minimum, int logout_on_error) { const char *s_access_token_expires = NULL; apr_time_t t_expires = -1; oidc_provider_t *provider = NULL; oidc_debug(r, "ttl_minimum=%d", ttl_minimum); if (ttl_minimum < 0) return FALSE; s_access_token_expires = oidc_session_get_access_token_expires(r, session); if (s_access_token_expires == NULL) { oidc_debug(r, "no access token expires_in stored in the session (i.e. 
returned from in the authorization response), so cannot refresh the access token based on TTL requirement"); return FALSE; } if (oidc_session_get_refresh_token(r, session) == NULL) { oidc_debug(r, "no refresh token stored in the session, so cannot refresh the access token based on TTL requirement"); return FALSE; } if (sscanf(s_access_token_expires, "%" APR_TIME_T_FMT, &t_expires) != 1) { oidc_error(r, "could not parse s_access_token_expires %s", s_access_token_expires); return FALSE; } t_expires = apr_time_from_sec(t_expires - ttl_minimum); oidc_debug(r, "refresh needed in: %" APR_TIME_T_FMT " seconds", apr_time_sec(t_expires - apr_time_now())); if (t_expires > apr_time_now()) return FALSE; if (oidc_get_provider_from_session(r, cfg, session, &provider) == FALSE) return FALSE; if (oidc_refresh_access_token(r, cfg, session, provider, NULL) == FALSE) { oidc_warn(r, "access_token could not be refreshed, logout=%d", logout_on_error & OIDC_LOGOUT_ON_ERROR_REFRESH); if (logout_on_error & OIDC_LOGOUT_ON_ERROR_REFRESH) return OIDC_REFRESH_ERROR; else return FALSE; } return TRUE; } /* * handle the case where we have identified an existing authentication session for a user */ static int oidc_handle_existing_session(request_rec *r, oidc_cfg *cfg, oidc_session_t *session, apr_byte_t *needs_save) { apr_byte_t rv = FALSE; oidc_debug(r, "enter"); /* set the user in the main request for further (incl. sub-request) processing */ r->user = apr_pstrdup(r->pool, session->remote_user); oidc_debug(r, "set remote_user to \"%s\"", r->user); /* get the header name in which the remote user name needs to be passed */ char *authn_header = oidc_cfg_dir_authn_header(r); apr_byte_t pass_headers = oidc_cfg_dir_pass_info_in_headers(r); apr_byte_t pass_envvars = oidc_cfg_dir_pass_info_in_envvars(r); apr_byte_t pass_base64url = oidc_cfg_dir_pass_info_base64url(r); /* verify current cookie domain against issued cookie domain */ if (oidc_check_cookie_domain(r, cfg, session) == FALSE) return HTTP_UNAUTHORIZED; /* check if the maximum session duration was exceeded */ int rc = oidc_check_max_session_duration(r, cfg, session); if (rc != OK) return rc; /* if needed, refresh the access token */ rv = oidc_refresh_access_token_before_expiry(r, cfg, session, oidc_cfg_dir_refresh_access_token_before_expiry(r), oidc_cfg_dir_logout_on_error_refresh(r)); if (rv == OIDC_REFRESH_ERROR) { *needs_save = FALSE; return oidc_handle_logout_request(r, cfg, session, cfg->default_slo_url); } *needs_save |= rv; /* if needed, refresh claims from the user info endpoint */ if (oidc_refresh_claims_from_userinfo_endpoint(r, cfg, session) == TRUE) *needs_save = TRUE; /* * we're going to pass the information that we have to the application, * but first we need to scrub the headers that we're going to use for security reasons */ oidc_scrub_headers(r); /* set the user authentication HTTP header if set and required */ if ((r->user != NULL) && (authn_header != NULL)) oidc_util_hdr_in_set(r, authn_header, r->user); const char *s_claims = NULL; const char *s_id_token = NULL; /* copy id_token and claims from session to request state and obtain their values */ oidc_copy_tokens_to_request_state(r, session, &s_id_token, &s_claims); if ((cfg->pass_userinfo_as & OIDC_PASS_USERINFO_AS_CLAIMS)) { /* set the userinfo claims in the app headers */ if (oidc_set_app_claims(r, cfg, session, s_claims) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; } if ((cfg->pass_userinfo_as & OIDC_PASS_USERINFO_AS_JSON_OBJECT)) { /* pass the userinfo JSON object to the app in a header or 
environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_USERINFO_JSON, s_claims, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } if ((cfg->pass_userinfo_as & OIDC_PASS_USERINFO_AS_JWT)) { if (cfg->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { /* get the compact serialized JWT from the session */ const char *s_userinfo_jwt = oidc_session_get_userinfo_jwt(r, session); if (s_userinfo_jwt != NULL) { /* pass the compact serialized JWT to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_USERINFO_JWT, s_userinfo_jwt, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } else { oidc_debug(r, "configured to pass userinfo in a JWT, but no such JWT was found in the session (probably no such JWT was returned from the userinfo endpoint)"); } } else { oidc_error(r, "session type \"client-cookie\" does not allow storing/passing a userinfo JWT; use \"" OIDCSessionType " server-cache\" for that"); } } if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_CLAIMS)) { /* set the id_token in the app headers */ if (oidc_set_app_claims(r, cfg, session, s_id_token) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; } if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_PAYLOAD)) { /* pass the id_token JSON object to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_ID_TOKEN_PAYLOAD, s_id_token, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_SERIALIZED)) { if (cfg->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { /* get the compact serialized JWT from the session */ const char *s_id_token = oidc_session_get_idtoken(r, session); /* pass the compact serialized JWT to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_ID_TOKEN, s_id_token, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } else { oidc_error(r, "session type \"client-cookie\" does not allow storing/passing the id_token; use \"" OIDCSessionType " server-cache\" for that"); } } /* pass the at, rt and at expiry to the application, possibly update the session expiry */ if (oidc_session_pass_tokens(r, cfg, session, needs_save) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; /* return "user authenticated" status */ return OK; } /* * helper function for basic/implicit client flows upon receiving an authorization response: * check that it matches the state stored in the browser and return the variables associated * with the state, such as original_url and OP oidc_provider_t pointer. 
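 *
 * A minimal usage sketch (illustrative only, mirroring the caller
 * oidc_handle_authorization_response further below; variable names are the caller's):
 *
 *   oidc_provider_t *provider = NULL;
 *   oidc_proto_state_t *proto_state = NULL;
 *   if (oidc_authorization_response_match_state(r, c,
 *           apr_table_get(params, OIDC_PROTO_STATE), &provider, &proto_state) == FALSE) {
 *       // invalid or expired state: fall back to c->default_sso_url or return an error
 *   }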
*/ static apr_byte_t oidc_authorization_response_match_state(request_rec *r, oidc_cfg *c, const char *state, struct oidc_provider_t **provider, oidc_proto_state_t **proto_state) { oidc_debug(r, "enter (state=%s)", state); if ((state == NULL) || (apr_strnatcmp(state, "") == 0)) { oidc_error(r, "state parameter is not set"); return FALSE; } /* check the state parameter against what we stored in a cookie */ if (oidc_restore_proto_state(r, c, state, proto_state) == FALSE) { oidc_error(r, "unable to restore state"); return FALSE; } *provider = oidc_get_provider_for_issuer(r, c, oidc_proto_state_get_issuer(*proto_state), FALSE); if (*provider == NULL) { oidc_proto_state_destroy(*proto_state); *proto_state = NULL; return FALSE; } return TRUE; } /* * redirect the browser to the session logout endpoint */ static int oidc_session_redirect_parent_window_to_logout(request_rec *r, oidc_cfg *c) { oidc_debug(r, "enter"); char *java_script = apr_psprintf(r->pool, " <script type=\"text/javascript\">\n" " window.top.location.href = '%s?session=logout';\n" " </script>\n", oidc_get_redirect_uri(r, c)); return oidc_util_html_send(r, "Redirecting...", java_script, NULL, NULL, OK); } /* * handle an error returned by the OP */ static int oidc_authorization_response_error(request_rec *r, oidc_cfg *c, oidc_proto_state_t *proto_state, const char *error, const char *error_description) { const char *prompt = oidc_proto_state_get_prompt(proto_state); if (prompt != NULL) prompt = apr_pstrdup(r->pool, prompt); oidc_proto_state_destroy(proto_state); if ((prompt != NULL) && (apr_strnatcmp(prompt, OIDC_PROTO_PROMPT_NONE) == 0)) { return oidc_session_redirect_parent_window_to_logout(r, c); } return oidc_util_html_send_error(r, c->error_template, apr_psprintf(r->pool, "OpenID Connect Provider error: %s", error), error_description, OK); } /* * get the r->user for this request based on the configuration for OIDC/OAuth */ apr_byte_t oidc_get_remote_user(request_rec *r, const char *claim_name, const char *reg_exp, const char *replace, json_t *json, char **request_user) { /* get the claim value from the JSON object */ json_t *username = json_object_get(json, claim_name); if ((username == NULL) || (!json_is_string(username))) { oidc_warn(r, "JSON object did not contain a \"%s\" string", claim_name); return FALSE; } *request_user = apr_pstrdup(r->pool, json_string_value(username)); if (reg_exp != NULL) { char *error_str = NULL; if (replace == NULL) { if (oidc_util_regexp_first_match(r->pool, *request_user, reg_exp, request_user, &error_str) == FALSE) { oidc_error(r, "oidc_util_regexp_first_match failed: %s", error_str); *request_user = NULL; return FALSE; } } else if (oidc_util_regexp_substitute(r->pool, *request_user, reg_exp, replace, request_user, &error_str) == FALSE) { oidc_error(r, "oidc_util_regexp_substitute failed: %s", error_str); *request_user = NULL; return FALSE; } } return TRUE; } /* * set the unique user identifier that will be propagated in the Apache r->user and REMOTE_USER variables */ static apr_byte_t oidc_set_request_user(request_rec *r, oidc_cfg *c, oidc_provider_t *provider, oidc_jwt_t *jwt, const char *s_claims) { char *issuer = provider->issuer; char *claim_name = apr_pstrdup(r->pool, c->remote_user_claim.claim_name); int n = strlen(claim_name); apr_byte_t post_fix_with_issuer = (claim_name[n - 1] == OIDC_CHAR_AT); if (post_fix_with_issuer == TRUE) { claim_name[n - 1] = '\0'; issuer = (strstr(issuer, "https://") == NULL) ? 
apr_pstrdup(r->pool, issuer) : apr_pstrdup(r->pool, issuer + strlen("https://")); } /* extract the username claim (default: "sub") from the id_token payload or user claims */ apr_byte_t rc = FALSE; char *remote_user = NULL; json_t *claims = NULL; oidc_util_decode_json_object(r, s_claims, &claims); if (claims == NULL) { rc = oidc_get_remote_user(r, claim_name, c->remote_user_claim.reg_exp, c->remote_user_claim.replace, jwt->payload.value.json, &remote_user); } else { oidc_util_json_merge(r, jwt->payload.value.json, claims); rc = oidc_get_remote_user(r, claim_name, c->remote_user_claim.reg_exp, c->remote_user_claim.replace, claims, &remote_user); json_decref(claims); } if ((rc == FALSE) || (remote_user == NULL)) { oidc_error(r, "" OIDCRemoteUserClaim "is set to \"%s\", but could not set the remote user based on the requested claim \"%s\" and the available claims for the user", c->remote_user_claim.claim_name, claim_name); return FALSE; } if (post_fix_with_issuer == TRUE) remote_user = apr_psprintf(r->pool, "%s%s%s", remote_user, OIDC_STR_AT, issuer); r->user = apr_pstrdup(r->pool, remote_user); oidc_debug(r, "set remote_user to \"%s\" based on claim: \"%s\"%s", r->user, c->remote_user_claim.claim_name, c->remote_user_claim.reg_exp ? apr_psprintf(r->pool, " and expression: \"%s\" and replace string: \"%s\"", c->remote_user_claim.reg_exp, c->remote_user_claim.replace) : ""); return TRUE; } static char* oidc_make_sid_iss_unique(request_rec *r, const char *sid, const char *issuer) { return apr_psprintf(r->pool, "%s@%s", sid, issuer); } /* * store resolved information in the session */ static apr_byte_t oidc_save_in_session(request_rec *r, oidc_cfg *c, oidc_session_t *session, oidc_provider_t *provider, const char *remoteUser, const char *id_token, oidc_jwt_t *id_token_jwt, const char *claims, const char *access_token, const int expires_in, const char *refresh_token, const char *session_state, const char *state, const char *original_url, const char *userinfo_jwt) { /* store the user in the session */ session->remote_user = remoteUser; /* set the session expiry to the inactivity timeout */ session->expiry = apr_time_now() + apr_time_from_sec(c->session_inactivity_timeout); /* store the claims payload in the id_token for later reference */ oidc_session_set_idtoken_claims(r, session, id_token_jwt->payload.value.str); if (c->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { /* store the compact serialized representation of the id_token for later reference */ oidc_session_set_idtoken(r, session, id_token); } /* store the issuer in the session (at least needed for session mgmt and token refresh */ oidc_session_set_issuer(r, session, provider->issuer); /* store the state and original URL in the session for handling browser-back more elegantly */ oidc_session_set_request_state(r, session, state); oidc_session_set_original_url(r, session, original_url); if ((session_state != NULL) && (provider->check_session_iframe != NULL)) { /* store the session state and required parameters session management */ oidc_session_set_session_state(r, session, session_state); oidc_debug(r, "session management enabled: stored session_state (%s), check_session_iframe (%s) and client_id (%s) in the session", session_state, provider->check_session_iframe, provider->client_id); } else if (provider->check_session_iframe == NULL) { oidc_debug(r, "session management disabled: \"check_session_iframe\" is not set in provider configuration"); } else { oidc_debug(r, "session management disabled: no \"session_state\" value is provided 
in the authentication response even though \"check_session_iframe\" (%s) is set in the provider configuration", provider->check_session_iframe); } /* store claims resolved from userinfo endpoint */ oidc_store_userinfo_claims(r, c, session, provider, claims, userinfo_jwt); /* see if we have an access_token */ if (access_token != NULL) { /* store the access_token in the session context */ oidc_session_set_access_token(r, session, access_token); /* store the associated expires_in value */ oidc_session_set_access_token_expires(r, session, expires_in); /* reset the access token refresh timestamp */ oidc_session_reset_access_token_last_refresh(r, session); } /* see if we have a refresh_token */ if (refresh_token != NULL) { /* store the refresh_token in the session context */ oidc_session_set_refresh_token(r, session, refresh_token); } /* store max session duration in the session as a hard cut-off expiry timestamp */ apr_time_t session_expires = (provider->session_max_duration == 0) ? apr_time_from_sec(id_token_jwt->payload.exp) : (apr_time_now() + apr_time_from_sec(provider->session_max_duration)); oidc_session_set_session_expires(r, session, session_expires); oidc_debug(r, "provider->session_max_duration = %d, session_expires=%" APR_TIME_T_FMT, provider->session_max_duration, session_expires); /* log message about max session duration */ oidc_log_session_expires(r, "session max lifetime", session_expires); /* store the domain for which this session is valid */ oidc_session_set_cookie_domain(r, session, c->cookie_domain ? c->cookie_domain : oidc_get_current_url_host(r)); char *sid = NULL; oidc_debug(r, "provider->backchannel_logout_supported=%d", provider->backchannel_logout_supported); if (provider->backchannel_logout_supported > 0) { oidc_jose_get_string(r->pool, id_token_jwt->payload.value.json, OIDC_CLAIM_SID, FALSE, &sid, NULL); if (sid == NULL) sid = id_token_jwt->payload.sub; session->sid = oidc_make_sid_iss_unique(r, sid, provider->issuer); } /* store the session */ return oidc_session_save(r, session, TRUE); } /* * parse the expiry for the access token */ static int oidc_parse_expires_in(request_rec *r, const char *expires_in) { if (expires_in != NULL) { char *ptr = NULL; long number = strtol(expires_in, &ptr, 10); if (number <= 0) { oidc_warn(r, "could not convert \"expires_in\" value (%s) to a number", expires_in); return -1; } return number; } return -1; } /* * handle the different flows (hybrid, implicit, Authorization Code) */ static apr_byte_t oidc_handle_flows(request_rec *r, oidc_cfg *c, oidc_proto_state_t *proto_state, oidc_provider_t *provider, apr_table_t *params, const char *response_mode, oidc_jwt_t **jwt) { apr_byte_t rc = FALSE; const char *requested_response_type = oidc_proto_state_get_response_type( proto_state); /* handle the requested response type/mode */ if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_CODE_IDTOKEN_TOKEN)) { rc = oidc_proto_authorization_response_code_idtoken_token(r, c, proto_state, provider, params, response_mode, jwt); } else if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_CODE_IDTOKEN)) { rc = oidc_proto_authorization_response_code_idtoken(r, c, proto_state, provider, params, response_mode, jwt); } else if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_CODE_TOKEN)) { rc = oidc_proto_handle_authorization_response_code_token(r, c, proto_state, provider, params, response_mode, jwt); } else if 
(oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_CODE)) { rc = oidc_proto_handle_authorization_response_code(r, c, proto_state, provider, params, response_mode, jwt); } else if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_IDTOKEN_TOKEN)) { rc = oidc_proto_handle_authorization_response_idtoken_token(r, c, proto_state, provider, params, response_mode, jwt); } else if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_IDTOKEN)) { rc = oidc_proto_handle_authorization_response_idtoken(r, c, proto_state, provider, params, response_mode, jwt); } else { oidc_error(r, "unsupported response type: \"%s\"", requested_response_type); } if ((rc == FALSE) && (*jwt != NULL)) { oidc_jwt_destroy(*jwt); *jwt = NULL; } return rc; } /* handle the browser back on an authorization response */ static apr_byte_t oidc_handle_browser_back(request_rec *r, const char *r_state, oidc_session_t *session) { /* see if we have an existing session and browser-back was used */ const char *s_state = NULL, *o_url = NULL; if (session->remote_user != NULL) { s_state = oidc_session_get_request_state(r, session); o_url = oidc_session_get_original_url(r, session); if ((r_state != NULL) && (s_state != NULL) && (apr_strnatcmp(r_state, s_state) == 0)) { /* log the browser back event detection */ oidc_warn(r, "browser back detected, redirecting to original URL: %s", o_url); /* go back to the URL that he originally tried to access */ oidc_util_hdr_out_location_set(r, o_url); return TRUE; } } return FALSE; } /* * complete the handling of an authorization response by obtaining, parsing and verifying the * id_token and storing the authenticated user state in the session */ static int oidc_handle_authorization_response(request_rec *r, oidc_cfg *c, oidc_session_t *session, apr_table_t *params, const char *response_mode) { oidc_debug(r, "enter, response_mode=%s", response_mode); oidc_provider_t *provider = NULL; oidc_proto_state_t *proto_state = NULL; oidc_jwt_t *jwt = NULL; /* see if this response came from a browser-back event */ if (oidc_handle_browser_back(r, apr_table_get(params, OIDC_PROTO_STATE), session) == TRUE) return HTTP_MOVED_TEMPORARILY; /* match the returned state parameter against the state stored in the browser */ if (oidc_authorization_response_match_state(r, c, apr_table_get(params, OIDC_PROTO_STATE), &provider, &proto_state) == FALSE) { if (c->default_sso_url != NULL) { oidc_warn(r, "invalid authorization response state; a default SSO URL is set, sending the user there: %s", c->default_sso_url); oidc_util_hdr_out_location_set(r, c->default_sso_url); //oidc_util_hdr_err_out_add(r, "Location", c->default_sso_url)); return HTTP_MOVED_TEMPORARILY; } oidc_error(r, "invalid authorization response state and no default SSO URL is set, sending an error..."); // if content was already returned via html/http send then don't return 500 // but send 200 to avoid extraneous internal error document text to be sent return ((r->user) && (strncmp(r->user, "", 1) == 0)) ? 
OK : HTTP_BAD_REQUEST; } /* see if the response is an error response */ if (apr_table_get(params, OIDC_PROTO_ERROR) != NULL) return oidc_authorization_response_error(r, c, proto_state, apr_table_get(params, OIDC_PROTO_ERROR), apr_table_get(params, OIDC_PROTO_ERROR_DESCRIPTION)); /* handle the code, implicit or hybrid flow */ if (oidc_handle_flows(r, c, proto_state, provider, params, response_mode, &jwt) == FALSE) return oidc_authorization_response_error(r, c, proto_state, "Error in handling response type.", NULL); if (jwt == NULL) { oidc_error(r, "no id_token was provided"); return oidc_authorization_response_error(r, c, proto_state, "No id_token was provided.", NULL); } int expires_in = oidc_parse_expires_in(r, apr_table_get(params, OIDC_PROTO_EXPIRES_IN)); char *userinfo_jwt = NULL; /* * optionally resolve additional claims against the userinfo endpoint * parsed claims are not actually used here but need to be parsed anyway for error checking purposes */ const char *claims = oidc_retrieve_claims_from_userinfo_endpoint(r, c, provider, apr_table_get(params, OIDC_PROTO_ACCESS_TOKEN), NULL, jwt->payload.sub, &userinfo_jwt); /* restore the original protected URL that the user was trying to access */ const char *original_url = oidc_proto_state_get_original_url(proto_state); if (original_url != NULL) original_url = apr_pstrdup(r->pool, original_url); const char *original_method = oidc_proto_state_get_original_method( proto_state); if (original_method != NULL) original_method = apr_pstrdup(r->pool, original_method); const char *prompt = oidc_proto_state_get_prompt(proto_state); /* set the user */ if (oidc_set_request_user(r, c, provider, jwt, claims) == TRUE) { /* session management: if the user in the new response is not equal to the old one, error out */ if ((prompt != NULL) && (apr_strnatcmp(prompt, OIDC_PROTO_PROMPT_NONE) == 0)) { // TOOD: actually need to compare sub? 
(need to store it in the session separately then //const char *sub = NULL; //oidc_session_get(r, session, "sub", &sub); //if (apr_strnatcmp(sub, jwt->payload.sub) != 0) { if (apr_strnatcmp(session->remote_user, r->user) != 0) { oidc_warn(r, "user set from new id_token is different from current one"); oidc_jwt_destroy(jwt); return oidc_authorization_response_error(r, c, proto_state, "User changed!", NULL); } } /* store resolved information in the session */ if (oidc_save_in_session(r, c, session, provider, r->user, apr_table_get(params, OIDC_PROTO_ID_TOKEN), jwt, claims, apr_table_get(params, OIDC_PROTO_ACCESS_TOKEN), expires_in, apr_table_get(params, OIDC_PROTO_REFRESH_TOKEN), apr_table_get(params, OIDC_PROTO_SESSION_STATE), apr_table_get(params, OIDC_PROTO_STATE), original_url, userinfo_jwt) == FALSE) { oidc_proto_state_destroy(proto_state); oidc_jwt_destroy(jwt); return HTTP_INTERNAL_SERVER_ERROR; } } else { oidc_error(r, "remote user could not be set"); oidc_jwt_destroy(jwt); return oidc_authorization_response_error(r, c, proto_state, "Remote user could not be set: contact the website administrator", NULL); } /* cleanup */ oidc_proto_state_destroy(proto_state); oidc_jwt_destroy(jwt); /* check that we've actually authenticated a user; functions as error handling for oidc_get_remote_user */ if (r->user == NULL) return HTTP_UNAUTHORIZED; /* log the successful response */ oidc_debug(r, "session created and stored, returning to original URL: %s, original method: %s", original_url, original_method); /* check whether form post data was preserved; if so restore it */ if (apr_strnatcmp(original_method, OIDC_METHOD_FORM_POST) == 0) { return oidc_request_post_preserved_restore(r, original_url); } /* now we've authenticated the user so go back to the URL that he originally tried to access */ oidc_util_hdr_out_location_set(r, original_url); /* do the actual redirect to the original URL */ return HTTP_MOVED_TEMPORARILY; } /* * handle an OpenID Connect Authorization Response using the POST (+fragment->POST) response_mode */ static int oidc_handle_post_authorization_response(request_rec *r, oidc_cfg *c, oidc_session_t *session) { oidc_debug(r, "enter"); /* initialize local variables */ char *response_mode = NULL; /* read the parameters that are POST-ed to us */ apr_table_t *params = apr_table_make(r->pool, 8); if (oidc_util_read_post_params(r, params, FALSE, NULL) == FALSE) { oidc_error(r, "something went wrong when reading the POST parameters"); return HTTP_INTERNAL_SERVER_ERROR; } /* see if we've got any POST-ed data at all */ if ((apr_table_elts(params)->nelts < 1) || ((apr_table_elts(params)->nelts == 1) && apr_table_get(params, OIDC_PROTO_RESPONSE_MODE) && (apr_strnatcmp( apr_table_get(params, OIDC_PROTO_RESPONSE_MODE), OIDC_PROTO_RESPONSE_MODE_FRAGMENT) == 0))) { return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "You've hit an OpenID Connect Redirect URI with no parameters, this is an invalid request; you should not open this URL in your browser directly, or have the server administrator use a different " OIDCRedirectURI " setting.", HTTP_INTERNAL_SERVER_ERROR); } /* get the parameters */ response_mode = (char*) apr_table_get(params, OIDC_PROTO_RESPONSE_MODE); /* do the actual implicit work */ return oidc_handle_authorization_response(r, c, session, params, response_mode ? 
response_mode : OIDC_PROTO_RESPONSE_MODE_FORM_POST); } /* * handle an OpenID Connect Authorization Response using the redirect response_mode */ static int oidc_handle_redirect_authorization_response(request_rec *r, oidc_cfg *c, oidc_session_t *session) { oidc_debug(r, "enter"); /* read the parameters from the query string */ apr_table_t *params = apr_table_make(r->pool, 8); oidc_util_read_form_encoded_params(r, params, r->args); /* do the actual work */ return oidc_handle_authorization_response(r, c, session, params, OIDC_PROTO_RESPONSE_MODE_QUERY); } /* * present the user with an OP selection screen */ static int oidc_discovery(request_rec *r, oidc_cfg *cfg) { oidc_debug(r, "enter"); /* obtain the URL we're currently accessing, to be stored in the state/session */ char *current_url = oidc_get_current_url(r); const char *method = oidc_original_request_method(r, cfg, FALSE); /* generate CSRF token */ char *csrf = NULL; if (oidc_proto_generate_nonce(r, &csrf, 8) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; char *path_scopes = oidc_dir_cfg_path_scope(r); char *path_auth_request_params = oidc_dir_cfg_path_auth_request_params(r); char *discover_url = oidc_cfg_dir_discover_url(r); /* see if there's an external discovery page configured */ if (discover_url != NULL) { /* yes, assemble the parameters for external discovery */ char *url = apr_psprintf(r->pool, "%s%s%s=%s&%s=%s&%s=%s&%s=%s", discover_url, strchr(discover_url, OIDC_CHAR_QUERY) != NULL ? OIDC_STR_AMP : OIDC_STR_QUERY, OIDC_DISC_RT_PARAM, oidc_util_escape_string(r, current_url), OIDC_DISC_RM_PARAM, method, OIDC_DISC_CB_PARAM, oidc_util_escape_string(r, oidc_get_redirect_uri(r, cfg)), OIDC_CSRF_NAME, oidc_util_escape_string(r, csrf)); if (path_scopes != NULL) url = apr_psprintf(r->pool, "%s&%s=%s", url, OIDC_DISC_SC_PARAM, oidc_util_escape_string(r, path_scopes)); if (path_auth_request_params != NULL) url = apr_psprintf(r->pool, "%s&%s=%s", url, OIDC_DISC_AR_PARAM, oidc_util_escape_string(r, path_auth_request_params)); /* log what we're about to do */ oidc_debug(r, "redirecting to external discovery page: %s", url); /* set CSRF cookie */ oidc_util_set_cookie(r, OIDC_CSRF_NAME, csrf, -1, OIDC_COOKIE_SAMESITE_STRICT(cfg, r)); /* see if we need to preserve POST parameters through Javascript/HTML5 storage */ if (oidc_post_preserve_javascript(r, url, NULL, NULL) == TRUE) return OK; /* do the actual redirect to an external discovery page */ oidc_util_hdr_out_location_set(r, url); return HTTP_MOVED_TEMPORARILY; } /* get a list of all providers configured in the metadata directory */ apr_array_header_t *arr = NULL; if (oidc_metadata_list(r, cfg, &arr) == FALSE) return oidc_util_html_send_error(r, cfg->error_template, "Configuration Error", "No configured providers found, contact your administrator", HTTP_UNAUTHORIZED); /* assemble a where-are-you-from IDP discovery HTML page */ const char *s = " <h3>Select your OpenID Connect Identity Provider</h3>\n"; /* list all configured providers in there */ int i; for (i = 0; i < arr->nelts; i++) { const char *issuer = ((const char**) arr->elts)[i]; // TODO: html escape (especially & character) char *href = apr_psprintf(r->pool, "%s?%s=%s&amp;%s=%s&amp;%s=%s&amp;%s=%s", oidc_get_redirect_uri(r, cfg), OIDC_DISC_OP_PARAM, oidc_util_escape_string(r, issuer), OIDC_DISC_RT_PARAM, oidc_util_escape_string(r, current_url), OIDC_DISC_RM_PARAM, method, OIDC_CSRF_NAME, csrf); if (path_scopes != NULL) href = apr_psprintf(r->pool, "%s&amp;%s=%s", href, OIDC_DISC_SC_PARAM, oidc_util_escape_string(r, path_scopes)); if 
(path_auth_request_params != NULL) href = apr_psprintf(r->pool, "%s&amp;%s=%s", href, OIDC_DISC_AR_PARAM, oidc_util_escape_string(r, path_auth_request_params)); char *display = (strstr(issuer, "https://") == NULL) ? apr_pstrdup(r->pool, issuer) : apr_pstrdup(r->pool, issuer + strlen("https://")); /* strip port number */ //char *p = strstr(display, ":"); //if (p != NULL) *p = '\0'; /* point back to the redirect_uri, where the selection is handled, with an IDP selection and return_to URL */ s = apr_psprintf(r->pool, "%s<p><a href=\"%s\">%s</a></p>\n", s, href, display); } /* add an option to enter an account or issuer name for dynamic OP discovery */ s = apr_psprintf(r->pool, "%s<form method=\"get\" action=\"%s\">\n", s, oidc_get_redirect_uri(r, cfg)); s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_DISC_RT_PARAM, current_url); s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_DISC_RM_PARAM, method); s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_CSRF_NAME, csrf); if (path_scopes != NULL) s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_DISC_SC_PARAM, path_scopes); if (path_auth_request_params != NULL) s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_DISC_AR_PARAM, path_auth_request_params); s = apr_psprintf(r->pool, "%s<p>Or enter your account name (eg. &quot;mike@seed.gluu.org&quot;, or an IDP identifier (eg. &quot;mitreid.org&quot;):</p>\n", s); s = apr_psprintf(r->pool, "%s<p><input type=\"text\" name=\"%s\" value=\"%s\"></p>\n", s, OIDC_DISC_OP_PARAM, ""); s = apr_psprintf(r->pool, "%s<p><input type=\"submit\" value=\"Submit\"></p>\n", s); s = apr_psprintf(r->pool, "%s</form>\n", s); oidc_util_set_cookie(r, OIDC_CSRF_NAME, csrf, -1, OIDC_COOKIE_SAMESITE_STRICT(cfg, r)); char *javascript = NULL, *javascript_method = NULL; char *html_head = "<style type=\"text/css\">body {text-align: center}</style>"; if (oidc_post_preserve_javascript(r, NULL, &javascript, &javascript_method) == TRUE) html_head = apr_psprintf(r->pool, "%s%s", html_head, javascript); /* now send the HTML contents to the user agent */ return oidc_util_html_send(r, "OpenID Connect Provider Discovery", html_head, javascript_method, s, OK); } /* * authenticate the user to the selected OP, if the OP is not selected yet perform discovery first */ static int oidc_authenticate_user(request_rec *r, oidc_cfg *c, oidc_provider_t *provider, const char *original_url, const char *login_hint, const char *id_token_hint, const char *prompt, const char *auth_request_params, const char *path_scope) { oidc_debug(r, "enter"); if (provider == NULL) { // TODO: should we use an explicit redirect to the discovery endpoint (maybe a "discovery" param to the redirect_uri)? 
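/*
 * Provider selection (descriptive note): when no provider is passed in, either
 * present the OP discovery page (oidc_discovery, when a metadata directory with
 * multiple OPs is configured) or fall back to the single statically configured
 * provider via oidc_provider_static_config below.
 */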
if (c->metadata_dir != NULL) return oidc_discovery(r, c); /* we're not using multiple OP's configured in a metadata directory, pick the statically configured OP */ if (oidc_provider_static_config(r, c, &provider) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; } /* generate the random nonce value that correlates requests and responses */ char *nonce = NULL; if (oidc_proto_generate_nonce(r, &nonce, OIDC_PROTO_NONCE_LENGTH) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; char *pkce_state = NULL; char *code_challenge = NULL; if ((oidc_util_spaced_string_contains(r->pool, provider->response_type, OIDC_PROTO_CODE) == TRUE) && (provider->pkce != NULL)) { /* generate the code verifier value that correlates authorization requests and code exchange requests */ if (provider->pkce->state(r, &pkce_state) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; /* generate the PKCE code challenge */ if (provider->pkce->challenge(r, pkce_state, &code_challenge) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; } /* create the state between request/response */ oidc_proto_state_t *proto_state = oidc_proto_state_new(); oidc_proto_state_set_original_url(proto_state, original_url); oidc_proto_state_set_original_method(proto_state, oidc_original_request_method(r, c, TRUE)); oidc_proto_state_set_issuer(proto_state, provider->issuer); oidc_proto_state_set_response_type(proto_state, provider->response_type); oidc_proto_state_set_nonce(proto_state, nonce); oidc_proto_state_set_timestamp_now(proto_state); if (provider->response_mode) oidc_proto_state_set_response_mode(proto_state, provider->response_mode); if (prompt) oidc_proto_state_set_prompt(proto_state, prompt); if (pkce_state) oidc_proto_state_set_pkce_state(proto_state, pkce_state); /* get a hash value that fingerprints the browser concatenated with the random input */ char *state = oidc_get_browser_state_hash(r, c, nonce); /* * create state that restores the context when the authorization response comes in * and cryptographically bind it to the browser */ int rc = oidc_authorization_request_set_cookie(r, c, state, proto_state); if (rc != OK) { oidc_proto_state_destroy(proto_state); return rc; } /* * printout errors if Cookie settings are not going to work * TODO: separate this code out into its own function */ apr_uri_t o_uri; memset(&o_uri, 0, sizeof(apr_uri_t)); apr_uri_t r_uri; memset(&r_uri, 0, sizeof(apr_uri_t)); apr_uri_parse(r->pool, original_url, &o_uri); apr_uri_parse(r->pool, oidc_get_redirect_uri(r, c), &r_uri); if ((apr_strnatcmp(o_uri.scheme, r_uri.scheme) != 0) && (apr_strnatcmp(r_uri.scheme, "https") == 0)) { oidc_error(r, "the URL scheme (%s) of the configured " OIDCRedirectURI " does not match the URL scheme of the URL being accessed (%s): the \"state\" and \"session\" cookies will not be shared between the two!", r_uri.scheme, o_uri.scheme); oidc_proto_state_destroy(proto_state); return HTTP_INTERNAL_SERVER_ERROR; } if (c->cookie_domain == NULL) { if (apr_strnatcmp(o_uri.hostname, r_uri.hostname) != 0) { char *p = strstr(o_uri.hostname, r_uri.hostname); if ((p == NULL) || (apr_strnatcmp(r_uri.hostname, p) != 0)) { oidc_error(r, "the URL hostname (%s) of the configured " OIDCRedirectURI " does not match the URL hostname of the URL being accessed (%s): the \"state\" and \"session\" cookies will not be shared between the two!", r_uri.hostname, o_uri.hostname); oidc_proto_state_destroy(proto_state); return HTTP_INTERNAL_SERVER_ERROR; } } } else { if (!oidc_util_cookie_domain_valid(r_uri.hostname, c->cookie_domain)) { oidc_error(r, "the domain (%s) configured in " 
OIDCCookieDomain " does not match the URL hostname (%s) of the URL being accessed (%s): setting \"state\" and \"session\" cookies will not work!!", c->cookie_domain, o_uri.hostname, original_url); oidc_proto_state_destroy(proto_state); return HTTP_INTERNAL_SERVER_ERROR; } } /* send off to the OpenID Connect Provider */ // TODO: maybe show intermediate/progress screen "redirecting to" return oidc_proto_authorization_request(r, provider, login_hint, oidc_get_redirect_uri_iss(r, c, provider), state, proto_state, id_token_hint, code_challenge, auth_request_params, path_scope); } /* * check if the target_link_uri matches to configuration settings to prevent an open redirect */ static int oidc_target_link_uri_matches_configuration(request_rec *r, oidc_cfg *cfg, const char *target_link_uri) { apr_uri_t o_uri; apr_uri_parse(r->pool, target_link_uri, &o_uri); if (o_uri.hostname == NULL) { oidc_error(r, "could not parse the \"target_link_uri\" (%s) in to a valid URL: aborting.", target_link_uri); return FALSE; } apr_uri_t r_uri; apr_uri_parse(r->pool, oidc_get_redirect_uri(r, cfg), &r_uri); if (cfg->cookie_domain == NULL) { /* cookie_domain set: see if the target_link_uri matches the redirect_uri host (because the session cookie will be set host-wide) */ if (apr_strnatcmp(o_uri.hostname, r_uri.hostname) != 0) { char *p = strstr(o_uri.hostname, r_uri.hostname); if ((p == NULL) || (apr_strnatcmp(r_uri.hostname, p) != 0)) { oidc_error(r, "the URL hostname (%s) of the configured " OIDCRedirectURI " does not match the URL hostname of the \"target_link_uri\" (%s): aborting to prevent an open redirect.", r_uri.hostname, o_uri.hostname); return FALSE; } } } else { /* cookie_domain set: see if the target_link_uri is within the cookie_domain */ char *p = strstr(o_uri.hostname, cfg->cookie_domain); if ((p == NULL) || (apr_strnatcmp(cfg->cookie_domain, p) != 0)) { oidc_error(r, "the domain (%s) configured in " OIDCCookieDomain " does not match the URL hostname (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.", cfg->cookie_domain, o_uri.hostname, target_link_uri); return FALSE; } } /* see if the cookie_path setting matches the target_link_uri path */ char *cookie_path = oidc_cfg_dir_cookie_path(r); if (cookie_path != NULL) { char *p = (o_uri.path != NULL) ? 
strstr(o_uri.path, cookie_path) : NULL;
		if ((p == NULL) || (p != o_uri.path)) {
			oidc_error(r,
					"the path (%s) configured in " OIDCCookiePath " does not match the URL path (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
					cookie_path, o_uri.path, target_link_uri);
			return FALSE;
		} else if (strlen(o_uri.path) > strlen(cookie_path)) {
			int n = strlen(cookie_path);
			if (cookie_path[n - 1] == OIDC_CHAR_FORWARD_SLASH)
				n--;
			if (o_uri.path[n] != OIDC_CHAR_FORWARD_SLASH) {
				oidc_error(r,
						"the path (%s) configured in " OIDCCookiePath " does not match the URL path (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.",
						cookie_path, o_uri.path, target_link_uri);
				return FALSE;
			}
		}
	}

	return TRUE;
}

/*
 * handle a response from an IDP discovery page and/or handle 3rd-party initiated SSO
 */
static int oidc_handle_discovery_response(request_rec *r, oidc_cfg *c) {

	/* variables to hold the values returned in the response */
	char *issuer = NULL, *target_link_uri = NULL, *login_hint = NULL,
			*auth_request_params = NULL, *csrf_cookie, *csrf_query = NULL,
			*user = NULL, *path_scopes;
	oidc_provider_t *provider = NULL;

	oidc_util_get_request_parameter(r, OIDC_DISC_OP_PARAM, &issuer);
	oidc_util_get_request_parameter(r, OIDC_DISC_USER_PARAM, &user);
	oidc_util_get_request_parameter(r, OIDC_DISC_RT_PARAM, &target_link_uri);
	oidc_util_get_request_parameter(r, OIDC_DISC_LH_PARAM, &login_hint);
	oidc_util_get_request_parameter(r, OIDC_DISC_SC_PARAM, &path_scopes);
	oidc_util_get_request_parameter(r, OIDC_DISC_AR_PARAM, &auth_request_params);
	oidc_util_get_request_parameter(r, OIDC_CSRF_NAME, &csrf_query);
	csrf_cookie = oidc_util_get_cookie(r, OIDC_CSRF_NAME);

	/* do CSRF protection if not 3rd party initiated SSO */
	if (csrf_cookie) {
		/* clean CSRF cookie */
		oidc_util_set_cookie(r, OIDC_CSRF_NAME, "", 0,
				OIDC_COOKIE_EXT_SAME_SITE_NONE(r));
		/* compare CSRF cookie value with query parameter value */
		if ((csrf_query == NULL)
				|| apr_strnatcmp(csrf_query, csrf_cookie) != 0) {
			oidc_warn(r,
					"CSRF protection failed, no Discovery and dynamic client registration will be allowed");
			csrf_cookie = NULL;
		}
	}

	// TODO: trim issuer/accountname/domain input and do more input validation

	oidc_debug(r,
			"issuer=\"%s\", target_link_uri=\"%s\", login_hint=\"%s\", user=\"%s\"",
			issuer, target_link_uri, login_hint, user);

	if (target_link_uri == NULL) {
		if (c->default_sso_url == NULL) {
			return oidc_util_html_send_error(r, c->error_template,
					"Invalid Request",
					"SSO to this module without specifying a \"target_link_uri\" parameter is not possible because " OIDCDefaultURL " is not set.",
					HTTP_INTERNAL_SERVER_ERROR);
		}
		target_link_uri = c->default_sso_url;
	}

	/* do open redirect prevention */
	if (oidc_target_link_uri_matches_configuration(r, c,
			target_link_uri) == FALSE) {
		return oidc_util_html_send_error(r, c->error_template,
				"Invalid Request",
				"\"target_link_uri\" parameter does not match configuration settings, aborting to prevent an open redirect.",
				HTTP_UNAUTHORIZED);
	}

	/* see if this is a static setup */
	if (c->metadata_dir == NULL) {
		if ((oidc_provider_static_config(r, c, &provider) == TRUE)
				&& (issuer != NULL)) {
			if (apr_strnatcmp(provider->issuer, issuer) != 0) {
				return oidc_util_html_send_error(r, c->error_template,
						"Invalid Request",
						apr_psprintf(r->pool,
								"The \"iss\" value must match the configured provider's one (%s != %s).",
								issuer, c->provider.issuer),
						HTTP_INTERNAL_SERVER_ERROR);
			}
		}
		return oidc_authenticate_user(r, c, NULL, target_link_uri, login_hint,
				NULL, NULL, auth_request_params,
path_scopes); } /* find out if the user entered an account name or selected an OP manually */ if (user != NULL) { if (login_hint == NULL) login_hint = apr_pstrdup(r->pool, user); /* normalize the user identifier */ if (strstr(user, "https://") != user) user = apr_psprintf(r->pool, "https://%s", user); /* got an user identifier as input, perform OP discovery with that */ if (oidc_proto_url_based_discovery(r, c, user, &issuer) == FALSE) { /* something did not work out, show a user facing error */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "Could not resolve the provided user identifier to an OpenID Connect provider; check your syntax.", HTTP_NOT_FOUND); } /* issuer is set now, so let's continue as planned */ } else if (strstr(issuer, OIDC_STR_AT) != NULL) { if (login_hint == NULL) { login_hint = apr_pstrdup(r->pool, issuer); //char *p = strstr(issuer, OIDC_STR_AT); //*p = '\0'; } /* got an account name as input, perform OP discovery with that */ if (oidc_proto_account_based_discovery(r, c, issuer, &issuer) == FALSE) { /* something did not work out, show a user facing error */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "Could not resolve the provided account name to an OpenID Connect provider; check your syntax.", HTTP_NOT_FOUND); } /* issuer is set now, so let's continue as planned */ } /* strip trailing '/' */ int n = strlen(issuer); if (issuer[n - 1] == OIDC_CHAR_FORWARD_SLASH) issuer[n - 1] = '\0'; /* try and get metadata from the metadata directories for the selected OP */ if ((oidc_metadata_get(r, c, issuer, &provider, csrf_cookie != NULL) == TRUE) && (provider != NULL)) { /* now we've got a selected OP, send the user there to authenticate */ return oidc_authenticate_user(r, c, provider, target_link_uri, login_hint, NULL, NULL, auth_request_params, path_scopes); } /* something went wrong */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "Could not find valid provider metadata for the selected OpenID Connect provider; contact the administrator", HTTP_NOT_FOUND); } static apr_uint32_t oidc_transparent_pixel[17] = { 0x474e5089, 0x0a1a0a0d, 0x0d000000, 0x52444849, 0x01000000, 0x01000000, 0x00000408, 0x0c1cb500, 0x00000002, 0x4144490b, 0x639c7854, 0x0000cffa, 0x02010702, 0x71311c9a, 0x00000000, 0x444e4549, 0x826042ae }; static apr_byte_t oidc_is_front_channel_logout(const char *logout_param_value) { return ((logout_param_value != NULL) && ((apr_strnatcmp(logout_param_value, OIDC_GET_STYLE_LOGOUT_PARAM_VALUE) == 0) || (apr_strnatcmp(logout_param_value, OIDC_IMG_STYLE_LOGOUT_PARAM_VALUE) == 0))); } static apr_byte_t oidc_is_back_channel_logout(const char *logout_param_value) { return ((logout_param_value != NULL) && (apr_strnatcmp(logout_param_value, OIDC_BACKCHANNEL_STYLE_LOGOUT_PARAM_VALUE) == 0)); } /* * revoke refresh token and access token stored in the session if the * OP has an RFC 7009 compliant token revocation endpoint */ static void oidc_revoke_tokens(request_rec *r, oidc_cfg *c, oidc_session_t *session) { char *response = NULL; char *basic_auth = NULL; char *bearer_auth = NULL; apr_table_t *params = NULL; const char *token = NULL; oidc_provider_t *provider = NULL; oidc_debug(r, "enter"); if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) goto out; oidc_debug(r, "revocation_endpoint=%s", provider->revocation_endpoint_url ? 
provider->revocation_endpoint_url : "(null)"); if (provider->revocation_endpoint_url == NULL) goto out; params = apr_table_make(r->pool, 4); // add the token endpoint authentication credentials to the revocation endpoint call... if (oidc_proto_token_endpoint_auth(r, c, provider->token_endpoint_auth, provider->client_id, provider->client_secret, provider->client_signing_keys, provider->token_endpoint_url, params, NULL, &basic_auth, &bearer_auth) == FALSE) goto out; // TODO: use oauth.ssl_validate_server ... token = oidc_session_get_refresh_token(r, session); if (token != NULL) { apr_table_addn(params, "token_type_hint", "refresh_token"); apr_table_addn(params, "token", token); if (oidc_util_http_post_form(r, provider->revocation_endpoint_url, params, basic_auth, bearer_auth, c->oauth.ssl_validate_server, &response, c->http_timeout_long, c->outgoing_proxy, oidc_dir_cfg_pass_cookies(r), NULL, NULL, NULL) == FALSE) { oidc_warn(r, "revoking refresh token failed"); } apr_table_clear(params); } token = oidc_session_get_access_token(r, session); if (token != NULL) { apr_table_addn(params, "token_type_hint", "access_token"); apr_table_addn(params, "token", token); if (oidc_util_http_post_form(r, provider->revocation_endpoint_url, params, basic_auth, bearer_auth, c->oauth.ssl_validate_server, &response, c->http_timeout_long, c->outgoing_proxy, oidc_dir_cfg_pass_cookies(r), NULL, NULL, NULL) == FALSE) { oidc_warn(r, "revoking access token failed"); } } out: oidc_debug(r, "leave"); } /* * handle a local logout */ static int oidc_handle_logout_request(request_rec *r, oidc_cfg *c, oidc_session_t *session, const char *url) { oidc_debug(r, "enter (url=%s)", url); /* if there's no remote_user then there's no (stored) session to kill */ if (session->remote_user != NULL) oidc_revoke_tokens(r, c, session); /* * remove session state (cq. 
cache entry and cookie) * always clear the session cookie because the cookie may be not sent (but still in the browser) * due to SameSite policies */ oidc_session_kill(r, session); /* see if this is the OP calling us */ if (oidc_is_front_channel_logout(url)) { /* set recommended cache control headers */ oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_CACHE_CONTROL, "no-cache, no-store"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_PRAGMA, "no-cache"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_P3P, "CAO PSA OUR"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_EXPIRES, "0"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_X_FRAME_OPTIONS, "DENY"); /* see if this is PF-PA style logout in which case we return a transparent pixel */ const char *accept = oidc_util_hdr_in_accept_get(r); if ((apr_strnatcmp(url, OIDC_IMG_STYLE_LOGOUT_PARAM_VALUE) == 0) || ((accept) && strstr(accept, OIDC_CONTENT_TYPE_IMAGE_PNG))) { // terminate with DONE instead of OK // to avoid Apache returning auth/authz error 401 for the redirect URI return oidc_util_http_send(r, (const char*) &oidc_transparent_pixel, sizeof(oidc_transparent_pixel), OIDC_CONTENT_TYPE_IMAGE_PNG, DONE); } /* standard HTTP based logout: should be called in an iframe from the OP */ return oidc_util_html_send(r, "Logged Out", NULL, NULL, "<p>Logged Out</p>", DONE); } /* see if we don't need to go somewhere special after killing the session locally */ if (url == NULL) return oidc_util_html_send(r, "Logged Out", NULL, NULL, "<p>Logged Out</p>", OK); /* send the user to the specified where-to-go-after-logout URL */ oidc_util_hdr_out_location_set(r, url); return HTTP_MOVED_TEMPORARILY; } /* * handle a backchannel logout */ #define OIDC_EVENTS_BLOGOUT_KEY "http://schemas.openid.net/event/backchannel-logout" static int oidc_handle_logout_backchannel(request_rec *r, oidc_cfg *cfg) { oidc_debug(r, "enter"); const char *logout_token = NULL; oidc_jwt_t *jwt = NULL; oidc_jose_error_t err; oidc_jwk_t *jwk = NULL; oidc_provider_t *provider = NULL; char *sid = NULL, *uuid = NULL; oidc_session_t session; int rc = HTTP_BAD_REQUEST; apr_table_t *params = apr_table_make(r->pool, 8); if (oidc_util_read_post_params(r, params, FALSE, NULL) == FALSE) { oidc_error(r, "could not read POST-ed parameters to the logout endpoint"); goto out; } logout_token = apr_table_get(params, OIDC_PROTO_LOGOUT_TOKEN); if (logout_token == NULL) { oidc_error(r, "backchannel lggout endpoint was called but could not find a parameter named \"%s\"", OIDC_PROTO_LOGOUT_TOKEN); goto out; } // TODO: jwk symmetric key based on provider if (oidc_jwt_parse(r->pool, logout_token, &jwt, oidc_util_merge_symmetric_key(r->pool, cfg->private_keys, NULL), &err) == FALSE) { oidc_error(r, "oidc_jwt_parse failed: %s", oidc_jose_e2s(r->pool, err)); goto out; } provider = oidc_get_provider_for_issuer(r, cfg, jwt->payload.iss, FALSE); if (provider == NULL) { oidc_error(r, "no provider found for issuer: %s", jwt->payload.iss); goto out; } // TODO: destroy the JWK used for decryption jwk = NULL; if (oidc_util_create_symmetric_key(r, provider->client_secret, 0, NULL, TRUE, &jwk) == FALSE) return FALSE; oidc_jwks_uri_t jwks_uri = { provider->jwks_uri, provider->jwks_refresh_interval, provider->ssl_validate_server }; if (oidc_proto_jwt_verify(r, cfg, jwt, &jwks_uri, oidc_util_merge_symmetric_key(r->pool, NULL, jwk), provider->id_token_signed_response_alg) == FALSE) { oidc_error(r, "id_token signature could not be validated, aborting"); goto out; } // oidc_proto_validate_idtoken would try and require a token binding cnf // if the 
policy is set to "required", so don't use that here if (oidc_proto_validate_jwt(r, jwt, provider->validate_issuer ? provider->issuer : NULL, FALSE, FALSE, provider->idtoken_iat_slack, OIDC_TOKEN_BINDING_POLICY_DISABLED) == FALSE) goto out; /* verify the "aud" and "azp" values */ if (oidc_proto_validate_aud_and_azp(r, cfg, provider, &jwt->payload) == FALSE) goto out; json_t *events = json_object_get(jwt->payload.value.json, OIDC_CLAIM_EVENTS); if (events == NULL) { oidc_error(r, "\"%s\" claim could not be found in logout token", OIDC_CLAIM_EVENTS); goto out; } json_t *blogout = json_object_get(events, OIDC_EVENTS_BLOGOUT_KEY); if (!json_is_object(blogout)) { oidc_error(r, "\"%s\" object could not be found in \"%s\" claim", OIDC_EVENTS_BLOGOUT_KEY, OIDC_CLAIM_EVENTS); goto out; } char *nonce = NULL; oidc_json_object_get_string(r->pool, jwt->payload.value.json, OIDC_CLAIM_NONCE, &nonce, NULL); if (nonce != NULL) { oidc_error(r, "rejecting logout request/token since it contains a \"%s\" claim", OIDC_CLAIM_NONCE); goto out; } char *jti = NULL; oidc_json_object_get_string(r->pool, jwt->payload.value.json, OIDC_CLAIM_JTI, &jti, NULL); if (jti != NULL) { char *replay = NULL; oidc_cache_get_jti(r, jti, &replay); if (replay != NULL) { oidc_error(r, "the \"%s\" value (%s) passed in logout token was found in the cache already; possible replay attack!?", OIDC_CLAIM_JTI, jti); goto out; } } /* jti cache duration is the configured replay prevention window for token issuance plus 10 seconds for safety */ apr_time_t jti_cache_duration = apr_time_from_sec( provider->idtoken_iat_slack * 2 + 10); /* store it in the cache for the calculated duration */ oidc_cache_set_jti(r, jti, jti, apr_time_now() + jti_cache_duration); oidc_json_object_get_string(r->pool, jwt->payload.value.json, OIDC_CLAIM_EVENTS, &sid, NULL); // TODO: by-spec we should cater for the fact that "sid" has been provided // in the id_token returned in the authentication request, but "sub" // is used in the logout token but that requires a 2nd entry in the // cache and a separate session "sub" member, ugh; we'll just assume // that is "sid" is specified in the id_token, the OP will actually use // this for logout // (and probably call us multiple times or the same sub if needed) oidc_json_object_get_string(r->pool, jwt->payload.value.json, OIDC_CLAIM_SID, &sid, NULL); if (sid == NULL) sid = jwt->payload.sub; if (sid == NULL) { oidc_error(r, "no \"sub\" and no \"sid\" claim found in logout token"); goto out; } // TODO: when dealing with sub instead of a true sid, we'll be killing all sessions for // a specific user, across hosts that share the *same* cache backend // if those hosts haven't been configured with a different OIDCCryptoPassphrase // - perhaps that's even acceptable since non-memory caching is encrypted by default // and memory-based caching doesn't suffer from this (different shm segments)? // - it will result in 400 errors returned from backchannel logout calls to the other hosts... sid = oidc_make_sid_iss_unique(r, sid, provider->issuer); oidc_cache_get_sid(r, sid, &uuid); if (uuid == NULL) { oidc_error(r, "could not find session based on sid/sub provided in logout token: %s", sid); // return HTTP 200 according to (new?) 
spec and terminate early // to avoid Apache returning auth/authz error 500 for the redirect URI rc = DONE; goto out; } // revoke tokens if we can get a handle on those if (cfg->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { if (oidc_session_load_cache_by_uuid(r, cfg, uuid, &session) != FALSE) if (oidc_session_extract(r, &session) != FALSE) oidc_revoke_tokens(r, cfg, &session); } // clear the session cache oidc_cache_set_sid(r, sid, NULL, 0); oidc_cache_set_session(r, uuid, NULL, 0); // terminate with DONE instead of OK // to avoid Apache returning auth/authz error 500 for the redirect URI rc = DONE; out: if (jwk != NULL) { oidc_jwk_destroy(jwk); jwk = NULL; } if (jwt != NULL) { oidc_jwt_destroy(jwt); jwt = NULL; } oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_CACHE_CONTROL, "no-cache, no-store"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_PRAGMA, "no-cache"); return rc; } static apr_byte_t oidc_validate_redirect_url(request_rec *r, oidc_cfg *c, const char *url, apr_byte_t restrict_to_host, char **err_str, char **err_desc) { apr_uri_t uri; const char *c_host = NULL; apr_hash_index_t *hi = NULL; if (apr_uri_parse(r->pool, url, &uri) != APR_SUCCESS) { *err_str = apr_pstrdup(r->pool, "Malformed URL"); *err_desc = apr_psprintf(r->pool, "not a valid URL value: %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } if (c->redirect_urls_allowed != NULL) { for (hi = apr_hash_first(NULL, c->redirect_urls_allowed); hi; hi = apr_hash_next(hi)) { apr_hash_this(hi, (const void**) &c_host, NULL, NULL); if (oidc_util_regexp_first_match(r->pool, url, c_host, NULL, err_str) == TRUE) break; } if (hi == NULL) { *err_str = apr_pstrdup(r->pool, "URL not allowed"); *err_desc = apr_psprintf(r->pool, "value does not match the list of allowed redirect URLs: %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } } else if ((uri.hostname != NULL) && (restrict_to_host == TRUE)) { c_host = oidc_get_current_url_host(r); if ((strstr(c_host, uri.hostname) == NULL) || (strstr(uri.hostname, c_host) == NULL)) { *err_str = apr_pstrdup(r->pool, "Invalid Request"); *err_desc = apr_psprintf(r->pool, "URL value \"%s\" does not match the hostname of the current request \"%s\"", apr_uri_unparse(r->pool, &uri, 0), c_host); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } } if ((uri.hostname == NULL) && (strstr(url, "/") != url)) { *err_str = apr_pstrdup(r->pool, "Malformed URL"); *err_desc = apr_psprintf(r->pool, "No hostname was parsed and it does not seem to be relative, i.e starting with '/': %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } else if ((uri.hostname == NULL) && (strstr(url, "//") == url)) { *err_str = apr_pstrdup(r->pool, "Malformed URL"); *err_desc = apr_psprintf(r->pool, "No hostname was parsed and starting with '//': %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } else if ((uri.hostname == NULL) && (strstr(url, "/\\") == url)) { *err_str = apr_pstrdup(r->pool, "Malformed URL"); *err_desc = apr_psprintf(r->pool, "No hostname was parsed and starting with '/\\': %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } /* validate the URL to prevent HTTP header splitting */ if (((strstr(url, "\n") != NULL) || strstr(url, "\r") != NULL)) { *err_str = apr_pstrdup(r->pool, "Invalid URL"); *err_desc = apr_psprintf(r->pool, "URL value \"%s\" contains illegal \"\n\" or \"\r\" character(s)", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } return TRUE; } /* * perform (single) logout */ static 
int oidc_handle_logout(request_rec *r, oidc_cfg *c, oidc_session_t *session) { oidc_provider_t *provider = NULL; /* pickup the command or URL where the user wants to go after logout */ char *url = NULL; char *error_str = NULL; char *error_description = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_LOGOUT, &url); oidc_debug(r, "enter (url=%s)", url); if (oidc_is_front_channel_logout(url)) { return oidc_handle_logout_request(r, c, session, url); } else if (oidc_is_back_channel_logout(url)) { return oidc_handle_logout_backchannel(r, c); } if ((url == NULL) || (apr_strnatcmp(url, "") == 0)) { url = c->default_slo_url; } else { /* do input validation on the logout parameter value */ if (oidc_validate_redirect_url(r, c, url, TRUE, &error_str, &error_description) == FALSE) { return oidc_util_html_send_error(r, c->error_template, error_str, error_description, HTTP_BAD_REQUEST); } } oidc_get_provider_from_session(r, c, session, &provider); if ((provider != NULL) && (provider->end_session_endpoint != NULL)) { const char *id_token_hint = oidc_session_get_idtoken(r, session); char *logout_request = apr_pstrdup(r->pool, provider->end_session_endpoint); if (id_token_hint != NULL) { logout_request = apr_psprintf(r->pool, "%s%sid_token_hint=%s", logout_request, strchr(logout_request ? logout_request : "", OIDC_CHAR_QUERY) != NULL ? OIDC_STR_AMP : OIDC_STR_QUERY, oidc_util_escape_string(r, id_token_hint)); } if (url != NULL) { logout_request = apr_psprintf(r->pool, "%s%spost_logout_redirect_uri=%s", logout_request, strchr(logout_request ? logout_request : "", OIDC_CHAR_QUERY) != NULL ? OIDC_STR_AMP : OIDC_STR_QUERY, oidc_util_escape_string(r, url)); } url = logout_request; } return oidc_handle_logout_request(r, c, session, url); } /* * handle request for JWKs */ int oidc_handle_jwks(request_rec *r, oidc_cfg *c) { /* pickup requested JWKs type */ // char *jwks_type = NULL; // oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_JWKS, &jwks_type); char *jwks = apr_pstrdup(r->pool, "{ \"keys\" : ["); int i = 0; apr_byte_t first = TRUE; oidc_jose_error_t err; if (c->public_keys != NULL) { /* loop over the RSA public keys */ for (i = 0; i < c->public_keys->nelts; i++) { const oidc_jwk_t *jwk = ((const oidc_jwk_t**) c->public_keys->elts)[i]; char *s_json = NULL; if (oidc_jwk_to_json(r->pool, jwk, &s_json, &err) == TRUE) { jwks = apr_psprintf(r->pool, "%s%s %s ", jwks, first ? "" : ",", s_json); first = FALSE; } else { oidc_error(r, "could not convert RSA JWK to JSON using oidc_jwk_to_json: %s", oidc_jose_e2s(r->pool, err)); } } } // TODO: send stuff if first == FALSE? 
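	/*
	 * Descriptive note (not from the original source): the string assembled above
	 * follows the standard JWK Set layout; with a single RSA public key the
	 * response sent below would look roughly like
	 *   { "keys" : [ { "kty": "RSA", "kid": "...", "n": "...", "e": "AQAB" } ] }
	 * where the key material shown here is purely illustrative.
	 */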
jwks = apr_psprintf(r->pool, "%s ] }", jwks); return oidc_util_http_send(r, jwks, strlen(jwks), OIDC_CONTENT_TYPE_JSON, OK); } static int oidc_handle_session_management_iframe_op(request_rec *r, oidc_cfg *c, oidc_session_t *session, const char *check_session_iframe) { oidc_debug(r, "enter"); oidc_util_hdr_out_location_set(r, check_session_iframe); return HTTP_MOVED_TEMPORARILY; } static int oidc_handle_session_management_iframe_rp(request_rec *r, oidc_cfg *c, oidc_session_t *session, const char *client_id, const char *check_session_iframe) { oidc_debug(r, "enter"); const char *java_script = " <script type=\"text/javascript\">\n" " var targetOrigin = '%s';\n" " var clientId = '%s';\n" " var sessionId = '%s';\n" " var loginUrl = '%s';\n" " var message = clientId + ' ' + sessionId;\n" " var timerID;\n" "\n" " function checkSession() {\n" " console.debug('checkSession: posting ' + message + ' to ' + targetOrigin);\n" " var win = window.parent.document.getElementById('%s').contentWindow;\n" " win.postMessage( message, targetOrigin);\n" " }\n" "\n" " function setTimer() {\n" " checkSession();\n" " timerID = setInterval('checkSession()', %d);\n" " }\n" "\n" " function receiveMessage(e) {\n" " console.debug('receiveMessage: ' + e.data + ' from ' + e.origin);\n" " if (e.origin !== targetOrigin ) {\n" " console.debug('receiveMessage: cross-site scripting attack?');\n" " return;\n" " }\n" " if (e.data != 'unchanged') {\n" " clearInterval(timerID);\n" " if (e.data == 'changed' && sessionId == '' ) {\n" " // 'changed' + no session: enforce a login (if we have a login url...)\n" " if (loginUrl != '') {\n" " window.top.location.replace(loginUrl);\n" " }\n" " } else {\n" " // either 'changed' + active session, or 'error': enforce a logout\n" " window.top.location.replace('%s?logout=' + encodeURIComponent(window.top.location.href));\n" " }\n" " }\n" " }\n" "\n" " window.addEventListener('message', receiveMessage, false);\n" "\n" " </script>\n"; /* determine the origin for the check_session_iframe endpoint */ char *origin = apr_pstrdup(r->pool, check_session_iframe); apr_uri_t uri; apr_uri_parse(r->pool, check_session_iframe, &uri); char *p = strstr(origin, uri.path); *p = '\0'; /* the element identifier for the OP iframe */ const char *op_iframe_id = "openidc-op"; /* restore the OP session_state from the session */ const char *session_state = oidc_session_get_session_state(r, session); if (session_state == NULL) { oidc_warn(r, "no session_state found in the session; the OP does probably not support session management!?"); //return OK; } char *s_poll_interval = NULL; oidc_util_get_request_parameter(r, "poll", &s_poll_interval); int poll_interval = s_poll_interval ? strtol(s_poll_interval, NULL, 10) : 0; if ((poll_interval <= 0) || (poll_interval > 3600 * 24)) poll_interval = 3000; char *login_uri = NULL, *error_str = NULL, *error_description = NULL; oidc_util_get_request_parameter(r, "login_uri", &login_uri); if ((login_uri != NULL) && (oidc_validate_redirect_url(r, c, login_uri, FALSE, &error_str, &error_description) == FALSE)) { return HTTP_BAD_REQUEST; } const char *redirect_uri = oidc_get_redirect_uri(r, c); java_script = apr_psprintf(r->pool, java_script, origin, client_id, session_state ? session_state : "", login_uri ? 
login_uri : "", op_iframe_id, poll_interval, redirect_uri, redirect_uri); return oidc_util_html_send(r, NULL, java_script, "setTimer", NULL, OK); } /* * handle session management request */ static int oidc_handle_session_management(request_rec *r, oidc_cfg *c, oidc_session_t *session) { char *cmd = NULL; const char *id_token_hint = NULL; oidc_provider_t *provider = NULL; /* get the command passed to the session management handler */ oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_SESSION, &cmd); if (cmd == NULL) { oidc_error(r, "session management handler called with no command"); return HTTP_INTERNAL_SERVER_ERROR; } /* see if this is a local logout during session management */ if (apr_strnatcmp("logout", cmd) == 0) { oidc_debug(r, "[session=logout] calling oidc_handle_logout_request because of session mgmt local logout call."); return oidc_handle_logout_request(r, c, session, c->default_slo_url); } if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) { if ((oidc_provider_static_config(r, c, &provider) == FALSE) || (provider == NULL)) return HTTP_NOT_FOUND; } /* see if this is a request for the OP iframe */ if (apr_strnatcmp("iframe_op", cmd) == 0) { if (provider->check_session_iframe != NULL) { return oidc_handle_session_management_iframe_op(r, c, session, provider->check_session_iframe); } return HTTP_NOT_FOUND; } /* see if this is a request for the RP iframe */ if (apr_strnatcmp("iframe_rp", cmd) == 0) { if ((provider->client_id != NULL) && (provider->check_session_iframe != NULL)) { return oidc_handle_session_management_iframe_rp(r, c, session, provider->client_id, provider->check_session_iframe); } oidc_debug(r, "iframe_rp command issued but no client (%s) and/or no check_session_iframe (%s) set", provider->client_id, provider->check_session_iframe); return HTTP_NOT_FOUND; } /* see if this is a request check the login state with the OP */ if (apr_strnatcmp("check", cmd) == 0) { id_token_hint = oidc_session_get_idtoken(r, session); /* * TODO: this doesn't work with per-path provided auth_request_params and scopes * as oidc_dir_cfg_path_auth_request_params and oidc_dir_cfg_path_scope will pick * those for the redirect_uri itself; do we need to store those as part of the * session now? 
*/ return oidc_authenticate_user(r, c, provider, apr_psprintf(r->pool, "%s?session=iframe_rp", oidc_get_redirect_uri_iss(r, c, provider)), NULL, id_token_hint, "none", oidc_dir_cfg_path_auth_request_params(r), oidc_dir_cfg_path_scope(r)); } /* handle failure in fallthrough */ oidc_error(r, "unknown command: %s", cmd); return HTTP_INTERNAL_SERVER_ERROR; } /* * handle refresh token request */ static int oidc_handle_refresh_token_request(request_rec *r, oidc_cfg *c, oidc_session_t *session) { char *return_to = NULL; char *r_access_token = NULL; char *error_code = NULL; char *error_str = NULL; char *error_description = NULL; apr_byte_t needs_save = TRUE; /* get the command passed to the session management handler */ oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_REFRESH, &return_to); oidc_util_get_request_parameter(r, OIDC_PROTO_ACCESS_TOKEN, &r_access_token); /* check the input parameters */ if (return_to == NULL) { oidc_error(r, "refresh token request handler called with no URL to return to"); return HTTP_INTERNAL_SERVER_ERROR; } /* do input validation on the return to parameter value */ if (oidc_validate_redirect_url(r, c, return_to, TRUE, &error_str, &error_description) == FALSE) { oidc_error(r, "return_to URL validation failed: %s: %s", error_str, error_description); return HTTP_INTERNAL_SERVER_ERROR; } if (r_access_token == NULL) { oidc_error(r, "refresh token request handler called with no access_token parameter"); error_code = "no_access_token"; goto end; } const char *s_access_token = oidc_session_get_access_token(r, session); if (s_access_token == NULL) { oidc_error(r, "no existing access_token found in the session, nothing to refresh"); error_code = "no_access_token_exists"; goto end; } /* compare the access_token parameter used for XSRF protection */ if (apr_strnatcmp(s_access_token, r_access_token) != 0) { oidc_error(r, "access_token passed in refresh request does not match the one stored in the session"); error_code = "no_access_token_match"; goto end; } /* get a handle to the provider configuration */ oidc_provider_t *provider = NULL; if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) { error_code = "session_corruption"; goto end; } /* execute the actual refresh grant */ if (oidc_refresh_access_token(r, c, session, provider, NULL) == FALSE) { oidc_error(r, "access_token could not be refreshed"); error_code = "refresh_failed"; goto end; } /* pass the tokens to the application, possibly updating the expiry */ if (oidc_session_pass_tokens(r, c, session, &needs_save) == FALSE) { error_code = "session_corruption"; goto end; } if (oidc_session_save(r, session, FALSE) == FALSE) { error_code = "error saving session"; goto end; } end: /* pass optional error message to the return URL */ if (error_code != NULL) return_to = apr_psprintf(r->pool, "%s%serror_code=%s", return_to, strchr(return_to ? return_to : "", OIDC_CHAR_QUERY) ? 
OIDC_STR_AMP : OIDC_STR_QUERY, oidc_util_escape_string(r, error_code)); /* add the redirect location header */ oidc_util_hdr_out_location_set(r, return_to); return HTTP_MOVED_TEMPORARILY; } /* * handle request object by reference request */ static int oidc_handle_request_uri(request_rec *r, oidc_cfg *c) { char *request_ref = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_REQUEST_URI, &request_ref); if (request_ref == NULL) { oidc_error(r, "no \"%s\" parameter found", OIDC_REDIRECT_URI_REQUEST_REQUEST_URI); return HTTP_BAD_REQUEST; } char *jwt = NULL; oidc_cache_get_request_uri(r, request_ref, &jwt); if (jwt == NULL) { oidc_error(r, "no cached JWT found for %s reference: %s", OIDC_REDIRECT_URI_REQUEST_REQUEST_URI, request_ref); return HTTP_NOT_FOUND; } oidc_cache_set_request_uri(r, request_ref, NULL, 0); return oidc_util_http_send(r, jwt, strlen(jwt), OIDC_CONTENT_TYPE_JWT, OK); } /* * handle a request to invalidate a cached access token introspection result */ int oidc_handle_remove_at_cache(request_rec *r, oidc_cfg *c) { char *access_token = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_REMOVE_AT_CACHE, &access_token); char *cache_entry = NULL; oidc_cache_get_access_token(r, access_token, &cache_entry); if (cache_entry == NULL) { oidc_error(r, "no cached access token found for value: %s", access_token); return HTTP_NOT_FOUND; } oidc_cache_set_access_token(r, access_token, NULL, 0); return OK; } #define OIDC_INFO_PARAM_ACCESS_TOKEN_REFRESH_INTERVAL "access_token_refresh_interval" /* * handle request for session info */ static int oidc_handle_info_request(request_rec *r, oidc_cfg *c, oidc_session_t *session, apr_byte_t needs_save) { int rc = HTTP_UNAUTHORIZED; char *s_format = NULL, *s_interval = NULL, *r_value = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_INFO, &s_format); oidc_util_get_request_parameter(r, OIDC_INFO_PARAM_ACCESS_TOKEN_REFRESH_INTERVAL, &s_interval); /* see if this is a request for a format that is supported */ if ((apr_strnatcmp(OIDC_HOOK_INFO_FORMAT_JSON, s_format) != 0) && (apr_strnatcmp(OIDC_HOOK_INFO_FORMAT_HTML, s_format) != 0)) { oidc_warn(r, "request for unknown format: %s", s_format); return HTTP_UNSUPPORTED_MEDIA_TYPE; } /* check that we actually have a user session and this is someone calling with a proper session cookie */ if (session->remote_user == NULL) { oidc_warn(r, "no user session found"); return HTTP_UNAUTHORIZED; } /* set the user in the main request for further (incl. 
sub-request and authz) processing */ r->user = apr_pstrdup(r->pool, session->remote_user); if (c->info_hook_data == NULL) { oidc_warn(r, "no data configured to return in " OIDCInfoHook); return HTTP_NOT_FOUND; } /* see if we can and need to refresh the access token */ if ((s_interval != NULL) && (oidc_session_get_refresh_token(r, session) != NULL)) { apr_time_t t_interval; if (sscanf(s_interval, "%" APR_TIME_T_FMT, &t_interval) == 1) { t_interval = apr_time_from_sec(t_interval); /* get the last refresh timestamp from the session info */ apr_time_t last_refresh = oidc_session_get_access_token_last_refresh(r, session); oidc_debug(r, "refresh needed in: %" APR_TIME_T_FMT " seconds", apr_time_sec(last_refresh + t_interval - apr_time_now())); /* see if we need to refresh again */ if (last_refresh + t_interval < apr_time_now()) { /* get the current provider info */ oidc_provider_t *provider = NULL; if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; /* execute the actual refresh grant */ if (oidc_refresh_access_token(r, c, session, provider, NULL) == FALSE) oidc_warn(r, "access_token could not be refreshed"); else needs_save = TRUE; } } } /* create the JSON object */ json_t *json = json_object(); /* add a timestamp of creation in there for the caller */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_TIMESTAMP, APR_HASH_KEY_STRING)) { json_object_set_new(json, OIDC_HOOK_INFO_TIMESTAMP, json_integer(apr_time_sec(apr_time_now()))); } /* * refresh the claims from the userinfo endpoint * side-effect is that this may refresh the access token if not already done * note that OIDCUserInfoRefreshInterval should be set to control the refresh policy */ needs_save |= oidc_refresh_claims_from_userinfo_endpoint(r, c, session); /* include the access token in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_ACCES_TOKEN, APR_HASH_KEY_STRING)) { const char *access_token = oidc_session_get_access_token(r, session); if (access_token != NULL) json_object_set_new(json, OIDC_HOOK_INFO_ACCES_TOKEN, json_string(access_token)); } /* include the access token expiry timestamp in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_ACCES_TOKEN_EXP, APR_HASH_KEY_STRING)) { const char *access_token_expires = oidc_session_get_access_token_expires(r, session); if (access_token_expires != NULL) json_object_set_new(json, OIDC_HOOK_INFO_ACCES_TOKEN_EXP, json_string(access_token_expires)); } /* include the id_token claims in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_ID_TOKEN, APR_HASH_KEY_STRING)) { json_t *id_token = oidc_session_get_idtoken_claims_json(r, session); if (id_token) json_object_set_new(json, OIDC_HOOK_INFO_ID_TOKEN, id_token); } if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_USER_INFO, APR_HASH_KEY_STRING)) { /* include the claims from the userinfo endpoint the session info */ json_t *claims = oidc_session_get_userinfo_claims_json(r, session); if (claims) json_object_set_new(json, OIDC_HOOK_INFO_USER_INFO, claims); } /* include the maximum session lifetime in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_SESSION_EXP, APR_HASH_KEY_STRING)) { apr_time_t session_expires = oidc_session_get_session_expires(r, session); json_object_set_new(json, OIDC_HOOK_INFO_SESSION_EXP, json_integer(apr_time_sec(session_expires))); } /* include the inactivity timeout in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_SESSION_TIMEOUT, APR_HASH_KEY_STRING)) { 
json_object_set_new(json, OIDC_HOOK_INFO_SESSION_TIMEOUT, json_integer(apr_time_sec(session->expiry))); } /* include the remote_user in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_SESSION_REMOTE_USER, APR_HASH_KEY_STRING)) { json_object_set_new(json, OIDC_HOOK_INFO_SESSION_REMOTE_USER, json_string(session->remote_user)); } if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_SESSION, APR_HASH_KEY_STRING)) { json_t *j_session = json_object(); json_object_set(j_session, OIDC_HOOK_INFO_SESSION_STATE, session->state); json_object_set_new(j_session, OIDC_HOOK_INFO_SESSION_UUID, json_string(session->uuid)); json_object_set_new(json, OIDC_HOOK_INFO_SESSION, j_session); } if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_REFRESH_TOKEN, APR_HASH_KEY_STRING)) { /* include the refresh token in the session info */ const char *refresh_token = oidc_session_get_refresh_token(r, session); if (refresh_token != NULL) json_object_set_new(json, OIDC_HOOK_INFO_REFRESH_TOKEN, json_string(refresh_token)); } /* pass the tokens to the application and save the session, possibly updating the expiry */ if (oidc_session_pass_tokens(r, c, session, &needs_save) == FALSE) oidc_warn(r, "error passing tokens"); /* check if something was updated in the session and we need to save it again */ if (needs_save) { if (oidc_session_save(r, session, FALSE) == FALSE) { oidc_warn(r, "error saving session"); rc = HTTP_INTERNAL_SERVER_ERROR; } } if (apr_strnatcmp(OIDC_HOOK_INFO_FORMAT_JSON, s_format) == 0) { /* JSON-encode the result */ r_value = oidc_util_encode_json_object(r, json, 0); /* return the stringified JSON result */ rc = oidc_util_http_send(r, r_value, strlen(r_value), OIDC_CONTENT_TYPE_JSON, OK); } else if (apr_strnatcmp(OIDC_HOOK_INFO_FORMAT_HTML, s_format) == 0) { /* JSON-encode the result */ r_value = oidc_util_encode_json_object(r, json, JSON_INDENT(2)); rc = oidc_util_html_send(r, "Session Info", NULL, NULL, apr_psprintf(r->pool, "<pre>%s</pre>", r_value), OK); } /* free the allocated resources */ json_decref(json); return rc; } /* * handle all requests to the redirect_uri */ int oidc_handle_redirect_uri_request(request_rec *r, oidc_cfg *c, oidc_session_t *session) { if (oidc_proto_is_redirect_authorization_response(r, c)) { /* this is an authorization response from the OP using the Basic Client profile or a Hybrid flow*/ return oidc_handle_redirect_authorization_response(r, c, session); /* * * Note that we are checking for logout *before* checking for a POST authorization response * to handle backchannel POST-based logout * * so any POST to the Redirect URI that does not have a logout query parameter will be handled * as an authorization response; alternatively we could assume that a POST response has no * parameters */ } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_LOGOUT)) { /* handle logout */ return oidc_handle_logout(r, c, session); } else if (oidc_proto_is_post_authorization_response(r, c)) { /* this is an authorization response using the fragment(+POST) response_mode with the Implicit Client profile */ return oidc_handle_post_authorization_response(r, c, session); } else if (oidc_is_discovery_response(r, c)) { /* this is response from the OP discovery page */ return oidc_handle_discovery_response(r, c); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_JWKS)) { /* * Will be handled in the content handler; avoid: * No authentication done but request not allowed without authentication * by setting r->user */ r->user = ""; return OK; } else 
if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_SESSION)) { /* handle session management request */ return oidc_handle_session_management(r, c, session); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_REFRESH)) { /* handle refresh token request */ return oidc_handle_refresh_token_request(r, c, session); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_REQUEST_URI)) { /* handle request object by reference request */ return oidc_handle_request_uri(r, c); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_REMOVE_AT_CACHE)) { /* handle request to invalidate access token cache */ return oidc_handle_remove_at_cache(r, c); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_INFO)) { if (session->remote_user == NULL) return HTTP_UNAUTHORIZED; /* * Will be handled in the content handler; avoid: * No authentication done but request not allowed without authentication * by setting r->user */ r->user = ""; return OK; } else if ((r->args == NULL) || (apr_strnatcmp(r->args, "") == 0)) { /* this is a "bare" request to the redirect URI, indicating implicit flow using the fragment response_mode */ return oidc_proto_javascript_implicit(r, c); } /* this is not an authorization response or logout request */ /* check for "error" response */ if (oidc_util_request_has_parameter(r, OIDC_PROTO_ERROR)) { // char *error = NULL, *descr = NULL; // oidc_util_get_request_parameter(r, "error", &error); // oidc_util_get_request_parameter(r, "error_description", &descr); // // /* send user facing error to browser */ // return oidc_util_html_send_error(r, error, descr, DONE); return oidc_handle_redirect_authorization_response(r, c, session); } oidc_error(r, "The OpenID Connect callback URL received an invalid request: %s; returning HTTP_INTERNAL_SERVER_ERROR", r->args); /* something went wrong */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", apr_psprintf(r->pool, "The OpenID Connect callback URL received an invalid request"), HTTP_INTERNAL_SERVER_ERROR); } #define OIDC_AUTH_TYPE_OPENID_CONNECT "openid-connect" #define OIDC_AUTH_TYPE_OPENID_OAUTH20 "oauth20" #define OIDC_AUTH_TYPE_OPENID_BOTH "auth-openidc" /* * main routine: handle OpenID Connect authentication */ static int oidc_check_userid_openidc(request_rec *r, oidc_cfg *c) { if (oidc_get_redirect_uri(r, c) == NULL) { oidc_error(r, "configuration error: the authentication type is set to \"" OIDC_AUTH_TYPE_OPENID_CONNECT "\" but " OIDCRedirectURI " has not been set"); return HTTP_INTERNAL_SERVER_ERROR; } /* check if this is a sub-request or an initial request */ if (!ap_is_initial_req(r)) { /* not an initial request, try to recycle what we've already established in the main request */ if (r->main != NULL) r->user = r->main->user; else if (r->prev != NULL) r->user = r->prev->user; if (r->user != NULL) { /* this is a sub-request and we have a session (headers will have been scrubbed and set already) */ oidc_debug(r, "recycling user '%s' from initial request for sub-request", r->user); /* * apparently request state can get lost in sub-requests, so let's see * if we need to restore id_token and/or claims from the session cache */ const char *s_id_token = oidc_request_state_get(r, OIDC_REQUEST_STATE_KEY_IDTOKEN); if (s_id_token == NULL) { oidc_session_t *session = NULL; oidc_session_load(r, &session); oidc_copy_tokens_to_request_state(r, session, NULL, NULL); /* free resources allocated for the session */ oidc_session_free(r, session); } /* 
strip any cookies that we need to */ oidc_strip_cookies(r); return OK; } /* * else: not initial request, but we could not find a session, so: * try to load a new session as if this were the initial request */ } int rc = OK; apr_byte_t needs_save = FALSE; /* load the session from the request state; this will be a new "empty" session if no state exists */ oidc_session_t *session = NULL; oidc_session_load(r, &session); /* see if the initial request is to the redirect URI; this handles potential logout too */ if (oidc_util_request_matches_url(r, oidc_get_redirect_uri(r, c))) { /* handle request to the redirect_uri */ rc = oidc_handle_redirect_uri_request(r, c, session); /* free resources allocated for the session */ oidc_session_free(r, session); return rc; /* initial request to non-redirect URI, check if we have an existing session */ } else if (session->remote_user != NULL) { /* this is initial request and we already have a session */ rc = oidc_handle_existing_session(r, c, session, &needs_save); if (rc == OK) { /* check if something was updated in the session and we need to save it again */ if (needs_save) { if (oidc_session_save(r, session, FALSE) == FALSE) { oidc_warn(r, "error saving session"); rc = HTTP_INTERNAL_SERVER_ERROR; } } } /* free resources allocated for the session */ oidc_session_free(r, session); /* strip any cookies that we need to */ oidc_strip_cookies(r); return rc; } /* free resources allocated for the session */ oidc_session_free(r, session); /* * else: we have no session and it is not an authorization or * discovery response: just hit the default flow for unauthenticated users */ return oidc_handle_unauthenticated_user(r, c); } /* * main routine: handle "mixed" OIDC/OAuth authentication */ static int oidc_check_mixed_userid_oauth(request_rec *r, oidc_cfg *c) { /* get the bearer access token from the Authorization header */ const char *access_token = NULL; if (oidc_oauth_get_bearer_token(r, &access_token) == TRUE) { r->ap_auth_type = apr_pstrdup(r->pool, OIDC_AUTH_TYPE_OPENID_OAUTH20); return oidc_oauth_check_userid(r, c, access_token); } /* no bearer token found: then treat this as a regular OIDC browser request */ r->ap_auth_type = apr_pstrdup(r->pool, OIDC_AUTH_TYPE_OPENID_CONNECT); return oidc_check_userid_openidc(r, c); } /* * generic Apache authentication hook for this module: dispatches to OpenID Connect or OAuth 2.0 specific routines */ int oidc_check_user_id(request_rec *r) { oidc_cfg *c = ap_get_module_config(r->server->module_config, &auth_openidc_module); /* log some stuff about the incoming HTTP request */ oidc_debug(r, "incoming request: \"%s?%s\", ap_is_initial_req(r)=%d", r->parsed_uri.path, r->args, ap_is_initial_req(r)); /* see if any authentication has been defined at all */ const char *current_auth = ap_auth_type(r); if (current_auth == NULL) return DECLINED; /* see if we've configured OpenID Connect user authentication for this request */ if (strcasecmp(current_auth, OIDC_AUTH_TYPE_OPENID_CONNECT) == 0) { r->ap_auth_type = (char*) current_auth; return oidc_check_userid_openidc(r, c); } /* see if we've configured OAuth 2.0 access control for this request */ if (strcasecmp(current_auth, OIDC_AUTH_TYPE_OPENID_OAUTH20) == 0) { r->ap_auth_type = (char*) current_auth; return oidc_oauth_check_userid(r, c, NULL); } /* see if we've configured "mixed mode" for this request */ if (strcasecmp(current_auth, OIDC_AUTH_TYPE_OPENID_BOTH) == 0) return oidc_check_mixed_userid_oauth(r, c); /* this is not for us but for some other handler */ return DECLINED; } /* * get 
the claims and id_token from request state */ static void oidc_authz_get_claims_and_idtoken(request_rec *r, json_t **claims, json_t **id_token) { const char *s_claims = oidc_request_state_get(r, OIDC_REQUEST_STATE_KEY_CLAIMS); if (s_claims != NULL) oidc_util_decode_json_object(r, s_claims, claims); const char *s_id_token = oidc_request_state_get(r, OIDC_REQUEST_STATE_KEY_IDTOKEN); if (s_id_token != NULL) oidc_util_decode_json_object(r, s_id_token, id_token); } #if MODULE_MAGIC_NUMBER_MAJOR >= 20100714 #define OIDC_OAUTH_BEARER_SCOPE_ERROR "OIDC_OAUTH_BEARER_SCOPE_ERROR" #define OIDC_OAUTH_BEARER_SCOPE_ERROR_VALUE "Bearer error=\"insufficient_scope\", error_description=\"Different scope(s) or other claims required\"" /* * find out which action we need to take when encountering an unauthorized request */ static authz_status oidc_handle_unauthorized_user24(request_rec *r) { oidc_debug(r, "enter"); oidc_cfg *c = ap_get_module_config(r->server->module_config, &auth_openidc_module); if (apr_strnatcasecmp((const char*) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_OAUTH20) == 0) { oidc_debug(r, "setting environment variable %s to \"%s\" for usage in mod_headers", OIDC_OAUTH_BEARER_SCOPE_ERROR, OIDC_OAUTH_BEARER_SCOPE_ERROR_VALUE); apr_table_set(r->subprocess_env, OIDC_OAUTH_BEARER_SCOPE_ERROR, OIDC_OAUTH_BEARER_SCOPE_ERROR_VALUE); return AUTHZ_DENIED; } /* see if we've configured OIDCUnAutzAction for this path */ switch (oidc_dir_cfg_unautz_action(r)) { // TODO: document that AuthzSendForbiddenOnFailure is required to return 403 FORBIDDEN case OIDC_UNAUTZ_RETURN403: case OIDC_UNAUTZ_RETURN401: return AUTHZ_DENIED; break; case OIDC_UNAUTZ_AUTHENTICATE: /* * exception handling: if this looks like a XMLHttpRequest call we * won't redirect the user and thus avoid creating a state cookie * for a non-browser (= Javascript) call that will never return from the OP */ if (oidc_is_xml_http_request(r) == TRUE) return AUTHZ_DENIED; break; } oidc_authenticate_user(r, c, NULL, oidc_get_current_url(r), NULL, NULL, NULL, oidc_dir_cfg_path_auth_request_params(r), oidc_dir_cfg_path_scope(r)); const char *location = oidc_util_hdr_out_location_get(r); if (location != NULL) { oidc_debug(r, "send HTML refresh with authorization redirect: %s", location); char *html_head = apr_psprintf(r->pool, "<meta http-equiv=\"refresh\" content=\"0; url=%s\">", location); oidc_util_html_send(r, "Stepup Authentication", html_head, NULL, NULL, HTTP_UNAUTHORIZED); /* * a hack for Apache 2.4 to prevent it from writing its own 401 HTML document * text by making ap_send_error_response in http_protocol.c return early... */ r->header_only = 1; } return AUTHZ_DENIED; } /* * generic Apache >=2.4 authorization hook for this module * handles both OpenID Connect or OAuth 2.0 in the same way, based on the claims stored in the session */ authz_status oidc_authz_checker(request_rec *r, const char *require_args, const void *parsed_require_args, oidc_authz_match_claim_fn_type match_claim_fn) { oidc_debug(r, "enter: require_args=\"%s\"", require_args); /* check for anonymous access and PASS mode */ if (r->user != NULL && strlen(r->user) == 0) { r->user = NULL; if (oidc_dir_cfg_unauth_action(r) == OIDC_UNAUTH_PASS) return AUTHZ_GRANTED; } /* get the set of claims from the request state (they've been set in the authentication part earlier */ json_t *claims = NULL, *id_token = NULL; oidc_authz_get_claims_and_idtoken(r, &claims, &id_token); /* merge id_token claims (e.g. 
"iss") in to claims json object */ if (claims) oidc_util_json_merge(r, id_token, claims); /* dispatch to the >=2.4 specific authz routine */ authz_status rc = oidc_authz_worker24(r, claims ? claims : id_token, require_args, parsed_require_args, match_claim_fn); /* cleanup */ if (claims) json_decref(claims); if (id_token) json_decref(id_token); if ((rc == AUTHZ_DENIED) && ap_auth_type(r)) rc = oidc_handle_unauthorized_user24(r); return rc; } authz_status oidc_authz_checker_claim(request_rec *r, const char *require_args, const void *parsed_require_args) { return oidc_authz_checker(r, require_args, parsed_require_args, oidc_authz_match_claim); } #ifdef USE_LIBJQ authz_status oidc_authz_checker_claims_expr(request_rec *r, const char *require_args, const void *parsed_require_args) { return oidc_authz_checker(r, require_args, parsed_require_args, oidc_authz_match_claims_expr); } #endif #else /* * find out which action we need to take when encountering an unauthorized request */ static int oidc_handle_unauthorized_user22(request_rec *r) { oidc_cfg *c = ap_get_module_config(r->server->module_config, &auth_openidc_module); if (apr_strnatcasecmp((const char *) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_OAUTH20) == 0) { oidc_oauth_return_www_authenticate(r, "insufficient_scope", "Different scope(s) or other claims required"); return HTTP_UNAUTHORIZED; } /* see if we've configured OIDCUnAutzAction for this path */ switch (oidc_dir_cfg_unautz_action(r)) { case OIDC_UNAUTZ_RETURN403: return HTTP_FORBIDDEN; case OIDC_UNAUTZ_RETURN401: return HTTP_UNAUTHORIZED; case OIDC_UNAUTZ_AUTHENTICATE: /* * exception handling: if this looks like a XMLHttpRequest call we * won't redirect the user and thus avoid creating a state cookie * for a non-browser (= Javascript) call that will never return from the OP */ if (oidc_is_xml_http_request(r) == TRUE) return HTTP_UNAUTHORIZED; } return oidc_authenticate_user(r, c, NULL, oidc_get_current_url(r), NULL, NULL, NULL, oidc_dir_cfg_path_auth_request_params(r), oidc_dir_cfg_path_scope(r)); } /* * generic Apache <2.4 authorization hook for this module * handles both OpenID Connect and OAuth 2.0 in the same way, based on the claims stored in the request context */ int oidc_auth_checker(request_rec *r) { /* check for anonymous access and PASS mode */ if (r->user != NULL && strlen(r->user) == 0) { r->user = NULL; if (oidc_dir_cfg_unauth_action(r) == OIDC_UNAUTH_PASS) return OK; } /* get the set of claims from the request state (they've been set in the authentication part earlier */ json_t *claims = NULL, *id_token = NULL; oidc_authz_get_claims_and_idtoken(r, &claims, &id_token); /* get the Require statements */ const apr_array_header_t * const reqs_arr = ap_requires(r); /* see if we have any */ const require_line * const reqs = reqs_arr ? (require_line *) reqs_arr->elts : NULL; if (!reqs_arr) { oidc_debug(r, "no require statements found, so declining to perform authorization."); return DECLINED; } /* merge id_token claims (e.g. "iss") in to claims json object */ if (claims) oidc_util_json_merge(r, id_token, claims); /* dispatch to the <2.4 specific authz routine */ int rc = oidc_authz_worker22(r, claims ? 
claims : id_token, reqs, reqs_arr->nelts); /* cleanup */ if (claims) json_decref(claims); if (id_token) json_decref(id_token); if ((rc == HTTP_UNAUTHORIZED) && ap_auth_type(r)) rc = oidc_handle_unauthorized_user22(r); return rc; } #endif apr_byte_t oidc_enabled(request_rec *r) { if (ap_auth_type(r) == NULL) return FALSE; if (apr_strnatcasecmp((const char*) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_CONNECT) == 0) return TRUE; if (apr_strnatcasecmp((const char*) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_OAUTH20) == 0) return TRUE; if (apr_strnatcasecmp((const char*) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_BOTH) == 0) return TRUE; return FALSE; } /* * handle content generating requests */ int oidc_content_handler(request_rec *r) { oidc_cfg *c = ap_get_module_config(r->server->module_config, &auth_openidc_module); int rc = DECLINED; /* track if the session needs to be updated/saved into the cache */ apr_byte_t needs_save = FALSE; oidc_session_t *session = NULL; if (oidc_enabled(r) && oidc_util_request_matches_url(r, oidc_get_redirect_uri(r, c))) { if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_INFO)) { oidc_session_load(r, &session); rc = oidc_handle_existing_session(r, c, session, &needs_save); if (rc == OK) /* handle request for session info */ rc = oidc_handle_info_request(r, c, session, needs_save); /* free resources allocated for the session */ oidc_session_free(r, session); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_JWKS)) { /* handle JWKs request */ rc = oidc_handle_jwks(r, c); } } return rc; } extern const command_rec oidc_config_cmds[]; module AP_MODULE_DECLARE_DATA auth_openidc_module = { STANDARD20_MODULE_STUFF, oidc_create_dir_config, oidc_merge_dir_config, oidc_create_server_config, oidc_merge_server_config, oidc_config_cmds, oidc_register_hooks };
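/*
 * Illustrative sketch only, not part of mod_auth_openidc: a minimal, standalone
 * approximation of the checks that oidc_validate_redirect_url applies to a
 * post-logout/return URL once no hostname could be parsed from it. The name
 * sketch_is_unsafe_relative_url() and the sample values are hypothetical and
 * exist only for this example; the real function additionally matches against
 * the configured allow-list and the current request's hostname.
 */
#include <stdio.h>
#include <string.h>

/* return 1 when a hostname-less URL should be rejected as a redirect target */
static int sketch_is_unsafe_relative_url(const char *url) {
	/* must be relative, i.e. start with a single '/' */
	if (url[0] != '/')
		return 1;
	/* "//host" and "/\host" are treated as protocol-relative URLs by browsers */
	if (url[1] == '/' || url[1] == '\\')
		return 1;
	/* CR or LF would allow HTTP response splitting via the Location header */
	if (strchr(url, '\n') != NULL || strchr(url, '\r') != NULL)
		return 1;
	return 0;
}

/* usage example: the first value is accepted, the others are rejected */
int main(void) {
	const char *samples[] = { "/app/landing", "//evil.example",
			"/\\evil.example", "/ok\r\nSet-Cookie: x=y" };
	unsigned i;
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-28s -> %s\n", samples[i],
				sketch_is_unsafe_relative_url(samples[i]) ? "reject" : "accept");
	return 0;
}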
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*************************************************************************** * Copyright (C) 2017-2021 ZmartZone Holding BV * Copyright (C) 2013-2017 Ping Identity Corporation * All rights reserved. * * DISCLAIMER OF WARRANTIES: * * THE SOFTWARE PROVIDED HEREUNDER IS PROVIDED ON AN "AS IS" BASIS, WITHOUT * ANY WARRANTIES OR REPRESENTATIONS EXPRESS, IMPLIED OR STATUTORY; INCLUDING, * WITHOUT LIMITATION, WARRANTIES OF QUALITY, PERFORMANCE, NONINFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. NOR ARE THERE ANY * WARRANTIES CREATED BY A COURSE OR DEALING, COURSE OF PERFORMANCE OR TRADE * USAGE. FURTHERMORE, THERE ARE NO WARRANTIES THAT THE SOFTWARE WILL MEET * YOUR NEEDS OR BE FREE FROM ERRORS, OR THAT THE OPERATION OF THE SOFTWARE * WILL BE UNINTERRUPTED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Initially based on mod_auth_cas.c: * https://github.com/Jasig/mod_auth_cas * * Other code copied/borrowed/adapted: * shared memory caching: mod_auth_mellon * * @Author: Hans Zandbelt - hans.zandbelt@zmartzone.eu * **************************************************************************/ #include "apr_hash.h" #include "apr_strings.h" #include "ap_config.h" #include "ap_provider.h" #include "apr_lib.h" #include "apr_file_io.h" #include "apr_sha1.h" #include "apr_base64.h" #include "httpd.h" #include "http_core.h" #include "http_config.h" #include "http_log.h" #include "http_protocol.h" #include "http_request.h" #include "mod_auth_openidc.h" #define OIDC_REFRESH_ERROR 2 static int oidc_handle_logout_request(request_rec *r, oidc_cfg *c, oidc_session_t *session, const char *url); // TODO: // - sort out oidc_cfg vs. oidc_dir_cfg stuff // - rigid input checking on discovery responses // - check self-issued support // - README.quickstart // - refresh metadata once-per too? (for non-signing key changes) extern module AP_MODULE_DECLARE_DATA auth_openidc_module; /* * clean any suspicious headers in the HTTP request sent by the user agent */ static void oidc_scrub_request_headers(request_rec *r, const char *claim_prefix, apr_hash_t *scrub) { const int prefix_len = claim_prefix ? 
strlen(claim_prefix) : 0; /* get an array representation of the incoming HTTP headers */ const apr_array_header_t *const h = apr_table_elts(r->headers_in); /* table to keep the non-suspicious headers */ apr_table_t *clean_headers = apr_table_make(r->pool, h->nelts); /* loop over the incoming HTTP headers */ const apr_table_entry_t *const e = (const apr_table_entry_t*) h->elts; int i; for (i = 0; i < h->nelts; i++) { const char *const k = e[i].key; /* is this header's name equivalent to a header that needs scrubbing? */ const char *hdr = (k != NULL) && (scrub != NULL) ? apr_hash_get(scrub, k, APR_HASH_KEY_STRING) : NULL; const int header_matches = (hdr != NULL) && (oidc_strnenvcmp(k, hdr, -1) == 0); /* * would this header be interpreted as a mod_auth_openidc attribute? Note * that prefix_len will be zero if no attr_prefix is defined, * so this will always be false. Also note that we do not * scrub headers if the prefix is empty because every header * would match. */ const int prefix_matches = (k != NULL) && prefix_len && (oidc_strnenvcmp(k, claim_prefix, prefix_len) == 0); /* add to the clean_headers if non-suspicious, skip and report otherwise */ if (!prefix_matches && !header_matches) { apr_table_addn(clean_headers, k, e[i].val); } else { oidc_warn(r, "scrubbed suspicious request header (%s: %.32s)", k, e[i].val); } } /* overwrite the incoming headers with the cleaned result */ r->headers_in = clean_headers; } /* * scrub all mod_auth_openidc related headers */ void oidc_scrub_headers(request_rec *r) { oidc_cfg *cfg = ap_get_module_config(r->server->module_config, &auth_openidc_module); const char *prefix = oidc_cfg_claim_prefix(r); apr_hash_t *hdrs = apr_hash_make(r->pool); if (apr_strnatcmp(prefix, "") == 0) { if ((cfg->white_listed_claims != NULL) && (apr_hash_count(cfg->white_listed_claims) > 0)) hdrs = apr_hash_overlay(r->pool, cfg->white_listed_claims, hdrs); else oidc_warn(r, "both " OIDCClaimPrefix " and " OIDCWhiteListedClaims " are empty: this renders an insecure setup!"); } char *authn_hdr = oidc_cfg_dir_authn_header(r); if (authn_hdr != NULL) apr_hash_set(hdrs, authn_hdr, APR_HASH_KEY_STRING, authn_hdr); /* * scrub all headers starting with OIDC_ first */ oidc_scrub_request_headers(r, OIDC_DEFAULT_HEADER_PREFIX, hdrs); /* * then see if the claim headers need to be removed on top of that * (i.e. the prefix does not start with the default OIDC_) */ if ((strstr(prefix, OIDC_DEFAULT_HEADER_PREFIX) != prefix)) { oidc_scrub_request_headers(r, prefix, NULL); } } /* * strip the session cookie from the headers sent to the application/backend */ void oidc_strip_cookies(request_rec *r) { char *cookie, *ctx, *result = NULL; const char *name = NULL; int i; apr_array_header_t *strip = oidc_dir_cfg_strip_cookies(r); char *cookies = apr_pstrdup(r->pool, oidc_util_hdr_in_cookie_get(r)); if ((cookies != NULL) && (strip != NULL)) { oidc_debug(r, "looking for the following cookies to strip from cookie header: %s", apr_array_pstrcat(r->pool, strip, OIDC_CHAR_COMMA)); cookie = apr_strtok(cookies, OIDC_STR_SEMI_COLON, &ctx); do { while (cookie != NULL && *cookie == OIDC_CHAR_SPACE) cookie++; for (i = 0; i < strip->nelts; i++) { name = ((const char**) strip->elts)[i]; if ((strncmp(cookie, name, strlen(name)) == 0) && (cookie[strlen(name)] == OIDC_CHAR_EQUAL)) { oidc_debug(r, "stripping: %s", name); break; } } if (i == strip->nelts) { result = result ? 
apr_psprintf(r->pool, "%s%s %s", result, OIDC_STR_SEMI_COLON, cookie) : cookie; } cookie = apr_strtok(NULL, OIDC_STR_SEMI_COLON, &ctx); } while (cookie != NULL); oidc_util_hdr_in_cookie_set(r, result); } } #define OIDC_SHA1_LEN 20 /* * calculates a hash value based on request fingerprint plus a provided nonce string. */ static char* oidc_get_browser_state_hash(request_rec *r, oidc_cfg *c, const char *nonce) { oidc_debug(r, "enter"); /* helper to hold to header values */ const char *value = NULL; /* the hash context */ apr_sha1_ctx_t sha1; /* Initialize the hash context */ apr_sha1_init(&sha1); if (c->state_input_headers & OIDC_STATE_INPUT_HEADERS_X_FORWARDED_FOR) { /* get the X-FORWARDED-FOR header value */ value = oidc_util_hdr_in_x_forwarded_for_get(r); /* if we have a value for this header, concat it to the hash input */ if (value != NULL) apr_sha1_update(&sha1, value, strlen(value)); } if (c->state_input_headers & OIDC_STATE_INPUT_HEADERS_USER_AGENT) { /* get the USER-AGENT header value */ value = oidc_util_hdr_in_user_agent_get(r); /* if we have a value for this header, concat it to the hash input */ if (value != NULL) apr_sha1_update(&sha1, value, strlen(value)); } /* get the remote client IP address or host name */ /* int remotehost_is_ip; value = ap_get_remote_host(r->connection, r->per_dir_config, REMOTE_NOLOOKUP, &remotehost_is_ip); apr_sha1_update(&sha1, value, strlen(value)); */ /* concat the nonce parameter to the hash input */ apr_sha1_update(&sha1, nonce, strlen(nonce)); /* concat the token binding ID if present */ value = oidc_util_get_provided_token_binding_id(r); if (value != NULL) { oidc_debug(r, "Provided Token Binding ID environment variable found; adding its value to the state"); apr_sha1_update(&sha1, value, strlen(value)); } /* finalize the hash input and calculate the resulting hash output */ unsigned char hash[OIDC_SHA1_LEN]; apr_sha1_final(hash, &sha1); /* base64url-encode the resulting hash and return it */ char *result = NULL; oidc_base64url_encode(r, &result, (const char*) hash, OIDC_SHA1_LEN, TRUE); return result; } /* * return the name for the state cookie */ static char* oidc_get_state_cookie_name(request_rec *r, const char *state) { return apr_psprintf(r->pool, "%s%s", oidc_cfg_dir_state_cookie_prefix(r), state); } /* * return the static provider configuration, i.e. from a metadata URL or configuration primitives */ static apr_byte_t oidc_provider_static_config(request_rec *r, oidc_cfg *c, oidc_provider_t **provider) { json_t *j_provider = NULL; char *s_json = NULL; /* see if we should configure a static provider based on external (cached) metadata */ if ((c->metadata_dir != NULL) || (c->provider.metadata_url == NULL)) { *provider = &c->provider; return TRUE; } oidc_cache_get_provider(r, c->provider.metadata_url, &s_json); if (s_json == NULL) { if (oidc_metadata_provider_retrieve(r, c, NULL, c->provider.metadata_url, &j_provider, &s_json) == FALSE) { oidc_error(r, "could not retrieve metadata from url: %s", c->provider.metadata_url); return FALSE; } oidc_cache_set_provider(r, c->provider.metadata_url, s_json, apr_time_now() + (c->provider_metadata_refresh_interval <= 0 ? 
apr_time_from_sec( OIDC_CACHE_PROVIDER_METADATA_EXPIRY_DEFAULT) : c->provider_metadata_refresh_interval)); } else { oidc_util_decode_json_object(r, s_json, &j_provider); /* check to see if it is valid metadata */ if (oidc_metadata_provider_is_valid(r, c, j_provider, NULL) == FALSE) { oidc_error(r, "cache corruption detected: invalid metadata from url: %s", c->provider.metadata_url); return FALSE; } } *provider = apr_pcalloc(r->pool, sizeof(oidc_provider_t)); memcpy(*provider, &c->provider, sizeof(oidc_provider_t)); if (oidc_metadata_provider_parse(r, c, j_provider, *provider) == FALSE) { oidc_error(r, "could not parse metadata from url: %s", c->provider.metadata_url); if (j_provider) json_decref(j_provider); return FALSE; } json_decref(j_provider); return TRUE; } /* * return the oidc_provider_t struct for the specified issuer */ static oidc_provider_t* oidc_get_provider_for_issuer(request_rec *r, oidc_cfg *c, const char *issuer, apr_byte_t allow_discovery) { /* by default we'll assume that we're dealing with a single statically configured OP */ oidc_provider_t *provider = NULL; if (oidc_provider_static_config(r, c, &provider) == FALSE) return NULL; /* unless a metadata directory was configured, so we'll try and get the provider settings from there */ if (c->metadata_dir != NULL) { /* try and get metadata from the metadata directory for the OP that sent this response */ if ((oidc_metadata_get(r, c, issuer, &provider, allow_discovery) == FALSE) || (provider == NULL)) { /* don't know nothing about this OP/issuer */ oidc_error(r, "no provider metadata found for issuer \"%s\"", issuer); return NULL; } } return provider; } /* * find out whether the request is a response from an IDP discovery page */ static apr_byte_t oidc_is_discovery_response(request_rec *r, oidc_cfg *cfg) { /* * prereq: this is a call to the configured redirect_uri, now see if: * the OIDC_DISC_OP_PARAM is present */ return oidc_util_request_has_parameter(r, OIDC_DISC_OP_PARAM) || oidc_util_request_has_parameter(r, OIDC_DISC_USER_PARAM); } /* * return the HTTP method being called: only for POST data persistence purposes */ static const char* oidc_original_request_method(request_rec *r, oidc_cfg *cfg, apr_byte_t handle_discovery_response) { const char *method = OIDC_METHOD_GET; char *m = NULL; if ((handle_discovery_response == TRUE) && (oidc_util_request_matches_url(r, oidc_get_redirect_uri(r, cfg))) && (oidc_is_discovery_response(r, cfg))) { oidc_util_get_request_parameter(r, OIDC_DISC_RM_PARAM, &m); if (m != NULL) method = apr_pstrdup(r->pool, m); } else { /* * if POST preserve is not enabled for this location, there's no point in preserving * the method either which would result in POSTing empty data on return; * so we revert to legacy behavior */ if (oidc_cfg_dir_preserve_post(r) == 0) return OIDC_METHOD_GET; const char *content_type = oidc_util_hdr_in_content_type_get(r); if ((r->method_number == M_POST) && (apr_strnatcmp(content_type, OIDC_CONTENT_TYPE_FORM_ENCODED) == 0)) method = OIDC_METHOD_FORM_POST; } oidc_debug(r, "return: %s", method); return method; } /* * send an OpenID Connect authorization request to the specified provider preserving POST parameters using HTML5 storage */ apr_byte_t oidc_post_preserve_javascript(request_rec *r, const char *location, char **javascript, char **javascript_method) { if (oidc_cfg_dir_preserve_post(r) == 0) return FALSE; oidc_debug(r, "enter"); oidc_cfg *cfg = ap_get_module_config(r->server->module_config, &auth_openidc_module); const char *method = oidc_original_request_method(r, 
cfg, FALSE); if (apr_strnatcmp(method, OIDC_METHOD_FORM_POST) != 0) return FALSE; /* read the parameters that are POST-ed to us */ apr_table_t *params = apr_table_make(r->pool, 8); if (oidc_util_read_post_params(r, params, FALSE, NULL) == FALSE) { oidc_error(r, "something went wrong when reading the POST parameters"); return FALSE; } const apr_array_header_t *arr = apr_table_elts(params); const apr_table_entry_t *elts = (const apr_table_entry_t*) arr->elts; int i; char *json = ""; for (i = 0; i < arr->nelts; i++) { json = apr_psprintf(r->pool, "%s'%s': '%s'%s", json, oidc_util_escape_string(r, elts[i].key), oidc_util_escape_string(r, elts[i].val), i < arr->nelts - 1 ? "," : ""); } json = apr_psprintf(r->pool, "{ %s }", json); const char *jmethod = "preserveOnLoad"; const char *jscript = apr_psprintf(r->pool, " <script type=\"text/javascript\">\n" " function %s() {\n" " sessionStorage.setItem('mod_auth_openidc_preserve_post_params', JSON.stringify(%s));\n" " %s" " }\n" " </script>\n", jmethod, json, location ? apr_psprintf(r->pool, "window.location='%s';\n", location) : ""); if (location == NULL) { if (javascript_method) *javascript_method = apr_pstrdup(r->pool, jmethod); if (javascript) *javascript = apr_pstrdup(r->pool, jscript); } else { oidc_util_html_send(r, "Preserving...", jscript, jmethod, "<p>Preserving...</p>", OK); } return TRUE; } /* * restore POST parameters on original_url from HTML5 local storage */ static int oidc_request_post_preserved_restore(request_rec *r, const char *original_url) { oidc_debug(r, "enter: original_url=%s", original_url); const char *method = "postOnLoad"; const char *script = apr_psprintf(r->pool, " <script type=\"text/javascript\">\n" " function str_decode(string) {\n" " try {\n" " result = decodeURIComponent(string);\n" " } catch (e) {\n" " result = unescape(string);\n" " }\n" " return result;\n" " }\n" " function %s() {\n" " var mod_auth_openidc_preserve_post_params = JSON.parse(sessionStorage.getItem('mod_auth_openidc_preserve_post_params'));\n" " sessionStorage.removeItem('mod_auth_openidc_preserve_post_params');\n" " for (var key in mod_auth_openidc_preserve_post_params) {\n" " var input = document.createElement(\"input\");\n" " input.name = str_decode(key);\n" " input.value = str_decode(mod_auth_openidc_preserve_post_params[key]);\n" " input.type = \"hidden\";\n" " document.forms[0].appendChild(input);\n" " }\n" " document.forms[0].action = \"%s\";\n" " document.forms[0].submit();\n" " }\n" " </script>\n", method, original_url); const char *body = " <p>Restoring...</p>\n" " <form method=\"post\"></form>\n"; return oidc_util_html_send(r, "Restoring...", script, method, body, OK); } typedef struct oidc_state_cookies_t { char *name; apr_time_t timestamp; struct oidc_state_cookies_t *next; } oidc_state_cookies_t; static int oidc_delete_oldest_state_cookies(request_rec *r, int number_of_valid_state_cookies, int max_number_of_state_cookies, oidc_state_cookies_t *first) { oidc_state_cookies_t *cur = NULL, *prev = NULL, *prev_oldest = NULL, *oldest = NULL; while (number_of_valid_state_cookies >= max_number_of_state_cookies) { oldest = first; prev_oldest = NULL; prev = first; cur = first->next; while (cur) { if ((cur->timestamp < oldest->timestamp)) { oldest = cur; prev_oldest = prev; } prev = cur; cur = cur->next; } oidc_warn(r, "deleting oldest state cookie: %s (time until expiry %" APR_TIME_T_FMT " seconds)", oldest->name, apr_time_sec(oldest->timestamp - apr_time_now())); oidc_util_set_cookie(r, oldest->name, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); 
if (prev_oldest) prev_oldest->next = oldest->next; else first = first->next; number_of_valid_state_cookies--; } return number_of_valid_state_cookies; } /* * clean state cookies that have expired i.e. for outstanding requests that will never return * successfully and return the number of remaining valid cookies/outstanding-requests while * doing so */ static int oidc_clean_expired_state_cookies(request_rec *r, oidc_cfg *c, const char *currentCookieName, int delete_oldest) { int number_of_valid_state_cookies = 0; oidc_state_cookies_t *first = NULL, *last = NULL; char *cookie, *tokenizerCtx = NULL; char *cookies = apr_pstrdup(r->pool, oidc_util_hdr_in_cookie_get(r)); if (cookies != NULL) { cookie = apr_strtok(cookies, OIDC_STR_SEMI_COLON, &tokenizerCtx); while (cookie != NULL) { while (*cookie == OIDC_CHAR_SPACE) cookie++; if (strstr(cookie, oidc_cfg_dir_state_cookie_prefix(r)) == cookie) { char *cookieName = cookie; while (cookie != NULL && *cookie != OIDC_CHAR_EQUAL) cookie++; if (*cookie == OIDC_CHAR_EQUAL) { *cookie = '\0'; cookie++; if ((currentCookieName == NULL) || (apr_strnatcmp(cookieName, currentCookieName) != 0)) { oidc_proto_state_t *proto_state = oidc_proto_state_from_cookie(r, c, cookie); if (proto_state != NULL) { json_int_t ts = oidc_proto_state_get_timestamp( proto_state); if (apr_time_now() > ts + apr_time_from_sec(c->state_timeout)) { oidc_warn(r, "state (%s) has expired (original_url=%s)", cookieName, oidc_proto_state_get_original_url( proto_state)); oidc_util_set_cookie(r, cookieName, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); } else { if (first == NULL) { first = apr_pcalloc(r->pool, sizeof(oidc_state_cookies_t)); last = first; } else { last->next = apr_pcalloc(r->pool, sizeof(oidc_state_cookies_t)); last = last->next; } last->name = cookieName; last->timestamp = ts; last->next = NULL; number_of_valid_state_cookies++; } oidc_proto_state_destroy(proto_state); } else { oidc_warn(r, "state cookie could not be retrieved/decoded, deleting: %s", cookieName); oidc_util_set_cookie(r, cookieName, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); } } } } cookie = apr_strtok(NULL, OIDC_STR_SEMI_COLON, &tokenizerCtx); } } if (delete_oldest > 0) number_of_valid_state_cookies = oidc_delete_oldest_state_cookies(r, number_of_valid_state_cookies, c->max_number_of_state_cookies, first); return number_of_valid_state_cookies; } /* * restore the state that was maintained between authorization request and response in an encrypted cookie */ static apr_byte_t oidc_restore_proto_state(request_rec *r, oidc_cfg *c, const char *state, oidc_proto_state_t **proto_state) { oidc_debug(r, "enter"); const char *cookieName = oidc_get_state_cookie_name(r, state); /* clean expired state cookies to avoid pollution */ oidc_clean_expired_state_cookies(r, c, cookieName, FALSE); /* get the state cookie value first */ char *cookieValue = oidc_util_get_cookie(r, cookieName); if (cookieValue == NULL) { oidc_error(r, "no \"%s\" state cookie found: check domain and samesite cookie settings", cookieName); return FALSE; } /* clear state cookie because we don't need it anymore */ oidc_util_set_cookie(r, cookieName, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); *proto_state = oidc_proto_state_from_cookie(r, c, cookieValue); if (*proto_state == NULL) return FALSE; const char *nonce = oidc_proto_state_get_nonce(*proto_state); /* calculate the hash of the browser fingerprint concatenated with the nonce */ char *calc = oidc_get_browser_state_hash(r, c, nonce); /* compare the calculated hash with the value provided in the authorization 
response */ if (apr_strnatcmp(calc, state) != 0) { oidc_error(r, "calculated state from cookie does not match state parameter passed back in URL: \"%s\" != \"%s\"", state, calc); oidc_proto_state_destroy(*proto_state); return FALSE; } apr_time_t ts = oidc_proto_state_get_timestamp(*proto_state); /* check that the timestamp is not beyond the valid interval */ if (apr_time_now() > ts + apr_time_from_sec(c->state_timeout)) { oidc_error(r, "state has expired"); if ((c->default_sso_url == NULL) || (apr_table_get(r->subprocess_env, "OIDC_NO_DEFAULT_URL_ON_STATE_TIMEOUT") != NULL)) { oidc_util_html_send_error(r, c->error_template, "Invalid Authentication Response", apr_psprintf(r->pool, "This is due to a timeout; please restart your authentication session by re-entering the URL/bookmark you originally wanted to access: %s", oidc_proto_state_get_original_url(*proto_state)), OK); /* * a hack for Apache 2.4 to prevent it from writing its own 500/400/302 HTML document * text by making ap_send_error_response in http_protocol.c return early... */ r->header_only = 1; } oidc_proto_state_destroy(*proto_state); return FALSE; } /* add the state */ oidc_proto_state_set_state(*proto_state, state); /* log the restored state object */ oidc_debug(r, "restored state: %s", oidc_proto_state_to_string(r, *proto_state)); /* we've made it */ return TRUE; } /* * set the state that is maintained between an authorization request and an authorization response * in a cookie in the browser that is cryptographically bound to that state */ static int oidc_authorization_request_set_cookie(request_rec *r, oidc_cfg *c, const char *state, oidc_proto_state_t *proto_state) { /* * create a cookie consisting of 8 elements: * random value, original URL, original method, issuer, response_type, response_mod, prompt and timestamp * encoded as JSON, encrypting the resulting JSON value */ char *cookieValue = oidc_proto_state_to_cookie(r, c, proto_state); if (cookieValue == NULL) return HTTP_INTERNAL_SERVER_ERROR; /* * clean expired state cookies to avoid pollution and optionally * try to avoid the number of state cookies exceeding a max */ int number_of_cookies = oidc_clean_expired_state_cookies(r, c, NULL, oidc_cfg_delete_oldest_state_cookies(c)); int max_number_of_cookies = oidc_cfg_max_number_of_state_cookies(c); if ((max_number_of_cookies > 0) && (number_of_cookies >= max_number_of_cookies)) { oidc_warn(r, "the number of existing, valid state cookies (%d) has exceeded the limit (%d), no additional authorization request + state cookie can be generated, aborting the request", number_of_cookies, max_number_of_cookies); /* * TODO: the html_send code below caters for the case that there's a user behind a * browser generating this request, rather than a piece of XHR code; how would an * XHR client handle this? 
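	 *
	 * (editorial sketch, not part of the module) the cap that triggers this branch is driven
	 * by counting the still-valid state cookies for this browser; the cleanup code above
	 * additionally decrypts every cookie and drops expired ones, but stripped of that APR
	 * plumbing the counting step amounts to scanning the incoming Cookie header for the
	 * configured state cookie prefix, roughly as below (the prefix literal, the limit and the
	 * <string.h> helpers are assumptions made only for this illustration):
	 *
	 *     static int count_state_cookies(const char *cookie_hdr, const char *prefix) {
	 *         int n = 0;
	 *         size_t len = strlen(prefix);
	 *         if (cookie_hdr == NULL)
	 *             return 0;
	 *         for (const char *p = cookie_hdr; (p = strstr(p, prefix)) != NULL; p += len)
	 *             n++;
	 *         return n;
	 *     }
	 *
	 *     // if (count_state_cookies(hdr, "mod_auth_openidc_state_") >= max) answer with 503
	 *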
*/ /* * it appears that sending content with a 503 turns the HTTP status code * into a 200 so we'll avoid that for now: the user will see Apache specific * readable text anyway * return oidc_util_html_send_error(r, c->error_template, "Too Many Outstanding Requests", apr_psprintf(r->pool, "No authentication request could be generated since there are too many outstanding authentication requests already; you may have to wait up to %d seconds to be able to create a new request", c->state_timeout), HTTP_SERVICE_UNAVAILABLE); */ return HTTP_SERVICE_UNAVAILABLE; } /* assemble the cookie name for the state cookie */ const char *cookieName = oidc_get_state_cookie_name(r, state); /* set it as a cookie */ oidc_util_set_cookie(r, cookieName, cookieValue, -1, OIDC_COOKIE_SAMESITE_LAX(c, r)); return OK; } /* * get the mod_auth_openidc related context from the (userdata in the) request * (used for passing state between various Apache request processing stages and hook callbacks) */ static apr_table_t* oidc_request_state(request_rec *rr) { /* our state is always stored in the main request */ request_rec *r = (rr->main != NULL) ? rr->main : rr; /* our state is a table, get it */ apr_table_t *state = NULL; apr_pool_userdata_get((void**) &state, OIDC_USERDATA_KEY, r->pool); /* if it does not exist, we'll create a new table */ if (state == NULL) { state = apr_table_make(r->pool, 5); apr_pool_userdata_set(state, OIDC_USERDATA_KEY, NULL, r->pool); } /* return the resulting table, always non-null now */ return state; } /* * set a name/value pair in the mod_auth_openidc-specific request context * (used for passing state between various Apache request processing stages and hook callbacks) */ void oidc_request_state_set(request_rec *r, const char *key, const char *value) { /* get a handle to the global state, which is a table */ apr_table_t *state = oidc_request_state(r); /* put the name/value pair in that table */ apr_table_set(state, key, value); } /* * get a name/value pair from the mod_auth_openidc-specific request context * (used for passing state between various Apache request processing stages and hook callbacks) */ const char* oidc_request_state_get(request_rec *r, const char *key) { /* get a handle to the global state, which is a table */ apr_table_t *state = oidc_request_state(r); /* return the value from the table */ return apr_table_get(state, key); } /* * set the claims from a JSON object (c.q. 
id_token or user_info response) stored * in the session in to HTTP headers passed on to the application */ static apr_byte_t oidc_set_app_claims(request_rec *r, const oidc_cfg *const cfg, oidc_session_t *session, const char *s_claims) { json_t *j_claims = NULL; /* decode the string-encoded attributes in to a JSON structure */ if (s_claims != NULL) { if (oidc_util_decode_json_object(r, s_claims, &j_claims) == FALSE) return FALSE; } /* set the resolved claims a HTTP headers for the application */ if (j_claims != NULL) { oidc_util_set_app_infos(r, j_claims, oidc_cfg_claim_prefix(r), cfg->claim_delimiter, oidc_cfg_dir_pass_info_in_headers(r), oidc_cfg_dir_pass_info_in_envvars(r), oidc_cfg_dir_pass_info_base64url(r)); /* release resources */ json_decref(j_claims); } return TRUE; } static int oidc_authenticate_user(request_rec *r, oidc_cfg *c, oidc_provider_t *provider, const char *original_url, const char *login_hint, const char *id_token_hint, const char *prompt, const char *auth_request_params, const char *path_scope); /* * log message about max session duration */ static void oidc_log_session_expires(request_rec *r, const char *msg, apr_time_t session_expires) { char buf[APR_RFC822_DATE_LEN + 1]; apr_rfc822_date(buf, session_expires); oidc_debug(r, "%s: %s (in %" APR_TIME_T_FMT " secs from now)", msg, buf, apr_time_sec(session_expires - apr_time_now())); } /* * see if this is a non-browser request */ static apr_byte_t oidc_is_xml_http_request(request_rec *r) { if ((oidc_util_hdr_in_x_requested_with_get(r) != NULL) && (apr_strnatcasecmp(oidc_util_hdr_in_x_requested_with_get(r), OIDC_HTTP_HDR_VAL_XML_HTTP_REQUEST) == 0)) return TRUE; if ((oidc_util_hdr_in_accept_contains(r, OIDC_CONTENT_TYPE_TEXT_HTML) == FALSE) && (oidc_util_hdr_in_accept_contains(r, OIDC_CONTENT_TYPE_APP_XHTML_XML) == FALSE) && (oidc_util_hdr_in_accept_contains(r, OIDC_CONTENT_TYPE_ANY) == FALSE)) return TRUE; return FALSE; } /* * find out which action we need to take when encountering an unauthenticated request */ static int oidc_handle_unauthenticated_user(request_rec *r, oidc_cfg *c) { /* see if we've configured OIDCUnAuthAction for this path */ switch (oidc_dir_cfg_unauth_action(r)) { case OIDC_UNAUTH_RETURN410: return HTTP_GONE; case OIDC_UNAUTH_RETURN407: return HTTP_PROXY_AUTHENTICATION_REQUIRED; case OIDC_UNAUTH_RETURN401: return HTTP_UNAUTHORIZED; case OIDC_UNAUTH_PASS: r->user = ""; /* * we're not going to pass information about an authenticated user to the application, * but we do need to scrub the headers that mod_auth_openidc would set for security reasons */ oidc_scrub_headers(r); return OK; case OIDC_UNAUTH_AUTHENTICATE: /* * exception handling: if this looks like a XMLHttpRequest call we * won't redirect the user and thus avoid creating a state cookie * for a non-browser (= Javascript) call that will never return from the OP */ if ((oidc_dir_cfg_unauth_expr_is_set(r) == FALSE) && (oidc_is_xml_http_request(r) == TRUE)) return HTTP_UNAUTHORIZED; } /* * else: no session (regardless of whether it is main or sub-request), * and we need to authenticate the user */ return oidc_authenticate_user(r, c, NULL, oidc_get_current_url(r), NULL, NULL, NULL, oidc_dir_cfg_path_auth_request_params(r), oidc_dir_cfg_path_scope(r)); } /* * check if maximum session duration was exceeded */ static int oidc_check_max_session_duration(request_rec *r, oidc_cfg *cfg, oidc_session_t *session) { /* get the session expiry from the session data */ apr_time_t session_expires = oidc_session_get_session_expires(r, session); /* check the expire 
timestamp against the current time */ if (apr_time_now() > session_expires) { oidc_warn(r, "maximum session duration exceeded for user: %s", session->remote_user); oidc_session_kill(r, session); return oidc_handle_unauthenticated_user(r, cfg); } /* log message about max session duration */ oidc_log_session_expires(r, "session max lifetime", session_expires); return OK; } /* * validate received session cookie against the domain it was issued for: * * this handles the case where the cache configured is a the same single memcache, Redis, or file * backend for different (virtual) hosts, or a client-side cookie protected with the same secret * * it also handles the case that a cookie is unexpectedly shared across multiple hosts in * name-based virtual hosting even though the OP(s) would be the same */ static apr_byte_t oidc_check_cookie_domain(request_rec *r, oidc_cfg *cfg, oidc_session_t *session) { const char *c_cookie_domain = cfg->cookie_domain ? cfg->cookie_domain : oidc_get_current_url_host(r); const char *s_cookie_domain = oidc_session_get_cookie_domain(r, session); if ((s_cookie_domain == NULL) || (apr_strnatcmp(c_cookie_domain, s_cookie_domain) != 0)) { oidc_warn(r, "aborting: detected attempt to play cookie against a different domain/host than issued for! (issued=%s, current=%s)", s_cookie_domain, c_cookie_domain); return FALSE; } return TRUE; } /* * get a handle to the provider configuration via the "issuer" stored in the session */ apr_byte_t oidc_get_provider_from_session(request_rec *r, oidc_cfg *c, oidc_session_t *session, oidc_provider_t **provider) { oidc_debug(r, "enter"); /* get the issuer value from the session state */ const char *issuer = oidc_session_get_issuer(r, session); if (issuer == NULL) { oidc_warn(r, "empty or invalid session: no issuer found"); return FALSE; } /* get the provider info associated with the issuer value */ oidc_provider_t *p = oidc_get_provider_for_issuer(r, c, issuer, FALSE); if (p == NULL) { oidc_error(r, "session corrupted: no provider found for issuer: %s", issuer); return FALSE; } *provider = p; return TRUE; } /* * store claims resolved from the userinfo endpoint in the session */ static void oidc_store_userinfo_claims(request_rec *r, oidc_cfg *c, oidc_session_t *session, oidc_provider_t *provider, const char *claims, const char *userinfo_jwt) { oidc_debug(r, "enter"); /* see if we've resolved any claims */ if (claims != NULL) { /* * Successfully decoded a set claims from the response so we can store them * (well actually the stringified representation in the response) * in the session context safely now */ oidc_session_set_userinfo_claims(r, session, claims); if (c->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { /* this will also clear the entry if a JWT was not returned at this point */ oidc_session_set_userinfo_jwt(r, session, userinfo_jwt); } } else { /* * clear the existing claims because we could not refresh them */ oidc_session_set_userinfo_claims(r, session, NULL); oidc_session_set_userinfo_jwt(r, session, NULL); } /* store the last refresh time if we've configured a userinfo refresh interval */ if (provider->userinfo_refresh_interval > 0) oidc_session_reset_userinfo_last_refresh(r, session); } /* * execute refresh token grant to refresh the existing access token */ static apr_byte_t oidc_refresh_access_token(request_rec *r, oidc_cfg *c, oidc_session_t *session, oidc_provider_t *provider, char **new_access_token) { oidc_debug(r, "enter"); /* get the refresh token that was stored in the session */ const char *refresh_token = 
oidc_session_get_refresh_token(r, session); if (refresh_token == NULL) { oidc_warn(r, "refresh token routine called but no refresh_token found in the session"); return FALSE; } /* elements returned in the refresh response */ char *s_id_token = NULL; int expires_in = -1; char *s_token_type = NULL; char *s_access_token = NULL; char *s_refresh_token = NULL; /* refresh the tokens by calling the token endpoint */ if (oidc_proto_refresh_request(r, c, provider, refresh_token, &s_id_token, &s_access_token, &s_token_type, &expires_in, &s_refresh_token) == FALSE) { oidc_error(r, "access_token could not be refreshed"); return FALSE; } /* store the new access_token in the session and discard the old one */ oidc_session_set_access_token(r, session, s_access_token); oidc_session_set_access_token_expires(r, session, expires_in); /* reset the access token refresh timestamp */ oidc_session_reset_access_token_last_refresh(r, session); /* see if we need to return it as a parameter */ if (new_access_token != NULL) *new_access_token = s_access_token; /* if we have a new refresh token (rolling refresh), store it in the session and overwrite the old one */ if (s_refresh_token != NULL) oidc_session_set_refresh_token(r, session, s_refresh_token); return TRUE; } /* * retrieve claims from the userinfo endpoint and return the stringified response */ static const char* oidc_retrieve_claims_from_userinfo_endpoint(request_rec *r, oidc_cfg *c, oidc_provider_t *provider, const char *access_token, oidc_session_t *session, char *id_token_sub, char **userinfo_jwt) { oidc_debug(r, "enter"); char *result = NULL; char *refreshed_access_token = NULL; /* see if a userinfo endpoint is set, otherwise there's nothing to do for us */ if (provider->userinfo_endpoint_url == NULL) { oidc_debug(r, "not retrieving userinfo claims because userinfo_endpoint is not set"); return NULL; } /* see if there's an access token, otherwise we can't call the userinfo endpoint at all */ if (access_token == NULL) { oidc_debug(r, "not retrieving userinfo claims because access_token is not provided"); return NULL; } if ((id_token_sub == NULL) && (session != NULL)) { // when refreshing claims from the userinfo endpoint json_t *id_token_claims = oidc_session_get_idtoken_claims_json(r, session); if (id_token_claims == NULL) { oidc_error(r, "no id_token_claims found in session"); return NULL; } oidc_jose_get_string(r->pool, id_token_claims, OIDC_CLAIM_SUB, FALSE, &id_token_sub, NULL); } // TODO: return code should indicate whether the token expired or some other error occurred // TODO: long-term: session storage should be JSON (with explicit types and less conversion, using standard routines) /* try to get claims from the userinfo endpoint using the provided access token */ if (oidc_proto_resolve_userinfo(r, c, provider, id_token_sub, access_token, &result, userinfo_jwt) == FALSE) { /* see if we have an existing session and we are refreshing the user info claims */ if (session != NULL) { /* first call to user info endpoint failed, but the access token may have just expired, so refresh it */ if (oidc_refresh_access_token(r, c, session, provider, &refreshed_access_token) == TRUE) { /* try again with the new access token */ if (oidc_proto_resolve_userinfo(r, c, provider, id_token_sub, refreshed_access_token, &result, userinfo_jwt) == FALSE) { oidc_error(r, "resolving user info claims with the refreshed access token failed, nothing will be stored in the session"); result = NULL; } } else { oidc_warn(r, "refreshing access token failed, claims will not be 
retrieved/refreshed from the userinfo endpoint"); result = NULL; } } else { oidc_error(r, "resolving user info claims with the existing/provided access token failed, nothing will be stored in the session"); result = NULL; } } return result; } /* * get (new) claims from the userinfo endpoint */ static apr_byte_t oidc_refresh_claims_from_userinfo_endpoint(request_rec *r, oidc_cfg *cfg, oidc_session_t *session) { oidc_provider_t *provider = NULL; const char *claims = NULL; const char *access_token = NULL; char *userinfo_jwt = NULL; /* get the current provider info */ if (oidc_get_provider_from_session(r, cfg, session, &provider) == FALSE) return FALSE; /* see if we can do anything here, i.e. we have a userinfo endpoint and a refresh interval is configured */ apr_time_t interval = apr_time_from_sec( provider->userinfo_refresh_interval); oidc_debug(r, "userinfo_endpoint=%s, interval=%d", provider->userinfo_endpoint_url, provider->userinfo_refresh_interval); if ((provider->userinfo_endpoint_url != NULL) && (interval > 0)) { /* get the last refresh timestamp from the session info */ apr_time_t last_refresh = oidc_session_get_userinfo_last_refresh(r, session); oidc_debug(r, "refresh needed in: %" APR_TIME_T_FMT " seconds", apr_time_sec(last_refresh + interval - apr_time_now())); /* see if we need to refresh again */ if (last_refresh + interval < apr_time_now()) { /* get the current access token */ access_token = oidc_session_get_access_token(r, session); /* retrieve the current claims */ claims = oidc_retrieve_claims_from_userinfo_endpoint(r, cfg, provider, access_token, session, NULL, &userinfo_jwt); /* store claims resolved from userinfo endpoint */ oidc_store_userinfo_claims(r, cfg, session, provider, claims, userinfo_jwt); /* indicated something changed */ return TRUE; } } return FALSE; } /* * copy the claims and id_token from the session to the request state and optionally return them */ static void oidc_copy_tokens_to_request_state(request_rec *r, oidc_session_t *session, const char **s_id_token, const char **s_claims) { const char *id_token = oidc_session_get_idtoken_claims(r, session); const char *claims = oidc_session_get_userinfo_claims(r, session); oidc_debug(r, "id_token=%s claims=%s", id_token, claims); if (id_token != NULL) { oidc_request_state_set(r, OIDC_REQUEST_STATE_KEY_IDTOKEN, id_token); if (s_id_token != NULL) *s_id_token = id_token; } if (claims != NULL) { oidc_request_state_set(r, OIDC_REQUEST_STATE_KEY_CLAIMS, claims); if (s_claims != NULL) *s_claims = claims; } } /* * pass refresh_token, access_token and access_token_expires as headers/environment variables to the application */ static apr_byte_t oidc_session_pass_tokens(request_rec *r, oidc_cfg *cfg, oidc_session_t *session, apr_byte_t *needs_save) { apr_byte_t pass_headers = oidc_cfg_dir_pass_info_in_headers(r); apr_byte_t pass_envvars = oidc_cfg_dir_pass_info_in_envvars(r); apr_byte_t pass_base64url = oidc_cfg_dir_pass_info_base64url(r); /* set the refresh_token in the app headers/variables, if enabled for this location/directory */ const char *refresh_token = oidc_session_get_refresh_token(r, session); if ((oidc_cfg_dir_pass_refresh_token(r) != 0) && (refresh_token != NULL)) { /* pass it to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_REFRESH_TOKEN, refresh_token, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } /* set the access_token in the app headers/variables */ const char *access_token = oidc_session_get_access_token(r, session); if (access_token 
!= NULL) { /* pass it to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_ACCESS_TOKEN, access_token, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } /* set the expiry timestamp in the app headers/variables */ const char *access_token_expires = oidc_session_get_access_token_expires(r, session); if (access_token_expires != NULL) { /* pass it to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_ACCESS_TOKEN_EXP, access_token_expires, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } /* * reset the session inactivity timer * but only do this once per 10% of the inactivity timeout interval (with a max to 60 seconds) * for performance reasons * * now there's a small chance that the session ends 10% (or a minute) earlier than configured/expected * cq. when there's a request after a recent save (so no update) and then no activity happens until * a request comes in just before the session should expire * ("recent" and "just before" refer to 10%-with-a-max-of-60-seconds of the inactivity interval after * the start/last-update and before the expiry of the session respectively) * * this is be deemed acceptable here because of performance gain */ apr_time_t interval = apr_time_from_sec(cfg->session_inactivity_timeout); apr_time_t now = apr_time_now(); apr_time_t slack = interval / 10; if (slack > apr_time_from_sec(60)) slack = apr_time_from_sec(60); if (session->expiry - now < interval - slack) { session->expiry = now + interval; *needs_save = TRUE; } /* log message about session expiry */ oidc_log_session_expires(r, "session inactivity timeout", session->expiry); return TRUE; } static apr_byte_t oidc_refresh_access_token_before_expiry(request_rec *r, oidc_cfg *cfg, oidc_session_t *session, int ttl_minimum, int logout_on_error) { const char *s_access_token_expires = NULL; apr_time_t t_expires = -1; oidc_provider_t *provider = NULL; oidc_debug(r, "ttl_minimum=%d", ttl_minimum); if (ttl_minimum < 0) return FALSE; s_access_token_expires = oidc_session_get_access_token_expires(r, session); if (s_access_token_expires == NULL) { oidc_debug(r, "no access token expires_in stored in the session (i.e. 
returned from in the authorization response), so cannot refresh the access token based on TTL requirement"); return FALSE; } if (oidc_session_get_refresh_token(r, session) == NULL) { oidc_debug(r, "no refresh token stored in the session, so cannot refresh the access token based on TTL requirement"); return FALSE; } if (sscanf(s_access_token_expires, "%" APR_TIME_T_FMT, &t_expires) != 1) { oidc_error(r, "could not parse s_access_token_expires %s", s_access_token_expires); return FALSE; } t_expires = apr_time_from_sec(t_expires - ttl_minimum); oidc_debug(r, "refresh needed in: %" APR_TIME_T_FMT " seconds", apr_time_sec(t_expires - apr_time_now())); if (t_expires > apr_time_now()) return FALSE; if (oidc_get_provider_from_session(r, cfg, session, &provider) == FALSE) return FALSE; if (oidc_refresh_access_token(r, cfg, session, provider, NULL) == FALSE) { oidc_warn(r, "access_token could not be refreshed, logout=%d", logout_on_error & OIDC_LOGOUT_ON_ERROR_REFRESH); if (logout_on_error & OIDC_LOGOUT_ON_ERROR_REFRESH) return OIDC_REFRESH_ERROR; else return FALSE; } return TRUE; } /* * handle the case where we have identified an existing authentication session for a user */ static int oidc_handle_existing_session(request_rec *r, oidc_cfg *cfg, oidc_session_t *session, apr_byte_t *needs_save) { apr_byte_t rv = FALSE; oidc_debug(r, "enter"); /* set the user in the main request for further (incl. sub-request) processing */ r->user = apr_pstrdup(r->pool, session->remote_user); oidc_debug(r, "set remote_user to \"%s\"", r->user); /* get the header name in which the remote user name needs to be passed */ char *authn_header = oidc_cfg_dir_authn_header(r); apr_byte_t pass_headers = oidc_cfg_dir_pass_info_in_headers(r); apr_byte_t pass_envvars = oidc_cfg_dir_pass_info_in_envvars(r); apr_byte_t pass_base64url = oidc_cfg_dir_pass_info_base64url(r); /* verify current cookie domain against issued cookie domain */ if (oidc_check_cookie_domain(r, cfg, session) == FALSE) return HTTP_UNAUTHORIZED; /* check if the maximum session duration was exceeded */ int rc = oidc_check_max_session_duration(r, cfg, session); if (rc != OK) return rc; /* if needed, refresh the access token */ rv = oidc_refresh_access_token_before_expiry(r, cfg, session, oidc_cfg_dir_refresh_access_token_before_expiry(r), oidc_cfg_dir_logout_on_error_refresh(r)); if (rv == OIDC_REFRESH_ERROR) { *needs_save = FALSE; return oidc_handle_logout_request(r, cfg, session, cfg->default_slo_url); } *needs_save |= rv; /* if needed, refresh claims from the user info endpoint */ if (oidc_refresh_claims_from_userinfo_endpoint(r, cfg, session) == TRUE) *needs_save = TRUE; /* * we're going to pass the information that we have to the application, * but first we need to scrub the headers that we're going to use for security reasons */ oidc_scrub_headers(r); /* set the user authentication HTTP header if set and required */ if ((r->user != NULL) && (authn_header != NULL)) oidc_util_hdr_in_set(r, authn_header, r->user); const char *s_claims = NULL; const char *s_id_token = NULL; /* copy id_token and claims from session to request state and obtain their values */ oidc_copy_tokens_to_request_state(r, session, &s_id_token, &s_claims); if ((cfg->pass_userinfo_as & OIDC_PASS_USERINFO_AS_CLAIMS)) { /* set the userinfo claims in the app headers */ if (oidc_set_app_claims(r, cfg, session, s_claims) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; } if ((cfg->pass_userinfo_as & OIDC_PASS_USERINFO_AS_JSON_OBJECT)) { /* pass the userinfo JSON object to the app in a header or 
environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_USERINFO_JSON, s_claims, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } if ((cfg->pass_userinfo_as & OIDC_PASS_USERINFO_AS_JWT)) { if (cfg->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { /* get the compact serialized JWT from the session */ const char *s_userinfo_jwt = oidc_session_get_userinfo_jwt(r, session); if (s_userinfo_jwt != NULL) { /* pass the compact serialized JWT to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_USERINFO_JWT, s_userinfo_jwt, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } else { oidc_debug(r, "configured to pass userinfo in a JWT, but no such JWT was found in the session (probably no such JWT was returned from the userinfo endpoint)"); } } else { oidc_error(r, "session type \"client-cookie\" does not allow storing/passing a userinfo JWT; use \"" OIDCSessionType " server-cache\" for that"); } } if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_CLAIMS)) { /* set the id_token in the app headers */ if (oidc_set_app_claims(r, cfg, session, s_id_token) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; } if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_PAYLOAD)) { /* pass the id_token JSON object to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_ID_TOKEN_PAYLOAD, s_id_token, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } if ((cfg->pass_idtoken_as & OIDC_PASS_IDTOKEN_AS_SERIALIZED)) { if (cfg->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { /* get the compact serialized JWT from the session */ const char *s_id_token = oidc_session_get_idtoken(r, session); /* pass the compact serialized JWT to the app in a header or environment variable */ oidc_util_set_app_info(r, OIDC_APP_INFO_ID_TOKEN, s_id_token, OIDC_DEFAULT_HEADER_PREFIX, pass_headers, pass_envvars, pass_base64url); } else { oidc_error(r, "session type \"client-cookie\" does not allow storing/passing the id_token; use \"" OIDCSessionType " server-cache\" for that"); } } /* pass the at, rt and at expiry to the application, possibly update the session expiry */ if (oidc_session_pass_tokens(r, cfg, session, needs_save) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; /* return "user authenticated" status */ return OK; } /* * helper function for basic/implicit client flows upon receiving an authorization response: * check that it matches the state stored in the browser and return the variables associated * with the state, such as original_url and OP oidc_provider_t pointer. 
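 *
 * (editorial note, illustrative only) the actual matching is delegated to
 * oidc_restore_proto_state() above: the hash of the browser fingerprint concatenated with
 * the nonce stored in the encrypted state cookie is recomputed and compared against the
 * "state" value echoed back by the OP; reduced to its essence that check looks like the
 * sketch below, where hash_browser_state() is a hypothetical stand-in for
 * oidc_get_browser_state_hash():
 *
 *     const char *calc = hash_browser_state(r, cfg, nonce_from_cookie);
 *     if ((calc == NULL) || (apr_strnatcmp(calc, state_from_response) != 0))
 *         return FALSE; // response is not bound to this browser, reject it
 *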
*/ static apr_byte_t oidc_authorization_response_match_state(request_rec *r, oidc_cfg *c, const char *state, struct oidc_provider_t **provider, oidc_proto_state_t **proto_state) { oidc_debug(r, "enter (state=%s)", state); if ((state == NULL) || (apr_strnatcmp(state, "") == 0)) { oidc_error(r, "state parameter is not set"); return FALSE; } /* check the state parameter against what we stored in a cookie */ if (oidc_restore_proto_state(r, c, state, proto_state) == FALSE) { oidc_error(r, "unable to restore state"); return FALSE; } *provider = oidc_get_provider_for_issuer(r, c, oidc_proto_state_get_issuer(*proto_state), FALSE); if (*provider == NULL) { oidc_proto_state_destroy(*proto_state); *proto_state = NULL; return FALSE; } return TRUE; } /* * redirect the browser to the session logout endpoint */ static int oidc_session_redirect_parent_window_to_logout(request_rec *r, oidc_cfg *c) { oidc_debug(r, "enter"); char *java_script = apr_psprintf(r->pool, " <script type=\"text/javascript\">\n" " window.top.location.href = '%s?session=logout';\n" " </script>\n", oidc_get_redirect_uri(r, c)); return oidc_util_html_send(r, "Redirecting...", java_script, NULL, NULL, OK); } /* * handle an error returned by the OP */ static int oidc_authorization_response_error(request_rec *r, oidc_cfg *c, oidc_proto_state_t *proto_state, const char *error, const char *error_description) { const char *prompt = oidc_proto_state_get_prompt(proto_state); if (prompt != NULL) prompt = apr_pstrdup(r->pool, prompt); oidc_proto_state_destroy(proto_state); if ((prompt != NULL) && (apr_strnatcmp(prompt, OIDC_PROTO_PROMPT_NONE) == 0)) { return oidc_session_redirect_parent_window_to_logout(r, c); } return oidc_util_html_send_error(r, c->error_template, apr_psprintf(r->pool, "OpenID Connect Provider error: %s", error), error_description, OK); } /* * get the r->user for this request based on the configuration for OIDC/OAuth */ apr_byte_t oidc_get_remote_user(request_rec *r, const char *claim_name, const char *reg_exp, const char *replace, json_t *json, char **request_user) { /* get the claim value from the JSON object */ json_t *username = json_object_get(json, claim_name); if ((username == NULL) || (!json_is_string(username))) { oidc_warn(r, "JSON object did not contain a \"%s\" string", claim_name); return FALSE; } *request_user = apr_pstrdup(r->pool, json_string_value(username)); if (reg_exp != NULL) { char *error_str = NULL; if (replace == NULL) { if (oidc_util_regexp_first_match(r->pool, *request_user, reg_exp, request_user, &error_str) == FALSE) { oidc_error(r, "oidc_util_regexp_first_match failed: %s", error_str); *request_user = NULL; return FALSE; } } else if (oidc_util_regexp_substitute(r->pool, *request_user, reg_exp, replace, request_user, &error_str) == FALSE) { oidc_error(r, "oidc_util_regexp_substitute failed: %s", error_str); *request_user = NULL; return FALSE; } } return TRUE; } /* * set the unique user identifier that will be propagated in the Apache r->user and REMOTE_USER variables */ static apr_byte_t oidc_set_request_user(request_rec *r, oidc_cfg *c, oidc_provider_t *provider, oidc_jwt_t *jwt, const char *s_claims) { char *issuer = provider->issuer; char *claim_name = apr_pstrdup(r->pool, c->remote_user_claim.claim_name); int n = strlen(claim_name); apr_byte_t post_fix_with_issuer = (claim_name[n - 1] == OIDC_CHAR_AT); if (post_fix_with_issuer == TRUE) { claim_name[n - 1] = '\0'; issuer = (strstr(issuer, "https://") == NULL) ? 
apr_pstrdup(r->pool, issuer) : apr_pstrdup(r->pool, issuer + strlen("https://")); } /* extract the username claim (default: "sub") from the id_token payload or user claims */ apr_byte_t rc = FALSE; char *remote_user = NULL; json_t *claims = NULL; oidc_util_decode_json_object(r, s_claims, &claims); if (claims == NULL) { rc = oidc_get_remote_user(r, claim_name, c->remote_user_claim.reg_exp, c->remote_user_claim.replace, jwt->payload.value.json, &remote_user); } else { oidc_util_json_merge(r, jwt->payload.value.json, claims); rc = oidc_get_remote_user(r, claim_name, c->remote_user_claim.reg_exp, c->remote_user_claim.replace, claims, &remote_user); json_decref(claims); } if ((rc == FALSE) || (remote_user == NULL)) { oidc_error(r, "" OIDCRemoteUserClaim "is set to \"%s\", but could not set the remote user based on the requested claim \"%s\" and the available claims for the user", c->remote_user_claim.claim_name, claim_name); return FALSE; } if (post_fix_with_issuer == TRUE) remote_user = apr_psprintf(r->pool, "%s%s%s", remote_user, OIDC_STR_AT, issuer); r->user = apr_pstrdup(r->pool, remote_user); oidc_debug(r, "set remote_user to \"%s\" based on claim: \"%s\"%s", r->user, c->remote_user_claim.claim_name, c->remote_user_claim.reg_exp ? apr_psprintf(r->pool, " and expression: \"%s\" and replace string: \"%s\"", c->remote_user_claim.reg_exp, c->remote_user_claim.replace) : ""); return TRUE; } static char* oidc_make_sid_iss_unique(request_rec *r, const char *sid, const char *issuer) { return apr_psprintf(r->pool, "%s@%s", sid, issuer); } /* * store resolved information in the session */ static apr_byte_t oidc_save_in_session(request_rec *r, oidc_cfg *c, oidc_session_t *session, oidc_provider_t *provider, const char *remoteUser, const char *id_token, oidc_jwt_t *id_token_jwt, const char *claims, const char *access_token, const int expires_in, const char *refresh_token, const char *session_state, const char *state, const char *original_url, const char *userinfo_jwt) { /* store the user in the session */ session->remote_user = remoteUser; /* set the session expiry to the inactivity timeout */ session->expiry = apr_time_now() + apr_time_from_sec(c->session_inactivity_timeout); /* store the claims payload in the id_token for later reference */ oidc_session_set_idtoken_claims(r, session, id_token_jwt->payload.value.str); if (c->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { /* store the compact serialized representation of the id_token for later reference */ oidc_session_set_idtoken(r, session, id_token); } /* store the issuer in the session (at least needed for session mgmt and token refresh */ oidc_session_set_issuer(r, session, provider->issuer); /* store the state and original URL in the session for handling browser-back more elegantly */ oidc_session_set_request_state(r, session, state); oidc_session_set_original_url(r, session, original_url); if ((session_state != NULL) && (provider->check_session_iframe != NULL)) { /* store the session state and required parameters session management */ oidc_session_set_session_state(r, session, session_state); oidc_debug(r, "session management enabled: stored session_state (%s), check_session_iframe (%s) and client_id (%s) in the session", session_state, provider->check_session_iframe, provider->client_id); } else if (provider->check_session_iframe == NULL) { oidc_debug(r, "session management disabled: \"check_session_iframe\" is not set in provider configuration"); } else { oidc_debug(r, "session management disabled: no \"session_state\" value is provided 
in the authentication response even though \"check_session_iframe\" (%s) is set in the provider configuration", provider->check_session_iframe); } /* store claims resolved from userinfo endpoint */ oidc_store_userinfo_claims(r, c, session, provider, claims, userinfo_jwt); /* see if we have an access_token */ if (access_token != NULL) { /* store the access_token in the session context */ oidc_session_set_access_token(r, session, access_token); /* store the associated expires_in value */ oidc_session_set_access_token_expires(r, session, expires_in); /* reset the access token refresh timestamp */ oidc_session_reset_access_token_last_refresh(r, session); } /* see if we have a refresh_token */ if (refresh_token != NULL) { /* store the refresh_token in the session context */ oidc_session_set_refresh_token(r, session, refresh_token); } /* store max session duration in the session as a hard cut-off expiry timestamp */ apr_time_t session_expires = (provider->session_max_duration == 0) ? apr_time_from_sec(id_token_jwt->payload.exp) : (apr_time_now() + apr_time_from_sec(provider->session_max_duration)); oidc_session_set_session_expires(r, session, session_expires); oidc_debug(r, "provider->session_max_duration = %d, session_expires=%" APR_TIME_T_FMT, provider->session_max_duration, session_expires); /* log message about max session duration */ oidc_log_session_expires(r, "session max lifetime", session_expires); /* store the domain for which this session is valid */ oidc_session_set_cookie_domain(r, session, c->cookie_domain ? c->cookie_domain : oidc_get_current_url_host(r)); char *sid = NULL; oidc_debug(r, "provider->backchannel_logout_supported=%d", provider->backchannel_logout_supported); if (provider->backchannel_logout_supported > 0) { oidc_jose_get_string(r->pool, id_token_jwt->payload.value.json, OIDC_CLAIM_SID, FALSE, &sid, NULL); if (sid == NULL) sid = id_token_jwt->payload.sub; session->sid = oidc_make_sid_iss_unique(r, sid, provider->issuer); } /* store the session */ return oidc_session_save(r, session, TRUE); } /* * parse the expiry for the access token */ static int oidc_parse_expires_in(request_rec *r, const char *expires_in) { if (expires_in != NULL) { char *ptr = NULL; long number = strtol(expires_in, &ptr, 10); if (number <= 0) { oidc_warn(r, "could not convert \"expires_in\" value (%s) to a number", expires_in); return -1; } return number; } return -1; } /* * handle the different flows (hybrid, implicit, Authorization Code) */ static apr_byte_t oidc_handle_flows(request_rec *r, oidc_cfg *c, oidc_proto_state_t *proto_state, oidc_provider_t *provider, apr_table_t *params, const char *response_mode, oidc_jwt_t **jwt) { apr_byte_t rc = FALSE; const char *requested_response_type = oidc_proto_state_get_response_type( proto_state); /* handle the requested response type/mode */ if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_CODE_IDTOKEN_TOKEN)) { rc = oidc_proto_authorization_response_code_idtoken_token(r, c, proto_state, provider, params, response_mode, jwt); } else if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_CODE_IDTOKEN)) { rc = oidc_proto_authorization_response_code_idtoken(r, c, proto_state, provider, params, response_mode, jwt); } else if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_CODE_TOKEN)) { rc = oidc_proto_handle_authorization_response_code_token(r, c, proto_state, provider, params, response_mode, jwt); } else if 
(oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_CODE)) { rc = oidc_proto_handle_authorization_response_code(r, c, proto_state, provider, params, response_mode, jwt); } else if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_IDTOKEN_TOKEN)) { rc = oidc_proto_handle_authorization_response_idtoken_token(r, c, proto_state, provider, params, response_mode, jwt); } else if (oidc_util_spaced_string_equals(r->pool, requested_response_type, OIDC_PROTO_RESPONSE_TYPE_IDTOKEN)) { rc = oidc_proto_handle_authorization_response_idtoken(r, c, proto_state, provider, params, response_mode, jwt); } else { oidc_error(r, "unsupported response type: \"%s\"", requested_response_type); } if ((rc == FALSE) && (*jwt != NULL)) { oidc_jwt_destroy(*jwt); *jwt = NULL; } return rc; } /* handle the browser back on an authorization response */ static apr_byte_t oidc_handle_browser_back(request_rec *r, const char *r_state, oidc_session_t *session) { /* see if we have an existing session and browser-back was used */ const char *s_state = NULL, *o_url = NULL; if (session->remote_user != NULL) { s_state = oidc_session_get_request_state(r, session); o_url = oidc_session_get_original_url(r, session); if ((r_state != NULL) && (s_state != NULL) && (apr_strnatcmp(r_state, s_state) == 0)) { /* log the browser back event detection */ oidc_warn(r, "browser back detected, redirecting to original URL: %s", o_url); /* go back to the URL that he originally tried to access */ oidc_util_hdr_out_location_set(r, o_url); return TRUE; } } return FALSE; } /* * complete the handling of an authorization response by obtaining, parsing and verifying the * id_token and storing the authenticated user state in the session */ static int oidc_handle_authorization_response(request_rec *r, oidc_cfg *c, oidc_session_t *session, apr_table_t *params, const char *response_mode) { oidc_debug(r, "enter, response_mode=%s", response_mode); oidc_provider_t *provider = NULL; oidc_proto_state_t *proto_state = NULL; oidc_jwt_t *jwt = NULL; /* see if this response came from a browser-back event */ if (oidc_handle_browser_back(r, apr_table_get(params, OIDC_PROTO_STATE), session) == TRUE) return HTTP_MOVED_TEMPORARILY; /* match the returned state parameter against the state stored in the browser */ if (oidc_authorization_response_match_state(r, c, apr_table_get(params, OIDC_PROTO_STATE), &provider, &proto_state) == FALSE) { if (c->default_sso_url != NULL) { oidc_warn(r, "invalid authorization response state; a default SSO URL is set, sending the user there: %s", c->default_sso_url); oidc_util_hdr_out_location_set(r, c->default_sso_url); //oidc_util_hdr_err_out_add(r, "Location", c->default_sso_url)); return HTTP_MOVED_TEMPORARILY; } oidc_error(r, "invalid authorization response state and no default SSO URL is set, sending an error..."); // if content was already returned via html/http send then don't return 500 // but send 200 to avoid extraneous internal error document text to be sent return ((r->user) && (strncmp(r->user, "", 1) == 0)) ? 
OK : HTTP_BAD_REQUEST; } /* see if the response is an error response */ if (apr_table_get(params, OIDC_PROTO_ERROR) != NULL) return oidc_authorization_response_error(r, c, proto_state, apr_table_get(params, OIDC_PROTO_ERROR), apr_table_get(params, OIDC_PROTO_ERROR_DESCRIPTION)); /* handle the code, implicit or hybrid flow */ if (oidc_handle_flows(r, c, proto_state, provider, params, response_mode, &jwt) == FALSE) return oidc_authorization_response_error(r, c, proto_state, "Error in handling response type.", NULL); if (jwt == NULL) { oidc_error(r, "no id_token was provided"); return oidc_authorization_response_error(r, c, proto_state, "No id_token was provided.", NULL); } int expires_in = oidc_parse_expires_in(r, apr_table_get(params, OIDC_PROTO_EXPIRES_IN)); char *userinfo_jwt = NULL; /* * optionally resolve additional claims against the userinfo endpoint * parsed claims are not actually used here but need to be parsed anyway for error checking purposes */ const char *claims = oidc_retrieve_claims_from_userinfo_endpoint(r, c, provider, apr_table_get(params, OIDC_PROTO_ACCESS_TOKEN), NULL, jwt->payload.sub, &userinfo_jwt); /* restore the original protected URL that the user was trying to access */ const char *original_url = oidc_proto_state_get_original_url(proto_state); if (original_url != NULL) original_url = apr_pstrdup(r->pool, original_url); const char *original_method = oidc_proto_state_get_original_method( proto_state); if (original_method != NULL) original_method = apr_pstrdup(r->pool, original_method); const char *prompt = oidc_proto_state_get_prompt(proto_state); /* set the user */ if (oidc_set_request_user(r, c, provider, jwt, claims) == TRUE) { /* session management: if the user in the new response is not equal to the old one, error out */ if ((prompt != NULL) && (apr_strnatcmp(prompt, OIDC_PROTO_PROMPT_NONE) == 0)) { // TOOD: actually need to compare sub? 
(need to store it in the session separately then //const char *sub = NULL; //oidc_session_get(r, session, "sub", &sub); //if (apr_strnatcmp(sub, jwt->payload.sub) != 0) { if (apr_strnatcmp(session->remote_user, r->user) != 0) { oidc_warn(r, "user set from new id_token is different from current one"); oidc_jwt_destroy(jwt); return oidc_authorization_response_error(r, c, proto_state, "User changed!", NULL); } } /* store resolved information in the session */ if (oidc_save_in_session(r, c, session, provider, r->user, apr_table_get(params, OIDC_PROTO_ID_TOKEN), jwt, claims, apr_table_get(params, OIDC_PROTO_ACCESS_TOKEN), expires_in, apr_table_get(params, OIDC_PROTO_REFRESH_TOKEN), apr_table_get(params, OIDC_PROTO_SESSION_STATE), apr_table_get(params, OIDC_PROTO_STATE), original_url, userinfo_jwt) == FALSE) { oidc_proto_state_destroy(proto_state); oidc_jwt_destroy(jwt); return HTTP_INTERNAL_SERVER_ERROR; } } else { oidc_error(r, "remote user could not be set"); oidc_jwt_destroy(jwt); return oidc_authorization_response_error(r, c, proto_state, "Remote user could not be set: contact the website administrator", NULL); } /* cleanup */ oidc_proto_state_destroy(proto_state); oidc_jwt_destroy(jwt); /* check that we've actually authenticated a user; functions as error handling for oidc_get_remote_user */ if (r->user == NULL) return HTTP_UNAUTHORIZED; /* log the successful response */ oidc_debug(r, "session created and stored, returning to original URL: %s, original method: %s", original_url, original_method); /* check whether form post data was preserved; if so restore it */ if (apr_strnatcmp(original_method, OIDC_METHOD_FORM_POST) == 0) { return oidc_request_post_preserved_restore(r, original_url); } /* now we've authenticated the user so go back to the URL that he originally tried to access */ oidc_util_hdr_out_location_set(r, original_url); /* do the actual redirect to the original URL */ return HTTP_MOVED_TEMPORARILY; } /* * handle an OpenID Connect Authorization Response using the POST (+fragment->POST) response_mode */ static int oidc_handle_post_authorization_response(request_rec *r, oidc_cfg *c, oidc_session_t *session) { oidc_debug(r, "enter"); /* initialize local variables */ char *response_mode = NULL; /* read the parameters that are POST-ed to us */ apr_table_t *params = apr_table_make(r->pool, 8); if (oidc_util_read_post_params(r, params, FALSE, NULL) == FALSE) { oidc_error(r, "something went wrong when reading the POST parameters"); return HTTP_INTERNAL_SERVER_ERROR; } /* see if we've got any POST-ed data at all */ if ((apr_table_elts(params)->nelts < 1) || ((apr_table_elts(params)->nelts == 1) && apr_table_get(params, OIDC_PROTO_RESPONSE_MODE) && (apr_strnatcmp( apr_table_get(params, OIDC_PROTO_RESPONSE_MODE), OIDC_PROTO_RESPONSE_MODE_FRAGMENT) == 0))) { return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "You've hit an OpenID Connect Redirect URI with no parameters, this is an invalid request; you should not open this URL in your browser directly, or have the server administrator use a different " OIDCRedirectURI " setting.", HTTP_INTERNAL_SERVER_ERROR); } /* get the parameters */ response_mode = (char*) apr_table_get(params, OIDC_PROTO_RESPONSE_MODE); /* do the actual implicit work */ return oidc_handle_authorization_response(r, c, session, params, response_mode ? 
response_mode : OIDC_PROTO_RESPONSE_MODE_FORM_POST); } /* * handle an OpenID Connect Authorization Response using the redirect response_mode */ static int oidc_handle_redirect_authorization_response(request_rec *r, oidc_cfg *c, oidc_session_t *session) { oidc_debug(r, "enter"); /* read the parameters from the query string */ apr_table_t *params = apr_table_make(r->pool, 8); oidc_util_read_form_encoded_params(r, params, r->args); /* do the actual work */ return oidc_handle_authorization_response(r, c, session, params, OIDC_PROTO_RESPONSE_MODE_QUERY); } /* * present the user with an OP selection screen */ static int oidc_discovery(request_rec *r, oidc_cfg *cfg) { oidc_debug(r, "enter"); /* obtain the URL we're currently accessing, to be stored in the state/session */ char *current_url = oidc_get_current_url(r); const char *method = oidc_original_request_method(r, cfg, FALSE); /* generate CSRF token */ char *csrf = NULL; if (oidc_proto_generate_nonce(r, &csrf, 8) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; char *path_scopes = oidc_dir_cfg_path_scope(r); char *path_auth_request_params = oidc_dir_cfg_path_auth_request_params(r); char *discover_url = oidc_cfg_dir_discover_url(r); /* see if there's an external discovery page configured */ if (discover_url != NULL) { /* yes, assemble the parameters for external discovery */ char *url = apr_psprintf(r->pool, "%s%s%s=%s&%s=%s&%s=%s&%s=%s", discover_url, strchr(discover_url, OIDC_CHAR_QUERY) != NULL ? OIDC_STR_AMP : OIDC_STR_QUERY, OIDC_DISC_RT_PARAM, oidc_util_escape_string(r, current_url), OIDC_DISC_RM_PARAM, method, OIDC_DISC_CB_PARAM, oidc_util_escape_string(r, oidc_get_redirect_uri(r, cfg)), OIDC_CSRF_NAME, oidc_util_escape_string(r, csrf)); if (path_scopes != NULL) url = apr_psprintf(r->pool, "%s&%s=%s", url, OIDC_DISC_SC_PARAM, oidc_util_escape_string(r, path_scopes)); if (path_auth_request_params != NULL) url = apr_psprintf(r->pool, "%s&%s=%s", url, OIDC_DISC_AR_PARAM, oidc_util_escape_string(r, path_auth_request_params)); /* log what we're about to do */ oidc_debug(r, "redirecting to external discovery page: %s", url); /* set CSRF cookie */ oidc_util_set_cookie(r, OIDC_CSRF_NAME, csrf, -1, OIDC_COOKIE_SAMESITE_STRICT(cfg, r)); /* see if we need to preserve POST parameters through Javascript/HTML5 storage */ if (oidc_post_preserve_javascript(r, url, NULL, NULL) == TRUE) return OK; /* do the actual redirect to an external discovery page */ oidc_util_hdr_out_location_set(r, url); return HTTP_MOVED_TEMPORARILY; } /* get a list of all providers configured in the metadata directory */ apr_array_header_t *arr = NULL; if (oidc_metadata_list(r, cfg, &arr) == FALSE) return oidc_util_html_send_error(r, cfg->error_template, "Configuration Error", "No configured providers found, contact your administrator", HTTP_UNAUTHORIZED); /* assemble a where-are-you-from IDP discovery HTML page */ const char *s = " <h3>Select your OpenID Connect Identity Provider</h3>\n"; /* list all configured providers in there */ int i; for (i = 0; i < arr->nelts; i++) { const char *issuer = ((const char**) arr->elts)[i]; // TODO: html escape (especially & character) char *href = apr_psprintf(r->pool, "%s?%s=%s&amp;%s=%s&amp;%s=%s&amp;%s=%s", oidc_get_redirect_uri(r, cfg), OIDC_DISC_OP_PARAM, oidc_util_escape_string(r, issuer), OIDC_DISC_RT_PARAM, oidc_util_escape_string(r, current_url), OIDC_DISC_RM_PARAM, method, OIDC_CSRF_NAME, csrf); if (path_scopes != NULL) href = apr_psprintf(r->pool, "%s&amp;%s=%s", href, OIDC_DISC_SC_PARAM, oidc_util_escape_string(r, path_scopes)); if 
(path_auth_request_params != NULL) href = apr_psprintf(r->pool, "%s&amp;%s=%s", href, OIDC_DISC_AR_PARAM, oidc_util_escape_string(r, path_auth_request_params)); char *display = (strstr(issuer, "https://") == NULL) ? apr_pstrdup(r->pool, issuer) : apr_pstrdup(r->pool, issuer + strlen("https://")); /* strip port number */ //char *p = strstr(display, ":"); //if (p != NULL) *p = '\0'; /* point back to the redirect_uri, where the selection is handled, with an IDP selection and return_to URL */ s = apr_psprintf(r->pool, "%s<p><a href=\"%s\">%s</a></p>\n", s, href, display); } /* add an option to enter an account or issuer name for dynamic OP discovery */ s = apr_psprintf(r->pool, "%s<form method=\"get\" action=\"%s\">\n", s, oidc_get_redirect_uri(r, cfg)); s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_DISC_RT_PARAM, current_url); s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_DISC_RM_PARAM, method); s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_CSRF_NAME, csrf); if (path_scopes != NULL) s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_DISC_SC_PARAM, path_scopes); if (path_auth_request_params != NULL) s = apr_psprintf(r->pool, "%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s, OIDC_DISC_AR_PARAM, path_auth_request_params); s = apr_psprintf(r->pool, "%s<p>Or enter your account name (eg. &quot;mike@seed.gluu.org&quot;, or an IDP identifier (eg. &quot;mitreid.org&quot;):</p>\n", s); s = apr_psprintf(r->pool, "%s<p><input type=\"text\" name=\"%s\" value=\"%s\"></p>\n", s, OIDC_DISC_OP_PARAM, ""); s = apr_psprintf(r->pool, "%s<p><input type=\"submit\" value=\"Submit\"></p>\n", s); s = apr_psprintf(r->pool, "%s</form>\n", s); oidc_util_set_cookie(r, OIDC_CSRF_NAME, csrf, -1, OIDC_COOKIE_SAMESITE_STRICT(cfg, r)); char *javascript = NULL, *javascript_method = NULL; char *html_head = "<style type=\"text/css\">body {text-align: center}</style>"; if (oidc_post_preserve_javascript(r, NULL, &javascript, &javascript_method) == TRUE) html_head = apr_psprintf(r->pool, "%s%s", html_head, javascript); /* now send the HTML contents to the user agent */ return oidc_util_html_send(r, "OpenID Connect Provider Discovery", html_head, javascript_method, s, OK); } /* * authenticate the user to the selected OP, if the OP is not selected yet perform discovery first */ static int oidc_authenticate_user(request_rec *r, oidc_cfg *c, oidc_provider_t *provider, const char *original_url, const char *login_hint, const char *id_token_hint, const char *prompt, const char *auth_request_params, const char *path_scope) { oidc_debug(r, "enter"); if (provider == NULL) { // TODO: should we use an explicit redirect to the discovery endpoint (maybe a "discovery" param to the redirect_uri)? 
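	/*
	 * (editorial sketch, disabled) one way to prototype the TODO above would be to trigger
	 * discovery from an explicit request parameter instead of only from the absence of a
	 * selected provider; the parameter name "discovery" is purely an assumption made for
	 * this illustration and is not implemented by the module:
	 */
#if 0
	{
		char *discovery_requested = NULL;
		oidc_util_get_request_parameter(r, "discovery", &discovery_requested);
		if ((discovery_requested != NULL) && (c->metadata_dir != NULL))
			/* hand the user off to the OP selection / discovery page */
			return oidc_discovery(r, c);
	}
#endif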
if (c->metadata_dir != NULL) return oidc_discovery(r, c); /* we're not using multiple OP's configured in a metadata directory, pick the statically configured OP */ if (oidc_provider_static_config(r, c, &provider) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; } /* generate the random nonce value that correlates requests and responses */ char *nonce = NULL; if (oidc_proto_generate_nonce(r, &nonce, OIDC_PROTO_NONCE_LENGTH) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; char *pkce_state = NULL; char *code_challenge = NULL; if ((oidc_util_spaced_string_contains(r->pool, provider->response_type, OIDC_PROTO_CODE) == TRUE) && (provider->pkce != NULL)) { /* generate the code verifier value that correlates authorization requests and code exchange requests */ if (provider->pkce->state(r, &pkce_state) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; /* generate the PKCE code challenge */ if (provider->pkce->challenge(r, pkce_state, &code_challenge) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; } /* create the state between request/response */ oidc_proto_state_t *proto_state = oidc_proto_state_new(); oidc_proto_state_set_original_url(proto_state, original_url); oidc_proto_state_set_original_method(proto_state, oidc_original_request_method(r, c, TRUE)); oidc_proto_state_set_issuer(proto_state, provider->issuer); oidc_proto_state_set_response_type(proto_state, provider->response_type); oidc_proto_state_set_nonce(proto_state, nonce); oidc_proto_state_set_timestamp_now(proto_state); if (provider->response_mode) oidc_proto_state_set_response_mode(proto_state, provider->response_mode); if (prompt) oidc_proto_state_set_prompt(proto_state, prompt); if (pkce_state) oidc_proto_state_set_pkce_state(proto_state, pkce_state); /* get a hash value that fingerprints the browser concatenated with the random input */ char *state = oidc_get_browser_state_hash(r, c, nonce); /* * create state that restores the context when the authorization response comes in * and cryptographically bind it to the browser */ int rc = oidc_authorization_request_set_cookie(r, c, state, proto_state); if (rc != OK) { oidc_proto_state_destroy(proto_state); return rc; } /* * printout errors if Cookie settings are not going to work * TODO: separate this code out into its own function */ apr_uri_t o_uri; memset(&o_uri, 0, sizeof(apr_uri_t)); apr_uri_t r_uri; memset(&r_uri, 0, sizeof(apr_uri_t)); apr_uri_parse(r->pool, original_url, &o_uri); apr_uri_parse(r->pool, oidc_get_redirect_uri(r, c), &r_uri); if ((apr_strnatcmp(o_uri.scheme, r_uri.scheme) != 0) && (apr_strnatcmp(r_uri.scheme, "https") == 0)) { oidc_error(r, "the URL scheme (%s) of the configured " OIDCRedirectURI " does not match the URL scheme of the URL being accessed (%s): the \"state\" and \"session\" cookies will not be shared between the two!", r_uri.scheme, o_uri.scheme); oidc_proto_state_destroy(proto_state); return HTTP_INTERNAL_SERVER_ERROR; } if (c->cookie_domain == NULL) { if (apr_strnatcmp(o_uri.hostname, r_uri.hostname) != 0) { char *p = strstr(o_uri.hostname, r_uri.hostname); if ((p == NULL) || (apr_strnatcmp(r_uri.hostname, p) != 0)) { oidc_error(r, "the URL hostname (%s) of the configured " OIDCRedirectURI " does not match the URL hostname of the URL being accessed (%s): the \"state\" and \"session\" cookies will not be shared between the two!", r_uri.hostname, o_uri.hostname); oidc_proto_state_destroy(proto_state); return HTTP_INTERNAL_SERVER_ERROR; } } } else { if (!oidc_util_cookie_domain_valid(r_uri.hostname, c->cookie_domain)) { oidc_error(r, "the domain (%s) configured in " 
OIDCCookieDomain " does not match the URL hostname (%s) of the URL being accessed (%s): setting \"state\" and \"session\" cookies will not work!!", c->cookie_domain, o_uri.hostname, original_url); oidc_proto_state_destroy(proto_state); return HTTP_INTERNAL_SERVER_ERROR; } } /* send off to the OpenID Connect Provider */ // TODO: maybe show intermediate/progress screen "redirecting to" return oidc_proto_authorization_request(r, provider, login_hint, oidc_get_redirect_uri_iss(r, c, provider), state, proto_state, id_token_hint, code_challenge, auth_request_params, path_scope); } /* * check if the target_link_uri matches to configuration settings to prevent an open redirect */ static int oidc_target_link_uri_matches_configuration(request_rec *r, oidc_cfg *cfg, const char *target_link_uri) { apr_uri_t o_uri; apr_uri_parse(r->pool, target_link_uri, &o_uri); if (o_uri.hostname == NULL) { oidc_error(r, "could not parse the \"target_link_uri\" (%s) in to a valid URL: aborting.", target_link_uri); return FALSE; } apr_uri_t r_uri; apr_uri_parse(r->pool, oidc_get_redirect_uri(r, cfg), &r_uri); if (cfg->cookie_domain == NULL) { /* cookie_domain set: see if the target_link_uri matches the redirect_uri host (because the session cookie will be set host-wide) */ if (apr_strnatcmp(o_uri.hostname, r_uri.hostname) != 0) { char *p = strstr(o_uri.hostname, r_uri.hostname); if ((p == NULL) || (apr_strnatcmp(r_uri.hostname, p) != 0)) { oidc_error(r, "the URL hostname (%s) of the configured " OIDCRedirectURI " does not match the URL hostname of the \"target_link_uri\" (%s): aborting to prevent an open redirect.", r_uri.hostname, o_uri.hostname); return FALSE; } } } else { /* cookie_domain set: see if the target_link_uri is within the cookie_domain */ char *p = strstr(o_uri.hostname, cfg->cookie_domain); if ((p == NULL) || (apr_strnatcmp(cfg->cookie_domain, p) != 0)) { oidc_error(r, "the domain (%s) configured in " OIDCCookieDomain " does not match the URL hostname (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.", cfg->cookie_domain, o_uri.hostname, target_link_uri); return FALSE; } } /* see if the cookie_path setting matches the target_link_uri path */ char *cookie_path = oidc_cfg_dir_cookie_path(r); if (cookie_path != NULL) { char *p = (o_uri.path != NULL) ? 
strstr(o_uri.path, cookie_path) : NULL; if ((p == NULL) || (p != o_uri.path)) { oidc_error(r, "the path (%s) configured in " OIDCCookiePath " does not match the URL path (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.", cfg->cookie_domain, o_uri.path, target_link_uri); return FALSE; } else if (strlen(o_uri.path) > strlen(cookie_path)) { int n = strlen(cookie_path); if (cookie_path[n - 1] == OIDC_CHAR_FORWARD_SLASH) n--; if (o_uri.path[n] != OIDC_CHAR_FORWARD_SLASH) { oidc_error(r, "the path (%s) configured in " OIDCCookiePath " does not match the URL path (%s) of the \"target_link_uri\" (%s): aborting to prevent an open redirect.", cfg->cookie_domain, o_uri.path, target_link_uri); return FALSE; } } } return TRUE; } /* * handle a response from an IDP discovery page and/or handle 3rd-party initiated SSO */ static int oidc_handle_discovery_response(request_rec *r, oidc_cfg *c) { /* variables to hold the values returned in the response */ char *issuer = NULL, *target_link_uri = NULL, *login_hint = NULL, *auth_request_params = NULL, *csrf_cookie, *csrf_query = NULL, *user = NULL, *path_scopes; oidc_provider_t *provider = NULL; oidc_util_get_request_parameter(r, OIDC_DISC_OP_PARAM, &issuer); oidc_util_get_request_parameter(r, OIDC_DISC_USER_PARAM, &user); oidc_util_get_request_parameter(r, OIDC_DISC_RT_PARAM, &target_link_uri); oidc_util_get_request_parameter(r, OIDC_DISC_LH_PARAM, &login_hint); oidc_util_get_request_parameter(r, OIDC_DISC_SC_PARAM, &path_scopes); oidc_util_get_request_parameter(r, OIDC_DISC_AR_PARAM, &auth_request_params); oidc_util_get_request_parameter(r, OIDC_CSRF_NAME, &csrf_query); csrf_cookie = oidc_util_get_cookie(r, OIDC_CSRF_NAME); /* do CSRF protection if not 3rd party initiated SSO */ if (csrf_cookie) { /* clean CSRF cookie */ oidc_util_set_cookie(r, OIDC_CSRF_NAME, "", 0, OIDC_COOKIE_EXT_SAME_SITE_NONE(r)); /* compare CSRF cookie value with query parameter value */ if ((csrf_query == NULL) || apr_strnatcmp(csrf_query, csrf_cookie) != 0) { oidc_warn(r, "CSRF protection failed, no Discovery and dynamic client registration will be allowed"); csrf_cookie = NULL; } } // TODO: trim issuer/accountname/domain input and do more input validation oidc_debug(r, "issuer=\"%s\", target_link_uri=\"%s\", login_hint=\"%s\", user=\"%s\"", issuer, target_link_uri, login_hint, user); if (target_link_uri == NULL) { if (c->default_sso_url == NULL) { return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "SSO to this module without specifying a \"target_link_uri\" parameter is not possible because " OIDCDefaultURL " is not set.", HTTP_INTERNAL_SERVER_ERROR); } target_link_uri = c->default_sso_url; } /* do open redirect prevention */ if (oidc_target_link_uri_matches_configuration(r, c, target_link_uri) == FALSE) { return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "\"target_link_uri\" parameter does not match configuration settings, aborting to prevent an open redirect.", HTTP_UNAUTHORIZED); } /* see if this is a static setup */ if (c->metadata_dir == NULL) { if ((oidc_provider_static_config(r, c, &provider) == TRUE) && (issuer != NULL)) { if (apr_strnatcmp(provider->issuer, issuer) != 0) { return oidc_util_html_send_error(r, c->error_template, "Invalid Request", apr_psprintf(r->pool, "The \"iss\" value must match the configured providers' one (%s != %s).", issuer, c->provider.issuer), HTTP_INTERNAL_SERVER_ERROR); } } return oidc_authenticate_user(r, c, NULL, target_link_uri, login_hint, NULL, NULL, auth_request_params, 
path_scopes); } /* find out if the user entered an account name or selected an OP manually */ if (user != NULL) { if (login_hint == NULL) login_hint = apr_pstrdup(r->pool, user); /* normalize the user identifier */ if (strstr(user, "https://") != user) user = apr_psprintf(r->pool, "https://%s", user); /* got an user identifier as input, perform OP discovery with that */ if (oidc_proto_url_based_discovery(r, c, user, &issuer) == FALSE) { /* something did not work out, show a user facing error */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "Could not resolve the provided user identifier to an OpenID Connect provider; check your syntax.", HTTP_NOT_FOUND); } /* issuer is set now, so let's continue as planned */ } else if (strstr(issuer, OIDC_STR_AT) != NULL) { if (login_hint == NULL) { login_hint = apr_pstrdup(r->pool, issuer); //char *p = strstr(issuer, OIDC_STR_AT); //*p = '\0'; } /* got an account name as input, perform OP discovery with that */ if (oidc_proto_account_based_discovery(r, c, issuer, &issuer) == FALSE) { /* something did not work out, show a user facing error */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "Could not resolve the provided account name to an OpenID Connect provider; check your syntax.", HTTP_NOT_FOUND); } /* issuer is set now, so let's continue as planned */ } /* strip trailing '/' */ int n = strlen(issuer); if (issuer[n - 1] == OIDC_CHAR_FORWARD_SLASH) issuer[n - 1] = '\0'; /* try and get metadata from the metadata directories for the selected OP */ if ((oidc_metadata_get(r, c, issuer, &provider, csrf_cookie != NULL) == TRUE) && (provider != NULL)) { /* now we've got a selected OP, send the user there to authenticate */ return oidc_authenticate_user(r, c, provider, target_link_uri, login_hint, NULL, NULL, auth_request_params, path_scopes); } /* something went wrong */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", "Could not find valid provider metadata for the selected OpenID Connect provider; contact the administrator", HTTP_NOT_FOUND); } static apr_uint32_t oidc_transparent_pixel[17] = { 0x474e5089, 0x0a1a0a0d, 0x0d000000, 0x52444849, 0x01000000, 0x01000000, 0x00000408, 0x0c1cb500, 0x00000002, 0x4144490b, 0x639c7854, 0x0000cffa, 0x02010702, 0x71311c9a, 0x00000000, 0x444e4549, 0x826042ae }; static apr_byte_t oidc_is_front_channel_logout(const char *logout_param_value) { return ((logout_param_value != NULL) && ((apr_strnatcmp(logout_param_value, OIDC_GET_STYLE_LOGOUT_PARAM_VALUE) == 0) || (apr_strnatcmp(logout_param_value, OIDC_IMG_STYLE_LOGOUT_PARAM_VALUE) == 0))); } static apr_byte_t oidc_is_back_channel_logout(const char *logout_param_value) { return ((logout_param_value != NULL) && (apr_strnatcmp(logout_param_value, OIDC_BACKCHANNEL_STYLE_LOGOUT_PARAM_VALUE) == 0)); } /* * revoke refresh token and access token stored in the session if the * OP has an RFC 7009 compliant token revocation endpoint */ static void oidc_revoke_tokens(request_rec *r, oidc_cfg *c, oidc_session_t *session) { char *response = NULL; char *basic_auth = NULL; char *bearer_auth = NULL; apr_table_t *params = NULL; const char *token = NULL; oidc_provider_t *provider = NULL; oidc_debug(r, "enter"); if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) goto out; oidc_debug(r, "revocation_endpoint=%s", provider->revocation_endpoint_url ? 
provider->revocation_endpoint_url : "(null)"); if (provider->revocation_endpoint_url == NULL) goto out; params = apr_table_make(r->pool, 4); // add the token endpoint authentication credentials to the revocation endpoint call... if (oidc_proto_token_endpoint_auth(r, c, provider->token_endpoint_auth, provider->client_id, provider->client_secret, provider->client_signing_keys, provider->token_endpoint_url, params, NULL, &basic_auth, &bearer_auth) == FALSE) goto out; // TODO: use oauth.ssl_validate_server ... token = oidc_session_get_refresh_token(r, session); if (token != NULL) { apr_table_addn(params, "token_type_hint", "refresh_token"); apr_table_addn(params, "token", token); if (oidc_util_http_post_form(r, provider->revocation_endpoint_url, params, basic_auth, bearer_auth, c->oauth.ssl_validate_server, &response, c->http_timeout_long, c->outgoing_proxy, oidc_dir_cfg_pass_cookies(r), NULL, NULL, NULL) == FALSE) { oidc_warn(r, "revoking refresh token failed"); } apr_table_clear(params); } token = oidc_session_get_access_token(r, session); if (token != NULL) { apr_table_addn(params, "token_type_hint", "access_token"); apr_table_addn(params, "token", token); if (oidc_util_http_post_form(r, provider->revocation_endpoint_url, params, basic_auth, bearer_auth, c->oauth.ssl_validate_server, &response, c->http_timeout_long, c->outgoing_proxy, oidc_dir_cfg_pass_cookies(r), NULL, NULL, NULL) == FALSE) { oidc_warn(r, "revoking access token failed"); } } out: oidc_debug(r, "leave"); } /* * handle a local logout */ static int oidc_handle_logout_request(request_rec *r, oidc_cfg *c, oidc_session_t *session, const char *url) { oidc_debug(r, "enter (url=%s)", url); /* if there's no remote_user then there's no (stored) session to kill */ if (session->remote_user != NULL) oidc_revoke_tokens(r, c, session); /* * remove session state (cq. 
cache entry and cookie) * always clear the session cookie because the cookie may be not sent (but still in the browser) * due to SameSite policies */ oidc_session_kill(r, session); /* see if this is the OP calling us */ if (oidc_is_front_channel_logout(url)) { /* set recommended cache control headers */ oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_CACHE_CONTROL, "no-cache, no-store"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_PRAGMA, "no-cache"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_P3P, "CAO PSA OUR"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_EXPIRES, "0"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_X_FRAME_OPTIONS, "DENY"); /* see if this is PF-PA style logout in which case we return a transparent pixel */ const char *accept = oidc_util_hdr_in_accept_get(r); if ((apr_strnatcmp(url, OIDC_IMG_STYLE_LOGOUT_PARAM_VALUE) == 0) || ((accept) && strstr(accept, OIDC_CONTENT_TYPE_IMAGE_PNG))) { // terminate with DONE instead of OK // to avoid Apache returning auth/authz error 401 for the redirect URI return oidc_util_http_send(r, (const char*) &oidc_transparent_pixel, sizeof(oidc_transparent_pixel), OIDC_CONTENT_TYPE_IMAGE_PNG, DONE); } /* standard HTTP based logout: should be called in an iframe from the OP */ return oidc_util_html_send(r, "Logged Out", NULL, NULL, "<p>Logged Out</p>", DONE); } /* see if we don't need to go somewhere special after killing the session locally */ if (url == NULL) return oidc_util_html_send(r, "Logged Out", NULL, NULL, "<p>Logged Out</p>", OK); /* send the user to the specified where-to-go-after-logout URL */ oidc_util_hdr_out_location_set(r, url); return HTTP_MOVED_TEMPORARILY; } /* * handle a backchannel logout */ #define OIDC_EVENTS_BLOGOUT_KEY "http://schemas.openid.net/event/backchannel-logout" static int oidc_handle_logout_backchannel(request_rec *r, oidc_cfg *cfg) { oidc_debug(r, "enter"); const char *logout_token = NULL; oidc_jwt_t *jwt = NULL; oidc_jose_error_t err; oidc_jwk_t *jwk = NULL; oidc_provider_t *provider = NULL; char *sid = NULL, *uuid = NULL; oidc_session_t session; int rc = HTTP_BAD_REQUEST; apr_table_t *params = apr_table_make(r->pool, 8); if (oidc_util_read_post_params(r, params, FALSE, NULL) == FALSE) { oidc_error(r, "could not read POST-ed parameters to the logout endpoint"); goto out; } logout_token = apr_table_get(params, OIDC_PROTO_LOGOUT_TOKEN); if (logout_token == NULL) { oidc_error(r, "backchannel lggout endpoint was called but could not find a parameter named \"%s\"", OIDC_PROTO_LOGOUT_TOKEN); goto out; } // TODO: jwk symmetric key based on provider if (oidc_jwt_parse(r->pool, logout_token, &jwt, oidc_util_merge_symmetric_key(r->pool, cfg->private_keys, NULL), &err) == FALSE) { oidc_error(r, "oidc_jwt_parse failed: %s", oidc_jose_e2s(r->pool, err)); goto out; } provider = oidc_get_provider_for_issuer(r, cfg, jwt->payload.iss, FALSE); if (provider == NULL) { oidc_error(r, "no provider found for issuer: %s", jwt->payload.iss); goto out; } // TODO: destroy the JWK used for decryption jwk = NULL; if (oidc_util_create_symmetric_key(r, provider->client_secret, 0, NULL, TRUE, &jwk) == FALSE) return FALSE; oidc_jwks_uri_t jwks_uri = { provider->jwks_uri, provider->jwks_refresh_interval, provider->ssl_validate_server }; if (oidc_proto_jwt_verify(r, cfg, jwt, &jwks_uri, oidc_util_merge_symmetric_key(r->pool, NULL, jwk), provider->id_token_signed_response_alg) == FALSE) { oidc_error(r, "id_token signature could not be validated, aborting"); goto out; } // oidc_proto_validate_idtoken would try and require a token binding cnf // if the 
policy is set to "required", so don't use that here if (oidc_proto_validate_jwt(r, jwt, provider->validate_issuer ? provider->issuer : NULL, FALSE, FALSE, provider->idtoken_iat_slack, OIDC_TOKEN_BINDING_POLICY_DISABLED) == FALSE) goto out; /* verify the "aud" and "azp" values */ if (oidc_proto_validate_aud_and_azp(r, cfg, provider, &jwt->payload) == FALSE) goto out; json_t *events = json_object_get(jwt->payload.value.json, OIDC_CLAIM_EVENTS); if (events == NULL) { oidc_error(r, "\"%s\" claim could not be found in logout token", OIDC_CLAIM_EVENTS); goto out; } json_t *blogout = json_object_get(events, OIDC_EVENTS_BLOGOUT_KEY); if (!json_is_object(blogout)) { oidc_error(r, "\"%s\" object could not be found in \"%s\" claim", OIDC_EVENTS_BLOGOUT_KEY, OIDC_CLAIM_EVENTS); goto out; } char *nonce = NULL; oidc_json_object_get_string(r->pool, jwt->payload.value.json, OIDC_CLAIM_NONCE, &nonce, NULL); if (nonce != NULL) { oidc_error(r, "rejecting logout request/token since it contains a \"%s\" claim", OIDC_CLAIM_NONCE); goto out; } char *jti = NULL; oidc_json_object_get_string(r->pool, jwt->payload.value.json, OIDC_CLAIM_JTI, &jti, NULL); if (jti != NULL) { char *replay = NULL; oidc_cache_get_jti(r, jti, &replay); if (replay != NULL) { oidc_error(r, "the \"%s\" value (%s) passed in logout token was found in the cache already; possible replay attack!?", OIDC_CLAIM_JTI, jti); goto out; } } /* jti cache duration is the configured replay prevention window for token issuance plus 10 seconds for safety */ apr_time_t jti_cache_duration = apr_time_from_sec( provider->idtoken_iat_slack * 2 + 10); /* store it in the cache for the calculated duration */ oidc_cache_set_jti(r, jti, jti, apr_time_now() + jti_cache_duration); oidc_json_object_get_string(r->pool, jwt->payload.value.json, OIDC_CLAIM_EVENTS, &sid, NULL); // TODO: by-spec we should cater for the fact that "sid" has been provided // in the id_token returned in the authentication request, but "sub" // is used in the logout token but that requires a 2nd entry in the // cache and a separate session "sub" member, ugh; we'll just assume // that is "sid" is specified in the id_token, the OP will actually use // this for logout // (and probably call us multiple times or the same sub if needed) oidc_json_object_get_string(r->pool, jwt->payload.value.json, OIDC_CLAIM_SID, &sid, NULL); if (sid == NULL) sid = jwt->payload.sub; if (sid == NULL) { oidc_error(r, "no \"sub\" and no \"sid\" claim found in logout token"); goto out; } // TODO: when dealing with sub instead of a true sid, we'll be killing all sessions for // a specific user, across hosts that share the *same* cache backend // if those hosts haven't been configured with a different OIDCCryptoPassphrase // - perhaps that's even acceptable since non-memory caching is encrypted by default // and memory-based caching doesn't suffer from this (different shm segments)? // - it will result in 400 errors returned from backchannel logout calls to the other hosts... sid = oidc_make_sid_iss_unique(r, sid, provider->issuer); oidc_cache_get_sid(r, sid, &uuid); if (uuid == NULL) { oidc_error(r, "could not find session based on sid/sub provided in logout token: %s", sid); // return HTTP 200 according to (new?) 
spec and terminate early // to avoid Apache returning auth/authz error 500 for the redirect URI rc = DONE; goto out; } // revoke tokens if we can get a handle on those if (cfg->session_type != OIDC_SESSION_TYPE_CLIENT_COOKIE) { if (oidc_session_load_cache_by_uuid(r, cfg, uuid, &session) != FALSE) if (oidc_session_extract(r, &session) != FALSE) oidc_revoke_tokens(r, cfg, &session); } // clear the session cache oidc_cache_set_sid(r, sid, NULL, 0); oidc_cache_set_session(r, uuid, NULL, 0); // terminate with DONE instead of OK // to avoid Apache returning auth/authz error 500 for the redirect URI rc = DONE; out: if (jwk != NULL) { oidc_jwk_destroy(jwk); jwk = NULL; } if (jwt != NULL) { oidc_jwt_destroy(jwt); jwt = NULL; } oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_CACHE_CONTROL, "no-cache, no-store"); oidc_util_hdr_err_out_add(r, OIDC_HTTP_HDR_PRAGMA, "no-cache"); return rc; } static apr_byte_t oidc_validate_redirect_url(request_rec *r, oidc_cfg *c, const char *url, apr_byte_t restrict_to_host, char **err_str, char **err_desc) { apr_uri_t uri; const char *c_host = NULL; apr_hash_index_t *hi = NULL; if (apr_uri_parse(r->pool, url, &uri) != APR_SUCCESS) { *err_str = apr_pstrdup(r->pool, "Malformed URL"); *err_desc = apr_psprintf(r->pool, "not a valid URL value: %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } if (c->redirect_urls_allowed != NULL) { for (hi = apr_hash_first(NULL, c->redirect_urls_allowed); hi; hi = apr_hash_next(hi)) { apr_hash_this(hi, (const void**) &c_host, NULL, NULL); if (oidc_util_regexp_first_match(r->pool, url, c_host, NULL, err_str) == TRUE) break; } if (hi == NULL) { *err_str = apr_pstrdup(r->pool, "URL not allowed"); *err_desc = apr_psprintf(r->pool, "value does not match the list of allowed redirect URLs: %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } } else if ((uri.hostname != NULL) && (restrict_to_host == TRUE)) { c_host = oidc_get_current_url_host(r); if ((strstr(c_host, uri.hostname) == NULL) || (strstr(uri.hostname, c_host) == NULL)) { *err_str = apr_pstrdup(r->pool, "Invalid Request"); *err_desc = apr_psprintf(r->pool, "URL value \"%s\" does not match the hostname of the current request \"%s\"", apr_uri_unparse(r->pool, &uri, 0), c_host); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } } if ((uri.hostname == NULL) && (strstr(url, "/") != url)) { *err_str = apr_pstrdup(r->pool, "Malformed URL"); *err_desc = apr_psprintf(r->pool, "No hostname was parsed and it does not seem to be relative, i.e starting with '/': %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } else if ((uri.hostname == NULL) && (strstr(url, "//") == url)) { *err_str = apr_pstrdup(r->pool, "Malformed URL"); *err_desc = apr_psprintf(r->pool, "No hostname was parsed and starting with '//': %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } else if ((uri.hostname == NULL) && (strstr(url, "/\\") == url)) { *err_str = apr_pstrdup(r->pool, "Malformed URL"); *err_desc = apr_psprintf(r->pool, "No hostname was parsed and starting with '/\\': %s", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } /* validate the URL to prevent HTTP header splitting */ if (((strstr(url, "\n") != NULL) || strstr(url, "\r") != NULL)) { *err_str = apr_pstrdup(r->pool, "Invalid URL"); *err_desc = apr_psprintf(r->pool, "URL value \"%s\" contains illegal \"\n\" or \"\r\" character(s)", url); oidc_error(r, "%s: %s", *err_str, *err_desc); return FALSE; } return TRUE; } /* * perform (single) logout */ static 
int oidc_handle_logout(request_rec *r, oidc_cfg *c, oidc_session_t *session) { oidc_provider_t *provider = NULL; /* pickup the command or URL where the user wants to go after logout */ char *url = NULL; char *error_str = NULL; char *error_description = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_LOGOUT, &url); oidc_debug(r, "enter (url=%s)", url); if (oidc_is_front_channel_logout(url)) { return oidc_handle_logout_request(r, c, session, url); } else if (oidc_is_back_channel_logout(url)) { return oidc_handle_logout_backchannel(r, c); } if ((url == NULL) || (apr_strnatcmp(url, "") == 0)) { url = c->default_slo_url; } else { /* do input validation on the logout parameter value */ if (oidc_validate_redirect_url(r, c, url, TRUE, &error_str, &error_description) == FALSE) { return oidc_util_html_send_error(r, c->error_template, error_str, error_description, HTTP_BAD_REQUEST); } } oidc_get_provider_from_session(r, c, session, &provider); if ((provider != NULL) && (provider->end_session_endpoint != NULL)) { const char *id_token_hint = oidc_session_get_idtoken(r, session); char *logout_request = apr_pstrdup(r->pool, provider->end_session_endpoint); if (id_token_hint != NULL) { logout_request = apr_psprintf(r->pool, "%s%sid_token_hint=%s", logout_request, strchr(logout_request ? logout_request : "", OIDC_CHAR_QUERY) != NULL ? OIDC_STR_AMP : OIDC_STR_QUERY, oidc_util_escape_string(r, id_token_hint)); } if (url != NULL) { logout_request = apr_psprintf(r->pool, "%s%spost_logout_redirect_uri=%s", logout_request, strchr(logout_request ? logout_request : "", OIDC_CHAR_QUERY) != NULL ? OIDC_STR_AMP : OIDC_STR_QUERY, oidc_util_escape_string(r, url)); } url = logout_request; } return oidc_handle_logout_request(r, c, session, url); } /* * handle request for JWKs */ int oidc_handle_jwks(request_rec *r, oidc_cfg *c) { /* pickup requested JWKs type */ // char *jwks_type = NULL; // oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_JWKS, &jwks_type); char *jwks = apr_pstrdup(r->pool, "{ \"keys\" : ["); int i = 0; apr_byte_t first = TRUE; oidc_jose_error_t err; if (c->public_keys != NULL) { /* loop over the RSA public keys */ for (i = 0; i < c->public_keys->nelts; i++) { const oidc_jwk_t *jwk = ((const oidc_jwk_t**) c->public_keys->elts)[i]; char *s_json = NULL; if (oidc_jwk_to_json(r->pool, jwk, &s_json, &err) == TRUE) { jwks = apr_psprintf(r->pool, "%s%s %s ", jwks, first ? "" : ",", s_json); first = FALSE; } else { oidc_error(r, "could not convert RSA JWK to JSON using oidc_jwk_to_json: %s", oidc_jose_e2s(r->pool, err)); } } } // TODO: send stuff if first == FALSE? 
jwks = apr_psprintf(r->pool, "%s ] }", jwks); return oidc_util_http_send(r, jwks, strlen(jwks), OIDC_CONTENT_TYPE_JSON, OK); } static int oidc_handle_session_management_iframe_op(request_rec *r, oidc_cfg *c, oidc_session_t *session, const char *check_session_iframe) { oidc_debug(r, "enter"); oidc_util_hdr_out_location_set(r, check_session_iframe); return HTTP_MOVED_TEMPORARILY; } static int oidc_handle_session_management_iframe_rp(request_rec *r, oidc_cfg *c, oidc_session_t *session, const char *client_id, const char *check_session_iframe) { oidc_debug(r, "enter"); const char *java_script = " <script type=\"text/javascript\">\n" " var targetOrigin = '%s';\n" " var clientId = '%s';\n" " var sessionId = '%s';\n" " var loginUrl = '%s';\n" " var message = clientId + ' ' + sessionId;\n" " var timerID;\n" "\n" " function checkSession() {\n" " console.debug('checkSession: posting ' + message + ' to ' + targetOrigin);\n" " var win = window.parent.document.getElementById('%s').contentWindow;\n" " win.postMessage( message, targetOrigin);\n" " }\n" "\n" " function setTimer() {\n" " checkSession();\n" " timerID = setInterval('checkSession()', %d);\n" " }\n" "\n" " function receiveMessage(e) {\n" " console.debug('receiveMessage: ' + e.data + ' from ' + e.origin);\n" " if (e.origin !== targetOrigin ) {\n" " console.debug('receiveMessage: cross-site scripting attack?');\n" " return;\n" " }\n" " if (e.data != 'unchanged') {\n" " clearInterval(timerID);\n" " if (e.data == 'changed' && sessionId == '' ) {\n" " // 'changed' + no session: enforce a login (if we have a login url...)\n" " if (loginUrl != '') {\n" " window.top.location.replace(loginUrl);\n" " }\n" " } else {\n" " // either 'changed' + active session, or 'error': enforce a logout\n" " window.top.location.replace('%s?logout=' + encodeURIComponent(window.top.location.href));\n" " }\n" " }\n" " }\n" "\n" " window.addEventListener('message', receiveMessage, false);\n" "\n" " </script>\n"; /* determine the origin for the check_session_iframe endpoint */ char *origin = apr_pstrdup(r->pool, check_session_iframe); apr_uri_t uri; apr_uri_parse(r->pool, check_session_iframe, &uri); char *p = strstr(origin, uri.path); *p = '\0'; /* the element identifier for the OP iframe */ const char *op_iframe_id = "openidc-op"; /* restore the OP session_state from the session */ const char *session_state = oidc_session_get_session_state(r, session); if (session_state == NULL) { oidc_warn(r, "no session_state found in the session; the OP does probably not support session management!?"); //return OK; } char *s_poll_interval = NULL; oidc_util_get_request_parameter(r, "poll", &s_poll_interval); int poll_interval = s_poll_interval ? strtol(s_poll_interval, NULL, 10) : 0; if ((poll_interval <= 0) || (poll_interval > 3600 * 24)) poll_interval = 3000; char *login_uri = NULL, *error_str = NULL, *error_description = NULL; oidc_util_get_request_parameter(r, "login_uri", &login_uri); if ((login_uri != NULL) && (oidc_validate_redirect_url(r, c, login_uri, FALSE, &error_str, &error_description) == FALSE)) { return HTTP_BAD_REQUEST; } const char *redirect_uri = oidc_get_redirect_uri(r, c); java_script = apr_psprintf(r->pool, java_script, origin, client_id, session_state ? session_state : "", login_uri ? 
login_uri : "", op_iframe_id, poll_interval, redirect_uri, redirect_uri); return oidc_util_html_send(r, NULL, java_script, "setTimer", NULL, OK); } /* * handle session management request */ static int oidc_handle_session_management(request_rec *r, oidc_cfg *c, oidc_session_t *session) { char *cmd = NULL; const char *id_token_hint = NULL; oidc_provider_t *provider = NULL; /* get the command passed to the session management handler */ oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_SESSION, &cmd); if (cmd == NULL) { oidc_error(r, "session management handler called with no command"); return HTTP_INTERNAL_SERVER_ERROR; } /* see if this is a local logout during session management */ if (apr_strnatcmp("logout", cmd) == 0) { oidc_debug(r, "[session=logout] calling oidc_handle_logout_request because of session mgmt local logout call."); return oidc_handle_logout_request(r, c, session, c->default_slo_url); } if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) { if ((oidc_provider_static_config(r, c, &provider) == FALSE) || (provider == NULL)) return HTTP_NOT_FOUND; } /* see if this is a request for the OP iframe */ if (apr_strnatcmp("iframe_op", cmd) == 0) { if (provider->check_session_iframe != NULL) { return oidc_handle_session_management_iframe_op(r, c, session, provider->check_session_iframe); } return HTTP_NOT_FOUND; } /* see if this is a request for the RP iframe */ if (apr_strnatcmp("iframe_rp", cmd) == 0) { if ((provider->client_id != NULL) && (provider->check_session_iframe != NULL)) { return oidc_handle_session_management_iframe_rp(r, c, session, provider->client_id, provider->check_session_iframe); } oidc_debug(r, "iframe_rp command issued but no client (%s) and/or no check_session_iframe (%s) set", provider->client_id, provider->check_session_iframe); return HTTP_NOT_FOUND; } /* see if this is a request check the login state with the OP */ if (apr_strnatcmp("check", cmd) == 0) { id_token_hint = oidc_session_get_idtoken(r, session); /* * TODO: this doesn't work with per-path provided auth_request_params and scopes * as oidc_dir_cfg_path_auth_request_params and oidc_dir_cfg_path_scope will pick * those for the redirect_uri itself; do we need to store those as part of the * session now? 
*/ return oidc_authenticate_user(r, c, provider, apr_psprintf(r->pool, "%s?session=iframe_rp", oidc_get_redirect_uri_iss(r, c, provider)), NULL, id_token_hint, "none", oidc_dir_cfg_path_auth_request_params(r), oidc_dir_cfg_path_scope(r)); } /* handle failure in fallthrough */ oidc_error(r, "unknown command: %s", cmd); return HTTP_INTERNAL_SERVER_ERROR; } /* * handle refresh token request */ static int oidc_handle_refresh_token_request(request_rec *r, oidc_cfg *c, oidc_session_t *session) { char *return_to = NULL; char *r_access_token = NULL; char *error_code = NULL; char *error_str = NULL; char *error_description = NULL; apr_byte_t needs_save = TRUE; /* get the command passed to the session management handler */ oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_REFRESH, &return_to); oidc_util_get_request_parameter(r, OIDC_PROTO_ACCESS_TOKEN, &r_access_token); /* check the input parameters */ if (return_to == NULL) { oidc_error(r, "refresh token request handler called with no URL to return to"); return HTTP_INTERNAL_SERVER_ERROR; } /* do input validation on the return to parameter value */ if (oidc_validate_redirect_url(r, c, return_to, TRUE, &error_str, &error_description) == FALSE) { oidc_error(r, "return_to URL validation failed: %s: %s", error_str, error_description); return HTTP_INTERNAL_SERVER_ERROR; } if (r_access_token == NULL) { oidc_error(r, "refresh token request handler called with no access_token parameter"); error_code = "no_access_token"; goto end; } const char *s_access_token = oidc_session_get_access_token(r, session); if (s_access_token == NULL) { oidc_error(r, "no existing access_token found in the session, nothing to refresh"); error_code = "no_access_token_exists"; goto end; } /* compare the access_token parameter used for XSRF protection */ if (apr_strnatcmp(s_access_token, r_access_token) != 0) { oidc_error(r, "access_token passed in refresh request does not match the one stored in the session"); error_code = "no_access_token_match"; goto end; } /* get a handle to the provider configuration */ oidc_provider_t *provider = NULL; if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) { error_code = "session_corruption"; goto end; } /* execute the actual refresh grant */ if (oidc_refresh_access_token(r, c, session, provider, NULL) == FALSE) { oidc_error(r, "access_token could not be refreshed"); error_code = "refresh_failed"; goto end; } /* pass the tokens to the application, possibly updating the expiry */ if (oidc_session_pass_tokens(r, c, session, &needs_save) == FALSE) { error_code = "session_corruption"; goto end; } if (oidc_session_save(r, session, FALSE) == FALSE) { error_code = "error saving session"; goto end; } end: /* pass optional error message to the return URL */ if (error_code != NULL) return_to = apr_psprintf(r->pool, "%s%serror_code=%s", return_to, strchr(return_to ? return_to : "", OIDC_CHAR_QUERY) ? 
OIDC_STR_AMP : OIDC_STR_QUERY, oidc_util_escape_string(r, error_code)); /* add the redirect location header */ oidc_util_hdr_out_location_set(r, return_to); return HTTP_MOVED_TEMPORARILY; } /* * handle request object by reference request */ static int oidc_handle_request_uri(request_rec *r, oidc_cfg *c) { char *request_ref = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_REQUEST_URI, &request_ref); if (request_ref == NULL) { oidc_error(r, "no \"%s\" parameter found", OIDC_REDIRECT_URI_REQUEST_REQUEST_URI); return HTTP_BAD_REQUEST; } char *jwt = NULL; oidc_cache_get_request_uri(r, request_ref, &jwt); if (jwt == NULL) { oidc_error(r, "no cached JWT found for %s reference: %s", OIDC_REDIRECT_URI_REQUEST_REQUEST_URI, request_ref); return HTTP_NOT_FOUND; } oidc_cache_set_request_uri(r, request_ref, NULL, 0); return oidc_util_http_send(r, jwt, strlen(jwt), OIDC_CONTENT_TYPE_JWT, OK); } /* * handle a request to invalidate a cached access token introspection result */ int oidc_handle_remove_at_cache(request_rec *r, oidc_cfg *c) { char *access_token = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_REMOVE_AT_CACHE, &access_token); char *cache_entry = NULL; oidc_cache_get_access_token(r, access_token, &cache_entry); if (cache_entry == NULL) { oidc_error(r, "no cached access token found for value: %s", access_token); return HTTP_NOT_FOUND; } oidc_cache_set_access_token(r, access_token, NULL, 0); return OK; } #define OIDC_INFO_PARAM_ACCESS_TOKEN_REFRESH_INTERVAL "access_token_refresh_interval" /* * handle request for session info */ static int oidc_handle_info_request(request_rec *r, oidc_cfg *c, oidc_session_t *session, apr_byte_t needs_save) { int rc = HTTP_UNAUTHORIZED; char *s_format = NULL, *s_interval = NULL, *r_value = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_INFO, &s_format); oidc_util_get_request_parameter(r, OIDC_INFO_PARAM_ACCESS_TOKEN_REFRESH_INTERVAL, &s_interval); /* see if this is a request for a format that is supported */ if ((apr_strnatcmp(OIDC_HOOK_INFO_FORMAT_JSON, s_format) != 0) && (apr_strnatcmp(OIDC_HOOK_INFO_FORMAT_HTML, s_format) != 0)) { oidc_warn(r, "request for unknown format: %s", s_format); return HTTP_UNSUPPORTED_MEDIA_TYPE; } /* check that we actually have a user session and this is someone calling with a proper session cookie */ if (session->remote_user == NULL) { oidc_warn(r, "no user session found"); return HTTP_UNAUTHORIZED; } /* set the user in the main request for further (incl. 
sub-request and authz) processing */ r->user = apr_pstrdup(r->pool, session->remote_user); if (c->info_hook_data == NULL) { oidc_warn(r, "no data configured to return in " OIDCInfoHook); return HTTP_NOT_FOUND; } /* see if we can and need to refresh the access token */ if ((s_interval != NULL) && (oidc_session_get_refresh_token(r, session) != NULL)) { apr_time_t t_interval; if (sscanf(s_interval, "%" APR_TIME_T_FMT, &t_interval) == 1) { t_interval = apr_time_from_sec(t_interval); /* get the last refresh timestamp from the session info */ apr_time_t last_refresh = oidc_session_get_access_token_last_refresh(r, session); oidc_debug(r, "refresh needed in: %" APR_TIME_T_FMT " seconds", apr_time_sec(last_refresh + t_interval - apr_time_now())); /* see if we need to refresh again */ if (last_refresh + t_interval < apr_time_now()) { /* get the current provider info */ oidc_provider_t *provider = NULL; if (oidc_get_provider_from_session(r, c, session, &provider) == FALSE) return HTTP_INTERNAL_SERVER_ERROR; /* execute the actual refresh grant */ if (oidc_refresh_access_token(r, c, session, provider, NULL) == FALSE) oidc_warn(r, "access_token could not be refreshed"); else needs_save = TRUE; } } } /* create the JSON object */ json_t *json = json_object(); /* add a timestamp of creation in there for the caller */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_TIMESTAMP, APR_HASH_KEY_STRING)) { json_object_set_new(json, OIDC_HOOK_INFO_TIMESTAMP, json_integer(apr_time_sec(apr_time_now()))); } /* * refresh the claims from the userinfo endpoint * side-effect is that this may refresh the access token if not already done * note that OIDCUserInfoRefreshInterval should be set to control the refresh policy */ needs_save |= oidc_refresh_claims_from_userinfo_endpoint(r, c, session); /* include the access token in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_ACCES_TOKEN, APR_HASH_KEY_STRING)) { const char *access_token = oidc_session_get_access_token(r, session); if (access_token != NULL) json_object_set_new(json, OIDC_HOOK_INFO_ACCES_TOKEN, json_string(access_token)); } /* include the access token expiry timestamp in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_ACCES_TOKEN_EXP, APR_HASH_KEY_STRING)) { const char *access_token_expires = oidc_session_get_access_token_expires(r, session); if (access_token_expires != NULL) json_object_set_new(json, OIDC_HOOK_INFO_ACCES_TOKEN_EXP, json_string(access_token_expires)); } /* include the id_token claims in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_ID_TOKEN, APR_HASH_KEY_STRING)) { json_t *id_token = oidc_session_get_idtoken_claims_json(r, session); if (id_token) json_object_set_new(json, OIDC_HOOK_INFO_ID_TOKEN, id_token); } if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_USER_INFO, APR_HASH_KEY_STRING)) { /* include the claims from the userinfo endpoint the session info */ json_t *claims = oidc_session_get_userinfo_claims_json(r, session); if (claims) json_object_set_new(json, OIDC_HOOK_INFO_USER_INFO, claims); } /* include the maximum session lifetime in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_SESSION_EXP, APR_HASH_KEY_STRING)) { apr_time_t session_expires = oidc_session_get_session_expires(r, session); json_object_set_new(json, OIDC_HOOK_INFO_SESSION_EXP, json_integer(apr_time_sec(session_expires))); } /* include the inactivity timeout in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_SESSION_TIMEOUT, APR_HASH_KEY_STRING)) { 
json_object_set_new(json, OIDC_HOOK_INFO_SESSION_TIMEOUT, json_integer(apr_time_sec(session->expiry))); } /* include the remote_user in the session info */ if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_SESSION_REMOTE_USER, APR_HASH_KEY_STRING)) { json_object_set_new(json, OIDC_HOOK_INFO_SESSION_REMOTE_USER, json_string(session->remote_user)); } if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_SESSION, APR_HASH_KEY_STRING)) { json_t *j_session = json_object(); json_object_set(j_session, OIDC_HOOK_INFO_SESSION_STATE, session->state); json_object_set_new(j_session, OIDC_HOOK_INFO_SESSION_UUID, json_string(session->uuid)); json_object_set_new(json, OIDC_HOOK_INFO_SESSION, j_session); } if (apr_hash_get(c->info_hook_data, OIDC_HOOK_INFO_REFRESH_TOKEN, APR_HASH_KEY_STRING)) { /* include the refresh token in the session info */ const char *refresh_token = oidc_session_get_refresh_token(r, session); if (refresh_token != NULL) json_object_set_new(json, OIDC_HOOK_INFO_REFRESH_TOKEN, json_string(refresh_token)); } /* pass the tokens to the application and save the session, possibly updating the expiry */ if (oidc_session_pass_tokens(r, c, session, &needs_save) == FALSE) oidc_warn(r, "error passing tokens"); /* check if something was updated in the session and we need to save it again */ if (needs_save) { if (oidc_session_save(r, session, FALSE) == FALSE) { oidc_warn(r, "error saving session"); rc = HTTP_INTERNAL_SERVER_ERROR; } } if (apr_strnatcmp(OIDC_HOOK_INFO_FORMAT_JSON, s_format) == 0) { /* JSON-encode the result */ r_value = oidc_util_encode_json_object(r, json, 0); /* return the stringified JSON result */ rc = oidc_util_http_send(r, r_value, strlen(r_value), OIDC_CONTENT_TYPE_JSON, OK); } else if (apr_strnatcmp(OIDC_HOOK_INFO_FORMAT_HTML, s_format) == 0) { /* JSON-encode the result */ r_value = oidc_util_encode_json_object(r, json, JSON_INDENT(2)); rc = oidc_util_html_send(r, "Session Info", NULL, NULL, apr_psprintf(r->pool, "<pre>%s</pre>", r_value), OK); } /* free the allocated resources */ json_decref(json); return rc; } /* * handle all requests to the redirect_uri */ int oidc_handle_redirect_uri_request(request_rec *r, oidc_cfg *c, oidc_session_t *session) { if (oidc_proto_is_redirect_authorization_response(r, c)) { /* this is an authorization response from the OP using the Basic Client profile or a Hybrid flow*/ return oidc_handle_redirect_authorization_response(r, c, session); /* * * Note that we are checking for logout *before* checking for a POST authorization response * to handle backchannel POST-based logout * * so any POST to the Redirect URI that does not have a logout query parameter will be handled * as an authorization response; alternatively we could assume that a POST response has no * parameters */ } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_LOGOUT)) { /* handle logout */ return oidc_handle_logout(r, c, session); } else if (oidc_proto_is_post_authorization_response(r, c)) { /* this is an authorization response using the fragment(+POST) response_mode with the Implicit Client profile */ return oidc_handle_post_authorization_response(r, c, session); } else if (oidc_is_discovery_response(r, c)) { /* this is response from the OP discovery page */ return oidc_handle_discovery_response(r, c); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_JWKS)) { /* * Will be handled in the content handler; avoid: * No authentication done but request not allowed without authentication * by setting r->user */ r->user = ""; return OK; } else 
if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_SESSION)) { /* handle session management request */ return oidc_handle_session_management(r, c, session); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_REFRESH)) { /* handle refresh token request */ return oidc_handle_refresh_token_request(r, c, session); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_REQUEST_URI)) { /* handle request object by reference request */ return oidc_handle_request_uri(r, c); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_REMOVE_AT_CACHE)) { /* handle request to invalidate access token cache */ return oidc_handle_remove_at_cache(r, c); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_INFO)) { if (session->remote_user == NULL) return HTTP_UNAUTHORIZED; /* * Will be handled in the content handler; avoid: * No authentication done but request not allowed without authentication * by setting r->user */ r->user = ""; return OK; } else if ((r->args == NULL) || (apr_strnatcmp(r->args, "") == 0)) { /* this is a "bare" request to the redirect URI, indicating implicit flow using the fragment response_mode */ return oidc_proto_javascript_implicit(r, c); } /* this is not an authorization response or logout request */ /* check for "error" response */ if (oidc_util_request_has_parameter(r, OIDC_PROTO_ERROR)) { // char *error = NULL, *descr = NULL; // oidc_util_get_request_parameter(r, "error", &error); // oidc_util_get_request_parameter(r, "error_description", &descr); // // /* send user facing error to browser */ // return oidc_util_html_send_error(r, error, descr, DONE); return oidc_handle_redirect_authorization_response(r, c, session); } oidc_error(r, "The OpenID Connect callback URL received an invalid request: %s; returning HTTP_INTERNAL_SERVER_ERROR", r->args); /* something went wrong */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", apr_psprintf(r->pool, "The OpenID Connect callback URL received an invalid request"), HTTP_INTERNAL_SERVER_ERROR); } #define OIDC_AUTH_TYPE_OPENID_CONNECT "openid-connect" #define OIDC_AUTH_TYPE_OPENID_OAUTH20 "oauth20" #define OIDC_AUTH_TYPE_OPENID_BOTH "auth-openidc" /* * main routine: handle OpenID Connect authentication */ static int oidc_check_userid_openidc(request_rec *r, oidc_cfg *c) { if (oidc_get_redirect_uri(r, c) == NULL) { oidc_error(r, "configuration error: the authentication type is set to \"" OIDC_AUTH_TYPE_OPENID_CONNECT "\" but " OIDCRedirectURI " has not been set"); return HTTP_INTERNAL_SERVER_ERROR; } /* check if this is a sub-request or an initial request */ if (!ap_is_initial_req(r)) { /* not an initial request, try to recycle what we've already established in the main request */ if (r->main != NULL) r->user = r->main->user; else if (r->prev != NULL) r->user = r->prev->user; if (r->user != NULL) { /* this is a sub-request and we have a session (headers will have been scrubbed and set already) */ oidc_debug(r, "recycling user '%s' from initial request for sub-request", r->user); /* * apparently request state can get lost in sub-requests, so let's see * if we need to restore id_token and/or claims from the session cache */ const char *s_id_token = oidc_request_state_get(r, OIDC_REQUEST_STATE_KEY_IDTOKEN); if (s_id_token == NULL) { oidc_session_t *session = NULL; oidc_session_load(r, &session); oidc_copy_tokens_to_request_state(r, session, NULL, NULL); /* free resources allocated for the session */ oidc_session_free(r, session); } /* 
strip any cookies that we need to */ oidc_strip_cookies(r); return OK; } /* * else: not initial request, but we could not find a session, so: * try to load a new session as if this were the initial request */ } int rc = OK; apr_byte_t needs_save = FALSE; /* load the session from the request state; this will be a new "empty" session if no state exists */ oidc_session_t *session = NULL; oidc_session_load(r, &session); /* see if the initial request is to the redirect URI; this handles potential logout too */ if (oidc_util_request_matches_url(r, oidc_get_redirect_uri(r, c))) { /* handle request to the redirect_uri */ rc = oidc_handle_redirect_uri_request(r, c, session); /* free resources allocated for the session */ oidc_session_free(r, session); return rc; /* initial request to non-redirect URI, check if we have an existing session */ } else if (session->remote_user != NULL) { /* this is initial request and we already have a session */ rc = oidc_handle_existing_session(r, c, session, &needs_save); if (rc == OK) { /* check if something was updated in the session and we need to save it again */ if (needs_save) { if (oidc_session_save(r, session, FALSE) == FALSE) { oidc_warn(r, "error saving session"); rc = HTTP_INTERNAL_SERVER_ERROR; } } } /* free resources allocated for the session */ oidc_session_free(r, session); /* strip any cookies that we need to */ oidc_strip_cookies(r); return rc; } /* free resources allocated for the session */ oidc_session_free(r, session); /* * else: we have no session and it is not an authorization or * discovery response: just hit the default flow for unauthenticated users */ return oidc_handle_unauthenticated_user(r, c); } /* * main routine: handle "mixed" OIDC/OAuth authentication */ static int oidc_check_mixed_userid_oauth(request_rec *r, oidc_cfg *c) { /* get the bearer access token from the Authorization header */ const char *access_token = NULL; if (oidc_oauth_get_bearer_token(r, &access_token) == TRUE) { r->ap_auth_type = apr_pstrdup(r->pool, OIDC_AUTH_TYPE_OPENID_OAUTH20); return oidc_oauth_check_userid(r, c, access_token); } /* no bearer token found: then treat this as a regular OIDC browser request */ r->ap_auth_type = apr_pstrdup(r->pool, OIDC_AUTH_TYPE_OPENID_CONNECT); return oidc_check_userid_openidc(r, c); } /* * generic Apache authentication hook for this module: dispatches to OpenID Connect or OAuth 2.0 specific routines */ int oidc_check_user_id(request_rec *r) { oidc_cfg *c = ap_get_module_config(r->server->module_config, &auth_openidc_module); /* log some stuff about the incoming HTTP request */ oidc_debug(r, "incoming request: \"%s?%s\", ap_is_initial_req(r)=%d", r->parsed_uri.path, r->args, ap_is_initial_req(r)); /* see if any authentication has been defined at all */ const char *current_auth = ap_auth_type(r); if (current_auth == NULL) return DECLINED; /* see if we've configured OpenID Connect user authentication for this request */ if (strcasecmp(current_auth, OIDC_AUTH_TYPE_OPENID_CONNECT) == 0) { r->ap_auth_type = (char*) current_auth; return oidc_check_userid_openidc(r, c); } /* see if we've configured OAuth 2.0 access control for this request */ if (strcasecmp(current_auth, OIDC_AUTH_TYPE_OPENID_OAUTH20) == 0) { r->ap_auth_type = (char*) current_auth; return oidc_oauth_check_userid(r, c, NULL); } /* see if we've configured "mixed mode" for this request */ if (strcasecmp(current_auth, OIDC_AUTH_TYPE_OPENID_BOTH) == 0) return oidc_check_mixed_userid_oauth(r, c); /* this is not for us but for some other handler */ return DECLINED; } /* * get 
the claims and id_token from request state */ static void oidc_authz_get_claims_and_idtoken(request_rec *r, json_t **claims, json_t **id_token) { const char *s_claims = oidc_request_state_get(r, OIDC_REQUEST_STATE_KEY_CLAIMS); if (s_claims != NULL) oidc_util_decode_json_object(r, s_claims, claims); const char *s_id_token = oidc_request_state_get(r, OIDC_REQUEST_STATE_KEY_IDTOKEN); if (s_id_token != NULL) oidc_util_decode_json_object(r, s_id_token, id_token); } #if MODULE_MAGIC_NUMBER_MAJOR >= 20100714 #define OIDC_OAUTH_BEARER_SCOPE_ERROR "OIDC_OAUTH_BEARER_SCOPE_ERROR" #define OIDC_OAUTH_BEARER_SCOPE_ERROR_VALUE "Bearer error=\"insufficient_scope\", error_description=\"Different scope(s) or other claims required\"" /* * find out which action we need to take when encountering an unauthorized request */ static authz_status oidc_handle_unauthorized_user24(request_rec *r) { oidc_debug(r, "enter"); oidc_cfg *c = ap_get_module_config(r->server->module_config, &auth_openidc_module); if (apr_strnatcasecmp((const char*) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_OAUTH20) == 0) { oidc_debug(r, "setting environment variable %s to \"%s\" for usage in mod_headers", OIDC_OAUTH_BEARER_SCOPE_ERROR, OIDC_OAUTH_BEARER_SCOPE_ERROR_VALUE); apr_table_set(r->subprocess_env, OIDC_OAUTH_BEARER_SCOPE_ERROR, OIDC_OAUTH_BEARER_SCOPE_ERROR_VALUE); return AUTHZ_DENIED; } /* see if we've configured OIDCUnAutzAction for this path */ switch (oidc_dir_cfg_unautz_action(r)) { // TODO: document that AuthzSendForbiddenOnFailure is required to return 403 FORBIDDEN case OIDC_UNAUTZ_RETURN403: case OIDC_UNAUTZ_RETURN401: return AUTHZ_DENIED; break; case OIDC_UNAUTZ_AUTHENTICATE: /* * exception handling: if this looks like a XMLHttpRequest call we * won't redirect the user and thus avoid creating a state cookie * for a non-browser (= Javascript) call that will never return from the OP */ if (oidc_is_xml_http_request(r) == TRUE) return AUTHZ_DENIED; break; } oidc_authenticate_user(r, c, NULL, oidc_get_current_url(r), NULL, NULL, NULL, oidc_dir_cfg_path_auth_request_params(r), oidc_dir_cfg_path_scope(r)); const char *location = oidc_util_hdr_out_location_get(r); if (location != NULL) { oidc_debug(r, "send HTML refresh with authorization redirect: %s", location); char *html_head = apr_psprintf(r->pool, "<meta http-equiv=\"refresh\" content=\"0; url=%s\">", location); oidc_util_html_send(r, "Stepup Authentication", html_head, NULL, NULL, HTTP_UNAUTHORIZED); /* * a hack for Apache 2.4 to prevent it from writing its own 401 HTML document * text by making ap_send_error_response in http_protocol.c return early... */ r->header_only = 1; } return AUTHZ_DENIED; } /* * generic Apache >=2.4 authorization hook for this module * handles both OpenID Connect or OAuth 2.0 in the same way, based on the claims stored in the session */ authz_status oidc_authz_checker(request_rec *r, const char *require_args, const void *parsed_require_args, oidc_authz_match_claim_fn_type match_claim_fn) { oidc_debug(r, "enter: require_args=\"%s\"", require_args); /* check for anonymous access and PASS mode */ if (r->user != NULL && strlen(r->user) == 0) { r->user = NULL; if (oidc_dir_cfg_unauth_action(r) == OIDC_UNAUTH_PASS) return AUTHZ_GRANTED; } /* get the set of claims from the request state (they've been set in the authentication part earlier */ json_t *claims = NULL, *id_token = NULL; oidc_authz_get_claims_and_idtoken(r, &claims, &id_token); /* merge id_token claims (e.g. 
"iss") in to claims json object */ if (claims) oidc_util_json_merge(r, id_token, claims); /* dispatch to the >=2.4 specific authz routine */ authz_status rc = oidc_authz_worker24(r, claims ? claims : id_token, require_args, parsed_require_args, match_claim_fn); /* cleanup */ if (claims) json_decref(claims); if (id_token) json_decref(id_token); if ((rc == AUTHZ_DENIED) && ap_auth_type(r)) rc = oidc_handle_unauthorized_user24(r); return rc; } authz_status oidc_authz_checker_claim(request_rec *r, const char *require_args, const void *parsed_require_args) { return oidc_authz_checker(r, require_args, parsed_require_args, oidc_authz_match_claim); } #ifdef USE_LIBJQ authz_status oidc_authz_checker_claims_expr(request_rec *r, const char *require_args, const void *parsed_require_args) { return oidc_authz_checker(r, require_args, parsed_require_args, oidc_authz_match_claims_expr); } #endif #else /* * find out which action we need to take when encountering an unauthorized request */ static int oidc_handle_unauthorized_user22(request_rec *r) { oidc_cfg *c = ap_get_module_config(r->server->module_config, &auth_openidc_module); if (apr_strnatcasecmp((const char *) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_OAUTH20) == 0) { oidc_oauth_return_www_authenticate(r, "insufficient_scope", "Different scope(s) or other claims required"); return HTTP_UNAUTHORIZED; } /* see if we've configured OIDCUnAutzAction for this path */ switch (oidc_dir_cfg_unautz_action(r)) { case OIDC_UNAUTZ_RETURN403: return HTTP_FORBIDDEN; case OIDC_UNAUTZ_RETURN401: return HTTP_UNAUTHORIZED; case OIDC_UNAUTZ_AUTHENTICATE: /* * exception handling: if this looks like a XMLHttpRequest call we * won't redirect the user and thus avoid creating a state cookie * for a non-browser (= Javascript) call that will never return from the OP */ if (oidc_is_xml_http_request(r) == TRUE) return HTTP_UNAUTHORIZED; } return oidc_authenticate_user(r, c, NULL, oidc_get_current_url(r), NULL, NULL, NULL, oidc_dir_cfg_path_auth_request_params(r), oidc_dir_cfg_path_scope(r)); } /* * generic Apache <2.4 authorization hook for this module * handles both OpenID Connect and OAuth 2.0 in the same way, based on the claims stored in the request context */ int oidc_auth_checker(request_rec *r) { /* check for anonymous access and PASS mode */ if (r->user != NULL && strlen(r->user) == 0) { r->user = NULL; if (oidc_dir_cfg_unauth_action(r) == OIDC_UNAUTH_PASS) return OK; } /* get the set of claims from the request state (they've been set in the authentication part earlier */ json_t *claims = NULL, *id_token = NULL; oidc_authz_get_claims_and_idtoken(r, &claims, &id_token); /* get the Require statements */ const apr_array_header_t * const reqs_arr = ap_requires(r); /* see if we have any */ const require_line * const reqs = reqs_arr ? (require_line *) reqs_arr->elts : NULL; if (!reqs_arr) { oidc_debug(r, "no require statements found, so declining to perform authorization."); return DECLINED; } /* merge id_token claims (e.g. "iss") in to claims json object */ if (claims) oidc_util_json_merge(r, id_token, claims); /* dispatch to the <2.4 specific authz routine */ int rc = oidc_authz_worker22(r, claims ? 
claims : id_token, reqs, reqs_arr->nelts); /* cleanup */ if (claims) json_decref(claims); if (id_token) json_decref(id_token); if ((rc == HTTP_UNAUTHORIZED) && ap_auth_type(r)) rc = oidc_handle_unauthorized_user22(r); return rc; } #endif apr_byte_t oidc_enabled(request_rec *r) { if (ap_auth_type(r) == NULL) return FALSE; if (apr_strnatcasecmp((const char*) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_CONNECT) == 0) return TRUE; if (apr_strnatcasecmp((const char*) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_OAUTH20) == 0) return TRUE; if (apr_strnatcasecmp((const char*) ap_auth_type(r), OIDC_AUTH_TYPE_OPENID_BOTH) == 0) return TRUE; return FALSE; } /* * handle content generating requests */ int oidc_content_handler(request_rec *r) { oidc_cfg *c = ap_get_module_config(r->server->module_config, &auth_openidc_module); int rc = DECLINED; /* track if the session needs to be updated/saved into the cache */ apr_byte_t needs_save = FALSE; oidc_session_t *session = NULL; if (oidc_enabled(r) && oidc_util_request_matches_url(r, oidc_get_redirect_uri(r, c))) { if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_INFO)) { oidc_session_load(r, &session); rc = oidc_handle_existing_session(r, c, session, &needs_save); if (rc == OK) /* handle request for session info */ rc = oidc_handle_info_request(r, c, session, needs_save); /* free resources allocated for the session */ oidc_session_free(r, session); } else if (oidc_util_request_has_parameter(r, OIDC_REDIRECT_URI_REQUEST_JWKS)) { /* handle JWKs request */ rc = oidc_handle_jwks(r, c); } } return rc; } extern const command_rec oidc_config_cmds[]; module AP_MODULE_DECLARE_DATA auth_openidc_module = { STANDARD20_MODULE_STUFF, oidc_create_dir_config, oidc_merge_dir_config, oidc_create_server_config, oidc_merge_server_config, oidc_config_cmds, oidc_register_hooks };
static int oidc_request_post_preserved_restore(request_rec *r,
		const char *original_url) {
	oidc_debug(r, "enter: original_url=%s", original_url);
	const char *method = "postOnLoad";
	const char *script = apr_psprintf(r->pool,
			" <script type=\"text/javascript\">\n"
			" function str_decode(string) {\n"
			" try {\n"
			" result = decodeURIComponent(string);\n"
			" } catch (e) {\n"
			" result = unescape(string);\n"
			" }\n"
			" return result;\n"
			" }\n"
			" function %s() {\n"
			" var mod_auth_openidc_preserve_post_params = JSON.parse(sessionStorage.getItem('mod_auth_openidc_preserve_post_params'));\n"
			" sessionStorage.removeItem('mod_auth_openidc_preserve_post_params');\n"
			" for (var key in mod_auth_openidc_preserve_post_params) {\n"
			" var input = document.createElement(\"input\");\n"
			" input.name = str_decode(key);\n"
			" input.value = str_decode(mod_auth_openidc_preserve_post_params[key]);\n"
			" input.type = \"hidden\";\n"
			" document.forms[0].appendChild(input);\n"
			" }\n"
			" document.forms[0].action = '%s';\n"
			" document.forms[0].submit();\n"
			" }\n"
			" </script>\n", method, original_url);
	const char *body = " <p>Restoring...</p>\n"
			" <form method=\"post\"></form>\n";
	return oidc_util_html_send(r, "Restoring...", script, method, body, OK);
}
static int oidc_request_post_preserved_restore(request_rec *r,
		const char *original_url) {
	oidc_debug(r, "enter: original_url=%s", original_url);
	const char *method = "postOnLoad";
	const char *script = apr_psprintf(r->pool,
			" <script type=\"text/javascript\">\n"
			" function str_decode(string) {\n"
			" try {\n"
			" result = decodeURIComponent(string);\n"
			" } catch (e) {\n"
			" result = unescape(string);\n"
			" }\n"
			" return result;\n"
			" }\n"
			" function %s() {\n"
			" var mod_auth_openidc_preserve_post_params = JSON.parse(sessionStorage.getItem('mod_auth_openidc_preserve_post_params'));\n"
			" sessionStorage.removeItem('mod_auth_openidc_preserve_post_params');\n"
			" for (var key in mod_auth_openidc_preserve_post_params) {\n"
			" var input = document.createElement(\"input\");\n"
			" input.name = str_decode(key);\n"
			" input.value = str_decode(mod_auth_openidc_preserve_post_params[key]);\n"
			" input.type = \"hidden\";\n"
			" document.forms[0].appendChild(input);\n"
			" }\n"
			" document.forms[0].action = \"%s\";\n"
			" document.forms[0].submit();\n"
			" }\n"
			" </script>\n", method, original_url);
	const char *body = " <p>Restoring...</p>\n"
			" <form method=\"post\"></form>\n";
	return oidc_util_html_send(r, "Restoring...", script, method, body, OK);
}
{'added': [(516, '\t\t\t\t\t" document.forms[0].action = \\"%s\\";\\n"')], 'deleted': [(516, '\t\t\t\t\t" document.forms[0].action = \'%s\';\\n"')]}
1
1
2,637
17,669
https://github.com/zmartzone/mod_auth_openidc
CVE-2021-32792
['CWE-79']
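The diff field of the record above (mod_auth_openidc, CVE-2021-32792, CWE-79) amounts to a single quoting change at source line 516 of the file. The side-by-side below is reproduced from that diff entry rather than adding any new logic; the surrounding apr_psprintf() call and the original_url argument are exactly as shown in the func_before/func_after fields:

    /* before: the %s placeholder for original_url sits inside single quotes */
    "                    document.forms[0].action = '%s';\n"

    /* after: the placeholder is wrapped in escaped double quotes instead */
    "                    document.forms[0].action = \"%s\";\n"

Given the CWE-79 classification, the single-quoted interpolation in the emitted JavaScript appears to have been the injection point; the fix itself changes only the quoting style around the format placeholder.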
xwddec.c
xwd_decode_frame
/* * XWD image format * * Copyright (c) 2012 Paul B Mahol * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <inttypes.h> #include "libavutil/imgutils.h" #include "avcodec.h" #include "bytestream.h" #include "internal.h" #include "xwd.h" static int xwd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *p = data; const uint8_t *buf = avpkt->data; int i, ret, buf_size = avpkt->size; uint32_t version, header_size, vclass, ncolors; uint32_t xoffset, be, bpp, lsize, rsize; uint32_t pixformat, pixdepth, bunit, bitorder, bpad; uint32_t rgb[3]; uint8_t *ptr; GetByteContext gb; if (buf_size < XWD_HEADER_SIZE) return AVERROR_INVALIDDATA; bytestream2_init(&gb, buf, buf_size); header_size = bytestream2_get_be32u(&gb); version = bytestream2_get_be32u(&gb); if (version != XWD_VERSION) { av_log(avctx, AV_LOG_ERROR, "unsupported version\n"); return AVERROR_INVALIDDATA; } if (buf_size < header_size || header_size < XWD_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "invalid header size\n"); return AVERROR_INVALIDDATA; } pixformat = bytestream2_get_be32u(&gb); pixdepth = bytestream2_get_be32u(&gb); avctx->width = bytestream2_get_be32u(&gb); avctx->height = bytestream2_get_be32u(&gb); xoffset = bytestream2_get_be32u(&gb); be = bytestream2_get_be32u(&gb); bunit = bytestream2_get_be32u(&gb); bitorder = bytestream2_get_be32u(&gb); bpad = bytestream2_get_be32u(&gb); bpp = bytestream2_get_be32u(&gb); lsize = bytestream2_get_be32u(&gb); vclass = bytestream2_get_be32u(&gb); rgb[0] = bytestream2_get_be32u(&gb); rgb[1] = bytestream2_get_be32u(&gb); rgb[2] = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, 8); ncolors = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, header_size - (XWD_HEADER_SIZE - 20)); av_log(avctx, AV_LOG_DEBUG, "pixformat %"PRIu32", pixdepth %"PRIu32", bunit %"PRIu32", bitorder %"PRIu32", bpad %"PRIu32"\n", pixformat, pixdepth, bunit, bitorder, bpad); av_log(avctx, AV_LOG_DEBUG, "vclass %"PRIu32", ncolors %"PRIu32", bpp %"PRIu32", be %"PRIu32", lsize %"PRIu32", xoffset %"PRIu32"\n", vclass, ncolors, bpp, be, lsize, xoffset); av_log(avctx, AV_LOG_DEBUG, "red %0"PRIx32", green %0"PRIx32", blue %0"PRIx32"\n", rgb[0], rgb[1], rgb[2]); if (pixformat > XWD_Z_PIXMAP) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap format\n"); return AVERROR_INVALIDDATA; } if (pixdepth == 0 || pixdepth > 32) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap depth\n"); return AVERROR_INVALIDDATA; } if (xoffset) { avpriv_request_sample(avctx, "xoffset %"PRIu32"", xoffset); return AVERROR_PATCHWELCOME; } if (be > 1) { av_log(avctx, AV_LOG_ERROR, "invalid byte order\n"); return AVERROR_INVALIDDATA; } if (bitorder > 1) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap bit order\n"); return AVERROR_INVALIDDATA; } if (bunit != 8 && bunit != 16 && bunit != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap 
unit\n"); return AVERROR_INVALIDDATA; } if (bpad != 8 && bpad != 16 && bpad != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap scan-line pad\n"); return AVERROR_INVALIDDATA; } if (bpp == 0 || bpp > 32) { av_log(avctx, AV_LOG_ERROR, "invalid bits per pixel\n"); return AVERROR_INVALIDDATA; } if (ncolors > 256) { av_log(avctx, AV_LOG_ERROR, "invalid number of entries in colormap\n"); return AVERROR_INVALIDDATA; } if ((ret = av_image_check_size(avctx->width, avctx->height, 0, NULL)) < 0) return ret; rsize = FFALIGN(avctx->width * bpp, bpad) / 8; if (lsize < rsize) { av_log(avctx, AV_LOG_ERROR, "invalid bytes per scan-line\n"); return AVERROR_INVALIDDATA; } if (bytestream2_get_bytes_left(&gb) < ncolors * XWD_CMAP_SIZE + (uint64_t)avctx->height * lsize) { av_log(avctx, AV_LOG_ERROR, "input buffer too small\n"); return AVERROR_INVALIDDATA; } if (pixformat != XWD_Z_PIXMAP) { avpriv_report_missing_feature(avctx, "Pixmap format %"PRIu32, pixformat); return AVERROR_PATCHWELCOME; } avctx->pix_fmt = AV_PIX_FMT_NONE; switch (vclass) { case XWD_STATIC_GRAY: case XWD_GRAY_SCALE: if (bpp != 1 && bpp != 8) return AVERROR_INVALIDDATA; if (pixdepth == 1) { avctx->pix_fmt = AV_PIX_FMT_MONOWHITE; } else if (pixdepth == 8) { avctx->pix_fmt = AV_PIX_FMT_GRAY8; } break; case XWD_STATIC_COLOR: case XWD_PSEUDO_COLOR: if (bpp == 8) avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case XWD_TRUE_COLOR: case XWD_DIRECT_COLOR: if (bpp != 16 && bpp != 24 && bpp != 32) return AVERROR_INVALIDDATA; if (bpp == 16 && pixdepth == 15) { if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB555BE : AV_PIX_FMT_RGB555LE; else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00) avctx->pix_fmt = be ? AV_PIX_FMT_BGR555BE : AV_PIX_FMT_BGR555LE; } else if (bpp == 16 && pixdepth == 16) { if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_RGB565LE; else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800) avctx->pix_fmt = be ? AV_PIX_FMT_BGR565BE : AV_PIX_FMT_BGR565LE; } else if (bpp == 24) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_RGB24 : AV_PIX_FMT_BGR24; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24; } else if (bpp == 32) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_ARGB : AV_PIX_FMT_BGRA; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? 
AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA; } bytestream2_skipu(&gb, ncolors * XWD_CMAP_SIZE); break; default: av_log(avctx, AV_LOG_ERROR, "invalid visual class\n"); return AVERROR_INVALIDDATA; } if (avctx->pix_fmt == AV_PIX_FMT_NONE) { avpriv_request_sample(avctx, "Unknown file: bpp %"PRIu32", pixdepth %"PRIu32", vclass %"PRIu32"", bpp, pixdepth, vclass); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { uint32_t *dst = (uint32_t *)p->data[1]; uint8_t red, green, blue; for (i = 0; i < ncolors; i++) { bytestream2_skipu(&gb, 4); // skip colormap entry number red = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); green = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); blue = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 3); // skip bitmask flag and padding dst[i] = red << 16 | green << 8 | blue; } } ptr = p->data[0]; for (i = 0; i < avctx->height; i++) { bytestream2_get_bufferu(&gb, ptr, rsize); bytestream2_skipu(&gb, lsize - rsize); ptr += p->linesize[0]; } *got_frame = 1; return buf_size; } AVCodec ff_xwd_decoder = { .name = "xwd", .long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_XWD, .decode = xwd_decode_frame, .capabilities = AV_CODEC_CAP_DR1, };
/* * XWD image format * * Copyright (c) 2012 Paul B Mahol * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <inttypes.h> #include "libavutil/imgutils.h" #include "avcodec.h" #include "bytestream.h" #include "internal.h" #include "xwd.h" static int xwd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *p = data; const uint8_t *buf = avpkt->data; int i, ret, buf_size = avpkt->size; uint32_t version, header_size, vclass, ncolors; uint32_t xoffset, be, bpp, lsize, rsize; uint32_t pixformat, pixdepth, bunit, bitorder, bpad; uint32_t rgb[3]; uint8_t *ptr; GetByteContext gb; if (buf_size < XWD_HEADER_SIZE) return AVERROR_INVALIDDATA; bytestream2_init(&gb, buf, buf_size); header_size = bytestream2_get_be32u(&gb); version = bytestream2_get_be32u(&gb); if (version != XWD_VERSION) { av_log(avctx, AV_LOG_ERROR, "unsupported version\n"); return AVERROR_INVALIDDATA; } if (buf_size < header_size || header_size < XWD_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "invalid header size\n"); return AVERROR_INVALIDDATA; } pixformat = bytestream2_get_be32u(&gb); pixdepth = bytestream2_get_be32u(&gb); avctx->width = bytestream2_get_be32u(&gb); avctx->height = bytestream2_get_be32u(&gb); xoffset = bytestream2_get_be32u(&gb); be = bytestream2_get_be32u(&gb); bunit = bytestream2_get_be32u(&gb); bitorder = bytestream2_get_be32u(&gb); bpad = bytestream2_get_be32u(&gb); bpp = bytestream2_get_be32u(&gb); lsize = bytestream2_get_be32u(&gb); vclass = bytestream2_get_be32u(&gb); rgb[0] = bytestream2_get_be32u(&gb); rgb[1] = bytestream2_get_be32u(&gb); rgb[2] = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, 8); ncolors = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, header_size - (XWD_HEADER_SIZE - 20)); av_log(avctx, AV_LOG_DEBUG, "pixformat %"PRIu32", pixdepth %"PRIu32", bunit %"PRIu32", bitorder %"PRIu32", bpad %"PRIu32"\n", pixformat, pixdepth, bunit, bitorder, bpad); av_log(avctx, AV_LOG_DEBUG, "vclass %"PRIu32", ncolors %"PRIu32", bpp %"PRIu32", be %"PRIu32", lsize %"PRIu32", xoffset %"PRIu32"\n", vclass, ncolors, bpp, be, lsize, xoffset); av_log(avctx, AV_LOG_DEBUG, "red %0"PRIx32", green %0"PRIx32", blue %0"PRIx32"\n", rgb[0], rgb[1], rgb[2]); if (pixformat > XWD_Z_PIXMAP) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap format\n"); return AVERROR_INVALIDDATA; } if (pixdepth == 0 || pixdepth > 32) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap depth\n"); return AVERROR_INVALIDDATA; } if (xoffset) { avpriv_request_sample(avctx, "xoffset %"PRIu32"", xoffset); return AVERROR_PATCHWELCOME; } if (be > 1) { av_log(avctx, AV_LOG_ERROR, "invalid byte order\n"); return AVERROR_INVALIDDATA; } if (bitorder > 1) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap bit order\n"); return AVERROR_INVALIDDATA; } if (bunit != 8 && bunit != 16 && bunit != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap 
unit\n"); return AVERROR_INVALIDDATA; } if (bpad != 8 && bpad != 16 && bpad != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap scan-line pad\n"); return AVERROR_INVALIDDATA; } if (bpp == 0 || bpp > 32) { av_log(avctx, AV_LOG_ERROR, "invalid bits per pixel\n"); return AVERROR_INVALIDDATA; } if (ncolors > 256) { av_log(avctx, AV_LOG_ERROR, "invalid number of entries in colormap\n"); return AVERROR_INVALIDDATA; } if ((ret = av_image_check_size(avctx->width, avctx->height, 0, NULL)) < 0) return ret; rsize = FFALIGN(avctx->width * bpp, bpad) / 8; if (lsize < rsize) { av_log(avctx, AV_LOG_ERROR, "invalid bytes per scan-line\n"); return AVERROR_INVALIDDATA; } if (bytestream2_get_bytes_left(&gb) < ncolors * XWD_CMAP_SIZE + (uint64_t)avctx->height * lsize) { av_log(avctx, AV_LOG_ERROR, "input buffer too small\n"); return AVERROR_INVALIDDATA; } if (pixformat != XWD_Z_PIXMAP) { avpriv_report_missing_feature(avctx, "Pixmap format %"PRIu32, pixformat); return AVERROR_PATCHWELCOME; } avctx->pix_fmt = AV_PIX_FMT_NONE; switch (vclass) { case XWD_STATIC_GRAY: case XWD_GRAY_SCALE: if (bpp != 1 && bpp != 8) return AVERROR_INVALIDDATA; if (bpp == 1 && pixdepth == 1) { avctx->pix_fmt = AV_PIX_FMT_MONOWHITE; } else if (bpp == 8 && pixdepth == 8) { avctx->pix_fmt = AV_PIX_FMT_GRAY8; } break; case XWD_STATIC_COLOR: case XWD_PSEUDO_COLOR: if (bpp == 8) avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case XWD_TRUE_COLOR: case XWD_DIRECT_COLOR: if (bpp != 16 && bpp != 24 && bpp != 32) return AVERROR_INVALIDDATA; if (bpp == 16 && pixdepth == 15) { if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB555BE : AV_PIX_FMT_RGB555LE; else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00) avctx->pix_fmt = be ? AV_PIX_FMT_BGR555BE : AV_PIX_FMT_BGR555LE; } else if (bpp == 16 && pixdepth == 16) { if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_RGB565LE; else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800) avctx->pix_fmt = be ? AV_PIX_FMT_BGR565BE : AV_PIX_FMT_BGR565LE; } else if (bpp == 24) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_RGB24 : AV_PIX_FMT_BGR24; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24; } else if (bpp == 32) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_ARGB : AV_PIX_FMT_BGRA; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? 
AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA; } bytestream2_skipu(&gb, ncolors * XWD_CMAP_SIZE); break; default: av_log(avctx, AV_LOG_ERROR, "invalid visual class\n"); return AVERROR_INVALIDDATA; } if (avctx->pix_fmt == AV_PIX_FMT_NONE) { avpriv_request_sample(avctx, "Unknown file: bpp %"PRIu32", pixdepth %"PRIu32", vclass %"PRIu32"", bpp, pixdepth, vclass); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { uint32_t *dst = (uint32_t *)p->data[1]; uint8_t red, green, blue; for (i = 0; i < ncolors; i++) { bytestream2_skipu(&gb, 4); // skip colormap entry number red = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); green = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); blue = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 3); // skip bitmask flag and padding dst[i] = red << 16 | green << 8 | blue; } } ptr = p->data[0]; for (i = 0; i < avctx->height; i++) { bytestream2_get_bufferu(&gb, ptr, rsize); bytestream2_skipu(&gb, lsize - rsize); ptr += p->linesize[0]; } *got_frame = 1; return buf_size; } AVCodec ff_xwd_decoder = { .name = "xwd", .long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_XWD, .decode = xwd_decode_frame, .capabilities = AV_CODEC_CAP_DR1, };
static int xwd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *p = data; const uint8_t *buf = avpkt->data; int i, ret, buf_size = avpkt->size; uint32_t version, header_size, vclass, ncolors; uint32_t xoffset, be, bpp, lsize, rsize; uint32_t pixformat, pixdepth, bunit, bitorder, bpad; uint32_t rgb[3]; uint8_t *ptr; GetByteContext gb; if (buf_size < XWD_HEADER_SIZE) return AVERROR_INVALIDDATA; bytestream2_init(&gb, buf, buf_size); header_size = bytestream2_get_be32u(&gb); version = bytestream2_get_be32u(&gb); if (version != XWD_VERSION) { av_log(avctx, AV_LOG_ERROR, "unsupported version\n"); return AVERROR_INVALIDDATA; } if (buf_size < header_size || header_size < XWD_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "invalid header size\n"); return AVERROR_INVALIDDATA; } pixformat = bytestream2_get_be32u(&gb); pixdepth = bytestream2_get_be32u(&gb); avctx->width = bytestream2_get_be32u(&gb); avctx->height = bytestream2_get_be32u(&gb); xoffset = bytestream2_get_be32u(&gb); be = bytestream2_get_be32u(&gb); bunit = bytestream2_get_be32u(&gb); bitorder = bytestream2_get_be32u(&gb); bpad = bytestream2_get_be32u(&gb); bpp = bytestream2_get_be32u(&gb); lsize = bytestream2_get_be32u(&gb); vclass = bytestream2_get_be32u(&gb); rgb[0] = bytestream2_get_be32u(&gb); rgb[1] = bytestream2_get_be32u(&gb); rgb[2] = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, 8); ncolors = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, header_size - (XWD_HEADER_SIZE - 20)); av_log(avctx, AV_LOG_DEBUG, "pixformat %"PRIu32", pixdepth %"PRIu32", bunit %"PRIu32", bitorder %"PRIu32", bpad %"PRIu32"\n", pixformat, pixdepth, bunit, bitorder, bpad); av_log(avctx, AV_LOG_DEBUG, "vclass %"PRIu32", ncolors %"PRIu32", bpp %"PRIu32", be %"PRIu32", lsize %"PRIu32", xoffset %"PRIu32"\n", vclass, ncolors, bpp, be, lsize, xoffset); av_log(avctx, AV_LOG_DEBUG, "red %0"PRIx32", green %0"PRIx32", blue %0"PRIx32"\n", rgb[0], rgb[1], rgb[2]); if (pixformat > XWD_Z_PIXMAP) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap format\n"); return AVERROR_INVALIDDATA; } if (pixdepth == 0 || pixdepth > 32) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap depth\n"); return AVERROR_INVALIDDATA; } if (xoffset) { avpriv_request_sample(avctx, "xoffset %"PRIu32"", xoffset); return AVERROR_PATCHWELCOME; } if (be > 1) { av_log(avctx, AV_LOG_ERROR, "invalid byte order\n"); return AVERROR_INVALIDDATA; } if (bitorder > 1) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap bit order\n"); return AVERROR_INVALIDDATA; } if (bunit != 8 && bunit != 16 && bunit != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap unit\n"); return AVERROR_INVALIDDATA; } if (bpad != 8 && bpad != 16 && bpad != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap scan-line pad\n"); return AVERROR_INVALIDDATA; } if (bpp == 0 || bpp > 32) { av_log(avctx, AV_LOG_ERROR, "invalid bits per pixel\n"); return AVERROR_INVALIDDATA; } if (ncolors > 256) { av_log(avctx, AV_LOG_ERROR, "invalid number of entries in colormap\n"); return AVERROR_INVALIDDATA; } if ((ret = av_image_check_size(avctx->width, avctx->height, 0, NULL)) < 0) return ret; rsize = FFALIGN(avctx->width * bpp, bpad) / 8; if (lsize < rsize) { av_log(avctx, AV_LOG_ERROR, "invalid bytes per scan-line\n"); return AVERROR_INVALIDDATA; } if (bytestream2_get_bytes_left(&gb) < ncolors * XWD_CMAP_SIZE + (uint64_t)avctx->height * lsize) { av_log(avctx, AV_LOG_ERROR, "input buffer too small\n"); return AVERROR_INVALIDDATA; } if (pixformat != XWD_Z_PIXMAP) { avpriv_report_missing_feature(avctx, "Pixmap format %"PRIu32, 
pixformat); return AVERROR_PATCHWELCOME; } avctx->pix_fmt = AV_PIX_FMT_NONE; switch (vclass) { case XWD_STATIC_GRAY: case XWD_GRAY_SCALE: if (bpp != 1 && bpp != 8) return AVERROR_INVALIDDATA; if (pixdepth == 1) { avctx->pix_fmt = AV_PIX_FMT_MONOWHITE; } else if (pixdepth == 8) { avctx->pix_fmt = AV_PIX_FMT_GRAY8; } break; case XWD_STATIC_COLOR: case XWD_PSEUDO_COLOR: if (bpp == 8) avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case XWD_TRUE_COLOR: case XWD_DIRECT_COLOR: if (bpp != 16 && bpp != 24 && bpp != 32) return AVERROR_INVALIDDATA; if (bpp == 16 && pixdepth == 15) { if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB555BE : AV_PIX_FMT_RGB555LE; else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00) avctx->pix_fmt = be ? AV_PIX_FMT_BGR555BE : AV_PIX_FMT_BGR555LE; } else if (bpp == 16 && pixdepth == 16) { if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_RGB565LE; else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800) avctx->pix_fmt = be ? AV_PIX_FMT_BGR565BE : AV_PIX_FMT_BGR565LE; } else if (bpp == 24) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_RGB24 : AV_PIX_FMT_BGR24; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24; } else if (bpp == 32) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_ARGB : AV_PIX_FMT_BGRA; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA; } bytestream2_skipu(&gb, ncolors * XWD_CMAP_SIZE); break; default: av_log(avctx, AV_LOG_ERROR, "invalid visual class\n"); return AVERROR_INVALIDDATA; } if (avctx->pix_fmt == AV_PIX_FMT_NONE) { avpriv_request_sample(avctx, "Unknown file: bpp %"PRIu32", pixdepth %"PRIu32", vclass %"PRIu32"", bpp, pixdepth, vclass); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { uint32_t *dst = (uint32_t *)p->data[1]; uint8_t red, green, blue; for (i = 0; i < ncolors; i++) { bytestream2_skipu(&gb, 4); // skip colormap entry number red = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); green = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); blue = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 3); // skip bitmask flag and padding dst[i] = red << 16 | green << 8 | blue; } } ptr = p->data[0]; for (i = 0; i < avctx->height; i++) { bytestream2_get_bufferu(&gb, ptr, rsize); bytestream2_skipu(&gb, lsize - rsize); ptr += p->linesize[0]; } *got_frame = 1; return buf_size; }
static int xwd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *p = data; const uint8_t *buf = avpkt->data; int i, ret, buf_size = avpkt->size; uint32_t version, header_size, vclass, ncolors; uint32_t xoffset, be, bpp, lsize, rsize; uint32_t pixformat, pixdepth, bunit, bitorder, bpad; uint32_t rgb[3]; uint8_t *ptr; GetByteContext gb; if (buf_size < XWD_HEADER_SIZE) return AVERROR_INVALIDDATA; bytestream2_init(&gb, buf, buf_size); header_size = bytestream2_get_be32u(&gb); version = bytestream2_get_be32u(&gb); if (version != XWD_VERSION) { av_log(avctx, AV_LOG_ERROR, "unsupported version\n"); return AVERROR_INVALIDDATA; } if (buf_size < header_size || header_size < XWD_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "invalid header size\n"); return AVERROR_INVALIDDATA; } pixformat = bytestream2_get_be32u(&gb); pixdepth = bytestream2_get_be32u(&gb); avctx->width = bytestream2_get_be32u(&gb); avctx->height = bytestream2_get_be32u(&gb); xoffset = bytestream2_get_be32u(&gb); be = bytestream2_get_be32u(&gb); bunit = bytestream2_get_be32u(&gb); bitorder = bytestream2_get_be32u(&gb); bpad = bytestream2_get_be32u(&gb); bpp = bytestream2_get_be32u(&gb); lsize = bytestream2_get_be32u(&gb); vclass = bytestream2_get_be32u(&gb); rgb[0] = bytestream2_get_be32u(&gb); rgb[1] = bytestream2_get_be32u(&gb); rgb[2] = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, 8); ncolors = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, header_size - (XWD_HEADER_SIZE - 20)); av_log(avctx, AV_LOG_DEBUG, "pixformat %"PRIu32", pixdepth %"PRIu32", bunit %"PRIu32", bitorder %"PRIu32", bpad %"PRIu32"\n", pixformat, pixdepth, bunit, bitorder, bpad); av_log(avctx, AV_LOG_DEBUG, "vclass %"PRIu32", ncolors %"PRIu32", bpp %"PRIu32", be %"PRIu32", lsize %"PRIu32", xoffset %"PRIu32"\n", vclass, ncolors, bpp, be, lsize, xoffset); av_log(avctx, AV_LOG_DEBUG, "red %0"PRIx32", green %0"PRIx32", blue %0"PRIx32"\n", rgb[0], rgb[1], rgb[2]); if (pixformat > XWD_Z_PIXMAP) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap format\n"); return AVERROR_INVALIDDATA; } if (pixdepth == 0 || pixdepth > 32) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap depth\n"); return AVERROR_INVALIDDATA; } if (xoffset) { avpriv_request_sample(avctx, "xoffset %"PRIu32"", xoffset); return AVERROR_PATCHWELCOME; } if (be > 1) { av_log(avctx, AV_LOG_ERROR, "invalid byte order\n"); return AVERROR_INVALIDDATA; } if (bitorder > 1) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap bit order\n"); return AVERROR_INVALIDDATA; } if (bunit != 8 && bunit != 16 && bunit != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap unit\n"); return AVERROR_INVALIDDATA; } if (bpad != 8 && bpad != 16 && bpad != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap scan-line pad\n"); return AVERROR_INVALIDDATA; } if (bpp == 0 || bpp > 32) { av_log(avctx, AV_LOG_ERROR, "invalid bits per pixel\n"); return AVERROR_INVALIDDATA; } if (ncolors > 256) { av_log(avctx, AV_LOG_ERROR, "invalid number of entries in colormap\n"); return AVERROR_INVALIDDATA; } if ((ret = av_image_check_size(avctx->width, avctx->height, 0, NULL)) < 0) return ret; rsize = FFALIGN(avctx->width * bpp, bpad) / 8; if (lsize < rsize) { av_log(avctx, AV_LOG_ERROR, "invalid bytes per scan-line\n"); return AVERROR_INVALIDDATA; } if (bytestream2_get_bytes_left(&gb) < ncolors * XWD_CMAP_SIZE + (uint64_t)avctx->height * lsize) { av_log(avctx, AV_LOG_ERROR, "input buffer too small\n"); return AVERROR_INVALIDDATA; } if (pixformat != XWD_Z_PIXMAP) { avpriv_report_missing_feature(avctx, "Pixmap format %"PRIu32, 
pixformat); return AVERROR_PATCHWELCOME; } avctx->pix_fmt = AV_PIX_FMT_NONE; switch (vclass) { case XWD_STATIC_GRAY: case XWD_GRAY_SCALE: if (bpp != 1 && bpp != 8) return AVERROR_INVALIDDATA; if (bpp == 1 && pixdepth == 1) { avctx->pix_fmt = AV_PIX_FMT_MONOWHITE; } else if (bpp == 8 && pixdepth == 8) { avctx->pix_fmt = AV_PIX_FMT_GRAY8; } break; case XWD_STATIC_COLOR: case XWD_PSEUDO_COLOR: if (bpp == 8) avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case XWD_TRUE_COLOR: case XWD_DIRECT_COLOR: if (bpp != 16 && bpp != 24 && bpp != 32) return AVERROR_INVALIDDATA; if (bpp == 16 && pixdepth == 15) { if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB555BE : AV_PIX_FMT_RGB555LE; else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00) avctx->pix_fmt = be ? AV_PIX_FMT_BGR555BE : AV_PIX_FMT_BGR555LE; } else if (bpp == 16 && pixdepth == 16) { if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_RGB565LE; else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800) avctx->pix_fmt = be ? AV_PIX_FMT_BGR565BE : AV_PIX_FMT_BGR565LE; } else if (bpp == 24) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_RGB24 : AV_PIX_FMT_BGR24; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24; } else if (bpp == 32) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_ARGB : AV_PIX_FMT_BGRA; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA; } bytestream2_skipu(&gb, ncolors * XWD_CMAP_SIZE); break; default: av_log(avctx, AV_LOG_ERROR, "invalid visual class\n"); return AVERROR_INVALIDDATA; } if (avctx->pix_fmt == AV_PIX_FMT_NONE) { avpriv_request_sample(avctx, "Unknown file: bpp %"PRIu32", pixdepth %"PRIu32", vclass %"PRIu32"", bpp, pixdepth, vclass); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { uint32_t *dst = (uint32_t *)p->data[1]; uint8_t red, green, blue; for (i = 0; i < ncolors; i++) { bytestream2_skipu(&gb, 4); // skip colormap entry number red = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); green = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); blue = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 3); // skip bitmask flag and padding dst[i] = red << 16 | green << 8 | blue; } } ptr = p->data[0]; for (i = 0; i < avctx->height; i++) { bytestream2_get_bufferu(&gb, ptr, rsize); bytestream2_skipu(&gb, lsize - rsize); ptr += p->linesize[0]; } *got_frame = 1; return buf_size; }
{'added': [(160, ' if (bpp == 1 && pixdepth == 1) {'), (162, ' } else if (bpp == 8 && pixdepth == 8) {')], 'deleted': [(160, ' if (pixdepth == 1) {'), (162, ' } else if (pixdepth == 8) {')]}
2
2
198
1,495
https://github.com/FFmpeg/FFmpeg
CVE-2017-9991
['CWE-119']
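The diff field of the record above (FFmpeg xwddec.c, CVE-2017-9991, CWE-119) tightens the gray-scale branch of xwd_decode_frame() so the declared bits-per-pixel must agree with the pixmap depth before a pixel format is chosen. The fragment below is reconstructed from the diff entries for lines 160 and 162 plus the unchanged neighbouring lines already visible in func_before/func_after; the reading that a mismatch allowed an over-wide row copy is an inference from the CWE-119 tag, since rsize is computed from bpp while the frame buffer is allocated from the selected pixel format:

    /* before: format chosen from pixdepth alone, so e.g. bpp == 8 with
     * pixdepth == 1 could still select the 1-bit-per-pixel MONOWHITE format */
    if (pixdepth == 1) {
        avctx->pix_fmt = AV_PIX_FMT_MONOWHITE;
    } else if (pixdepth == 8) {
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
    }

    /* after: bpp and pixdepth must match for either gray format to be picked */
    if (bpp == 1 && pixdepth == 1) {
        avctx->pix_fmt = AV_PIX_FMT_MONOWHITE;
    } else if (bpp == 8 && pixdepth == 8) {
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
    }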
sip_ua_layer.c
pjsip_ua_register_dlg
/* $Id$ */ /* * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com) * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <pjsip/sip_ua_layer.h> #include <pjsip/sip_module.h> #include <pjsip/sip_dialog.h> #include <pjsip/sip_endpoint.h> #include <pjsip/sip_errno.h> #include <pjsip/sip_transaction.h> #include <pj/os.h> #include <pj/hash.h> #include <pj/assert.h> #include <pj/string.h> #include <pj/pool.h> #include <pj/log.h> #define THIS_FILE "sip_ua_layer.c" /* * Static prototypes. */ static pj_status_t mod_ua_load(pjsip_endpoint *endpt); static pj_status_t mod_ua_unload(void); static pj_bool_t mod_ua_on_rx_request(pjsip_rx_data *rdata); static pj_bool_t mod_ua_on_rx_response(pjsip_rx_data *rdata); static void mod_ua_on_tsx_state(pjsip_transaction*, pjsip_event*); extern long pjsip_dlg_lock_tls_id; /* defined in sip_dialog.c */ /* This struct is used to represent list of dialog inside a dialog set. * We don't want to use pjsip_dialog for this purpose, to save some * memory (about 100 bytes per dialog set). */ struct dlg_set_head { PJ_DECL_LIST_MEMBER(pjsip_dialog); }; /* This struct represents a dialog set. * This is the value that will be put in the UA's hash table. */ struct dlg_set { /* To put this node in free dlg_set nodes in UA. */ PJ_DECL_LIST_MEMBER(struct dlg_set); /* This is the buffer to store this entry in the hash table. */ pj_hash_entry_buf ht_entry; /* List of dialog in this dialog set. */ struct dlg_set_head dlg_list; }; /* * Module interface. */ static struct user_agent { pjsip_module mod; pj_pool_t *pool; pjsip_endpoint *endpt; pj_mutex_t *mutex; pj_hash_table_t *dlg_table; pjsip_ua_init_param param; struct dlg_set free_dlgset_nodes; } mod_ua = { { NULL, NULL, /* prev, next. */ { "mod-ua", 6 }, /* Name. */ -1, /* Id */ PJSIP_MOD_PRIORITY_UA_PROXY_LAYER, /* Priority */ &mod_ua_load, /* load() */ NULL, /* start() */ NULL, /* stop() */ &mod_ua_unload, /* unload() */ &mod_ua_on_rx_request, /* on_rx_request() */ &mod_ua_on_rx_response, /* on_rx_response() */ NULL, /* on_tx_request. */ NULL, /* on_tx_response() */ &mod_ua_on_tsx_state, /* on_tsx_state() */ } }; /* * mod_ua_load() * * Called when module is being loaded by endpoint. */ static pj_status_t mod_ua_load(pjsip_endpoint *endpt) { pj_status_t status; /* Initialize the user agent. */ mod_ua.endpt = endpt; mod_ua.pool = pjsip_endpt_create_pool( endpt, "ua%p", PJSIP_POOL_LEN_UA, PJSIP_POOL_INC_UA); if (mod_ua.pool == NULL) return PJ_ENOMEM; status = pj_mutex_create_recursive(mod_ua.pool, " ua%p", &mod_ua.mutex); if (status != PJ_SUCCESS) return status; mod_ua.dlg_table = pj_hash_create(mod_ua.pool, PJSIP_MAX_DIALOG_COUNT); if (mod_ua.dlg_table == NULL) return PJ_ENOMEM; pj_list_init(&mod_ua.free_dlgset_nodes); /* Initialize dialog lock. 
*/ status = pj_thread_local_alloc(&pjsip_dlg_lock_tls_id); if (status != PJ_SUCCESS) return status; pj_thread_local_set(pjsip_dlg_lock_tls_id, NULL); return PJ_SUCCESS; } /* * mod_ua_unload() * * Called when module is being unloaded. */ static pj_status_t mod_ua_unload(void) { pj_thread_local_free(pjsip_dlg_lock_tls_id); pj_mutex_destroy(mod_ua.mutex); /* Release pool */ if (mod_ua.pool) { pjsip_endpt_release_pool( mod_ua.endpt, mod_ua.pool ); } return PJ_SUCCESS; } /* * mod_ua_on_tsx_stats() * * Called on changed on transaction state. */ static void mod_ua_on_tsx_state( pjsip_transaction *tsx, pjsip_event *e) { pjsip_dialog *dlg; /* If the module id is -1, it could mean that the module has been * destroyed. */ if (mod_ua.mod.id == -1) return; /* Get the dialog where this transaction belongs. */ dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; /* If dialog instance has gone, it could mean that the dialog * may has been destroyed. */ if (dlg == NULL) return; /* Hand over the event to the dialog. */ pjsip_dlg_on_tsx_state(dlg, tsx, e); } /* * Init user agent module and register it to the endpoint. */ PJ_DEF(pj_status_t) pjsip_ua_init_module( pjsip_endpoint *endpt, const pjsip_ua_init_param *prm) { pj_status_t status; /* Check if module already registered. */ PJ_ASSERT_RETURN(mod_ua.mod.id == -1, PJ_EINVALIDOP); /* Copy param, if exists. */ if (prm) pj_memcpy(&mod_ua.param, prm, sizeof(pjsip_ua_init_param)); /* Register the module. */ status = pjsip_endpt_register_module(endpt, &mod_ua.mod); return status; } /* * Get the instance of the user agent. * */ PJ_DEF(pjsip_user_agent*) pjsip_ua_instance(void) { return &mod_ua.mod; } /* * Get the endpoint where this UA is currently registered. */ PJ_DEF(pjsip_endpoint*) pjsip_ua_get_endpt(pjsip_user_agent *ua) { PJ_UNUSED_ARG(ua); pj_assert(ua == &mod_ua.mod); return mod_ua.endpt; } /* * Destroy the user agent layer. */ PJ_DEF(pj_status_t) pjsip_ua_destroy(void) { /* Check if module already destroyed. */ PJ_ASSERT_RETURN(mod_ua.mod.id != -1, PJ_EINVALIDOP); return pjsip_endpt_unregister_module(mod_ua.endpt, &mod_ua.mod); } /* * Create key to identify dialog set. */ /* PJ_DEF(void) pjsip_ua_create_dlg_set_key( pj_pool_t *pool, pj_str_t *set_key, const pj_str_t *call_id, const pj_str_t *local_tag) { PJ_ASSERT_ON_FAIL(pool && set_key && call_id && local_tag, return;); set_key->slen = call_id->slen + local_tag->slen + 1; set_key->ptr = (char*) pj_pool_alloc(pool, set_key->slen); pj_assert(set_key->ptr != NULL); pj_memcpy(set_key->ptr, call_id->ptr, call_id->slen); set_key->ptr[call_id->slen] = '$'; pj_memcpy(set_key->ptr + call_id->slen + 1, local_tag->ptr, local_tag->slen); } */ /* * Acquire one dlg_set node to be put in the hash table. * This will first look in the free nodes list, then allocate * a new one from UA's pool when one is not available. */ static struct dlg_set *alloc_dlgset_node(void) { struct dlg_set *set; if (!pj_list_empty(&mod_ua.free_dlgset_nodes)) { set = mod_ua.free_dlgset_nodes.next; pj_list_erase(set); return set; } else { set = PJ_POOL_ALLOC_T(mod_ua.pool, struct dlg_set); return set; } } /* * Register new dialog. Called by pjsip_dlg_create_uac() and * pjsip_dlg_create_uas_and_inc_lock(); */ PJ_DEF(pj_status_t) pjsip_ua_register_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { /* Sanity check. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* For all dialogs, local tag (inc hash) must has been initialized. 
*/ PJ_ASSERT_RETURN(dlg->local.info && dlg->local.info->tag.slen && dlg->local.tag_hval != 0, PJ_EBUG); /* For UAS dialog, remote tag (inc hash) must have been initialized. */ //PJ_ASSERT_RETURN(dlg->role==PJSIP_ROLE_UAC || // (dlg->role==PJSIP_ROLE_UAS && dlg->remote.info->tag.slen // && dlg->remote.tag_hval != 0), PJ_EBUG); /* Lock the user agent. */ pj_mutex_lock(mod_ua.mutex); /* For UAC, check if there is existing dialog in the same set. */ if (dlg->role == PJSIP_ROLE_UAC) { struct dlg_set *dlg_set; dlg_set = (struct dlg_set*) pj_hash_get_lower( mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, &dlg->local.tag_hval); if (dlg_set) { /* This is NOT the first dialog in the dialog set. * Just add this dialog in the list. */ pj_assert(dlg_set->dlg_list.next != (void*)&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; } else { /* This is the first dialog in the dialog set. * Create the dialog set and add this dialog to it. */ dlg_set = alloc_dlgset_node(); pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; /* Register the dialog set in the hash table. */ pj_hash_set_np_lower(mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } else { /* For UAS, create the dialog set with a single dialog as member. */ struct dlg_set *dlg_set; dlg_set = alloc_dlgset_node(); pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; pj_hash_set_np_lower(mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; } PJ_DEF(pj_status_t) pjsip_ua_unregister_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { struct dlg_set *dlg_set; pjsip_dialog *d; /* Sanity-check arguments. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* Check that dialog has been registered. */ PJ_ASSERT_RETURN(dlg->dlg_set, PJ_EINVALIDOP); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Find this dialog from the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; d = dlg_set->dlg_list.next; while (d != (pjsip_dialog*)&dlg_set->dlg_list && d != dlg) { d = d->next; } if (d != dlg) { pj_assert(!"Dialog is not registered!"); pj_mutex_unlock(mod_ua.mutex); return PJ_EINVALIDOP; } /* Remove this dialog from the list. */ pj_list_erase(dlg); /* If dialog list is empty, remove the dialog set from the hash table. */ if (pj_list_empty(&dlg_set->dlg_list)) { pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, NULL); /* Return dlg_set to free nodes. */ pj_list_push_back(&mod_ua.free_dlgset_nodes, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; } PJ_DEF(pjsip_dialog*) pjsip_rdata_get_dlg( pjsip_rx_data *rdata ) { return (pjsip_dialog*) rdata->endpt_info.mod_data[mod_ua.mod.id]; } PJ_DEF(pjsip_dialog*) pjsip_tdata_get_dlg( pjsip_tx_data *tdata ) { return (pjsip_dialog*) tdata->mod_data[mod_ua.mod.id]; } PJ_DEF(pjsip_dialog*) pjsip_tsx_get_dlg( pjsip_transaction *tsx ) { return (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; } /* * Retrieve the current number of dialog-set currently registered * in the hash table. 
*/ PJ_DEF(unsigned) pjsip_ua_get_dlg_set_count(void) { unsigned count; PJ_ASSERT_RETURN(mod_ua.endpt, 0); pj_mutex_lock(mod_ua.mutex); count = pj_hash_count(mod_ua.dlg_table); pj_mutex_unlock(mod_ua.mutex); return count; } /* * Find a dialog. */ PJ_DEF(pjsip_dialog*) pjsip_ua_find_dialog(const pj_str_t *call_id, const pj_str_t *local_tag, const pj_str_t *remote_tag, pj_bool_t lock_dialog) { struct dlg_set *dlg_set; pjsip_dialog *dlg; PJ_ASSERT_RETURN(call_id && local_tag && remote_tag, NULL); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Lookup the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, local_tag->ptr, (unsigned)local_tag->slen, NULL); if (dlg_set == NULL) { /* Not found */ pj_mutex_unlock(mod_ua.mutex); return NULL; } /* Dialog set is found, now find the matching dialog based on the * remote tag. */ dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { if (pj_stricmp(&dlg->remote.info->tag, remote_tag) == 0) break; dlg = dlg->next; } if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { /* Not found */ pj_mutex_unlock(mod_ua.mutex); return NULL; } /* Dialog has been found. It SHOULD have the right Call-ID!! */ if (pj_strcmp(&dlg->call_id->id, call_id)!=0) { PJ_LOG(6, (THIS_FILE, "Dialog not found: local and remote tags " "matched but not call id")); pj_mutex_unlock(mod_ua.mutex); return NULL; } if (lock_dialog) { if (pjsip_dlg_try_inc_lock(dlg) != PJ_SUCCESS) { /* * Unable to acquire dialog's lock while holding the user * agent's mutex. Release the UA mutex before retrying once * more. * * THIS MAY CAUSE RACE CONDITION! */ /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Lock dialog */ pjsip_dlg_inc_lock(dlg); } else { /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); } } else { /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); } return dlg; } /* * Find the first dialog in dialog set in hash table for an incoming message. */ static struct dlg_set *find_dlg_set_for_msg( pjsip_rx_data *rdata ) { /* CANCEL message doesn't have To tag, so we must lookup the dialog * by finding the INVITE UAS transaction being cancelled. */ if (rdata->msg_info.cseq->method.id == PJSIP_CANCEL_METHOD) { pjsip_dialog *dlg; /* Create key for the rdata, but this time, use INVITE as the * method. */ pj_str_t key; pjsip_role_e role; pjsip_transaction *tsx; if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) role = PJSIP_ROLE_UAS; else role = PJSIP_ROLE_UAC; pjsip_tsx_create_key(rdata->tp_info.pool, &key, role, pjsip_get_invite_method(), rdata); /* Lookup the INVITE transaction */ tsx = pjsip_tsx_layer_find_tsx2(&key, PJ_TRUE); /* We should find the dialog attached to the INVITE transaction */ if (tsx) { dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; pj_grp_lock_dec_ref(tsx->grp_lock); /* Dlg may be NULL on some extreme condition * (e.g. during debugging where initially there is a dialog) */ return dlg ? (struct dlg_set*) dlg->dlg_set : NULL; } else { return NULL; } } else { pj_str_t *tag; struct dlg_set *dlg_set; if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) tag = &rdata->msg_info.to->tag; else tag = &rdata->msg_info.from->tag; /* Lookup the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, tag->ptr, (unsigned)tag->slen, NULL); return dlg_set; } } /* On received requests. 
*/ static pj_bool_t mod_ua_on_rx_request(pjsip_rx_data *rdata) { struct dlg_set *dlg_set; pj_str_t *from_tag; pjsip_dialog *dlg; pj_status_t status; /* Optimized path: bail out early if request is not CANCEL and it doesn't * have To tag */ if (rdata->msg_info.to->tag.slen == 0 && rdata->msg_info.msg->line.req.method.id != PJSIP_CANCEL_METHOD) { return PJ_FALSE; } /* Incoming REGISTER may have tags in it */ if (rdata->msg_info.msg->line.req.method.id == PJSIP_REGISTER_METHOD) return PJ_FALSE; retry_on_deadlock: /* Lock user agent before looking up the dialog hash table. */ pj_mutex_lock(mod_ua.mutex); /* Lookup the dialog set, based on the To tag header. */ dlg_set = find_dlg_set_for_msg(rdata); /* If dialog is not found, respond with 481 (Call/Transaction * Does Not Exist). */ if (dlg_set == NULL) { /* Unable to find dialog. */ pj_mutex_unlock(mod_ua.mutex); if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) { PJ_LOG(5,(THIS_FILE, "Unable to find dialogset for %s, answering with 481", pjsip_rx_data_get_info(rdata))); /* Respond with 481 . */ pjsip_endpt_respond_stateless( mod_ua.endpt, rdata, 481, NULL, NULL, NULL ); } return PJ_TRUE; } /* Dialog set has been found. * Find the dialog in the dialog set based on the content of the remote * tag. */ from_tag = &rdata->msg_info.from->tag; dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { if (pj_stricmp(&dlg->remote.info->tag, from_tag) == 0) break; dlg = dlg->next; } /* Dialog may not be found, e.g. in this case: * - UAC sends SUBSCRIBE, then UAS sends NOTIFY before answering * SUBSCRIBE request with 2xx. * * In this case, we can accept the request ONLY when the original * dialog still has empty To tag. */ if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { pjsip_dialog *first_dlg = dlg_set->dlg_list.next; if (first_dlg->remote.info->tag.slen != 0) { /* Not found. Mulfunction UAC? */ pj_mutex_unlock(mod_ua.mutex); if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) { PJ_LOG(5,(THIS_FILE, "Unable to find dialog for %s, answering with 481", pjsip_rx_data_get_info(rdata))); pjsip_endpt_respond_stateless(mod_ua.endpt, rdata, PJSIP_SC_CALL_TSX_DOES_NOT_EXIST, NULL, NULL, NULL); } else { PJ_LOG(5,(THIS_FILE, "Unable to find dialog for %s", pjsip_rx_data_get_info(rdata))); } return PJ_TRUE; } dlg = first_dlg; } /* Mark the dialog id of the request. */ rdata->endpt_info.mod_data[mod_ua.mod.id] = dlg; /* Try to lock the dialog */ PJ_LOG(6,(dlg->obj_name, "UA layer acquiring dialog lock for request")); status = pjsip_dlg_try_inc_lock(dlg); if (status != PJ_SUCCESS) { /* Failed to acquire dialog mutex immediately, this could be * because of deadlock. Release UA mutex, yield, and retry * the whole thing once again. */ pj_mutex_unlock(mod_ua.mutex); pj_thread_sleep(0); goto retry_on_deadlock; } /* Done with processing in UA layer, release lock */ pj_mutex_unlock(mod_ua.mutex); /* Pass to dialog. */ pjsip_dlg_on_rx_request(dlg, rdata); /* Unlock the dialog. This may destroy the dialog */ pjsip_dlg_dec_lock(dlg); /* Report as handled. */ return PJ_TRUE; } /* On rx response notification. */ static pj_bool_t mod_ua_on_rx_response(pjsip_rx_data *rdata) { pjsip_transaction *tsx; struct dlg_set *dlg_set; pjsip_dialog *dlg; pj_status_t status; /* * Find the dialog instance for the response. * All outgoing dialog requests are sent statefully, which means * there will be an UAC transaction associated with this response, * and the dialog instance will be recorded in that transaction. 
* * But even when transaction is found, there is possibility that * the response is a forked response. */ retry_on_deadlock: dlg = NULL; /* Lock user agent dlg table before we're doing anything. */ pj_mutex_lock(mod_ua.mutex); /* Check if transaction is present. */ tsx = pjsip_rdata_get_tsx(rdata); if (tsx) { /* Check if dialog is present in the transaction. */ dlg = pjsip_tsx_get_dlg(tsx); if (!dlg) { /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); return PJ_FALSE; } /* Get the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; /* Even if transaction is found and (candidate) dialog has been * identified, it's possible that the request has forked. */ } else { /* Transaction is not present. * Check if this is a 2xx/OK response to INVITE, which in this * case the response will be handled directly by the * dialog. */ pjsip_cseq_hdr *cseq_hdr = rdata->msg_info.cseq; if (cseq_hdr->method.id != PJSIP_INVITE_METHOD || rdata->msg_info.msg->line.status.code / 100 != 2) { /* Not a 2xx response to INVITE. * This must be some stateless response sent by other modules, * or a very late response. */ /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); return PJ_FALSE; } /* Get the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, rdata->msg_info.from->tag.ptr, (unsigned)rdata->msg_info.from->tag.slen, NULL); if (!dlg_set) { /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); /* Strayed 2xx response!! */ PJ_LOG(4,(THIS_FILE, "Received strayed 2xx response (no dialog is found)" " from %s:%d: %s", rdata->pkt_info.src_name, rdata->pkt_info.src_port, pjsip_rx_data_get_info(rdata))); return PJ_TRUE; } } /* At this point, we must have the dialog set, and the dialog set * must have a dialog in the list. */ pj_assert(dlg_set && !pj_list_empty(&dlg_set->dlg_list)); /* Check for forked response. * Request will fork only for the initial INVITE request. */ //This doesn't work when there is authentication challenge, since //first_cseq evaluation will yield false. //if (rdata->msg_info.cseq->method.id == PJSIP_INVITE_METHOD && // rdata->msg_info.cseq->cseq == dlg_set->dlg_list.next->local.first_cseq) if (rdata->msg_info.cseq->method.id == PJSIP_INVITE_METHOD) { int st_code = rdata->msg_info.msg->line.status.code; pj_str_t *to_tag = &rdata->msg_info.to->tag; dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { /* If there is dialog with no remote tag (i.e. dialog has not * been established yet), then send this response to that * dialog. */ if (dlg->remote.info->tag.slen == 0) break; /* Otherwise find the one with matching To tag. */ if (pj_stricmp(to_tag, &dlg->remote.info->tag) == 0) break; dlg = dlg->next; } /* If no dialog with matching remote tag is found, this must be * a forked response. Respond to this ONLY when response is non-100 * provisional response OR a 2xx response. */ if (dlg == (pjsip_dialog*)&dlg_set->dlg_list && ((st_code/100==1 && st_code!=100) || st_code/100==2)) { PJ_LOG(5,(THIS_FILE, "Received forked %s for existing dialog %s", pjsip_rx_data_get_info(rdata), dlg_set->dlg_list.next->obj_name)); /* Report to application about forked condition. * Application can either create a dialog or ignore the response. 
*/ if (mod_ua.param.on_dlg_forked) { dlg = (*mod_ua.param.on_dlg_forked)(dlg_set->dlg_list.next, rdata); if (dlg == NULL) { pj_mutex_unlock(mod_ua.mutex); return PJ_TRUE; } } else { dlg = dlg_set->dlg_list.next; PJ_LOG(4,(THIS_FILE, "Unhandled forked %s from %s:%d, response will be " "handed over to the first dialog", pjsip_rx_data_get_info(rdata), rdata->pkt_info.src_name, rdata->pkt_info.src_port)); } } else if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { /* For 100 or non-2xx response which has different To tag, * pass the response to the first dialog. */ dlg = dlg_set->dlg_list.next; } } else { /* Either this is a non-INVITE response, or subsequent INVITE * within dialog. The dialog should have been identified when * the transaction was found. */ pj_assert(tsx != NULL); pj_assert(dlg != NULL); } /* The dialog must have been found. */ pj_assert(dlg != NULL); /* Put the dialog instance in the rdata. */ rdata->endpt_info.mod_data[mod_ua.mod.id] = dlg; /* Attempt to acquire lock to the dialog. */ PJ_LOG(6,(dlg->obj_name, "UA layer acquiring dialog lock for response")); status = pjsip_dlg_try_inc_lock(dlg); if (status != PJ_SUCCESS) { /* Failed to acquire dialog mutex. This could indicate a deadlock * situation, and for safety, try to avoid deadlock by releasing * UA mutex, yield, and retry the whole processing once again. */ pj_mutex_unlock(mod_ua.mutex); pj_thread_sleep(0); goto retry_on_deadlock; } /* We're done with processing in the UA layer, we can release the mutex */ pj_mutex_unlock(mod_ua.mutex); /* Pass the response to the dialog. */ pjsip_dlg_on_rx_response(dlg, rdata); /* Unlock the dialog. This may destroy the dialog. */ pjsip_dlg_dec_lock(dlg); /* Done. */ return PJ_TRUE; } #if PJ_LOG_MAX_LEVEL >= 3 static void print_dialog( const char *title, pjsip_dialog *dlg, char *buf, pj_size_t size) { int len; char userinfo[PJSIP_MAX_URL_SIZE]; len = pjsip_hdr_print_on(dlg->remote.info, userinfo, sizeof(userinfo)); if (len < 0) pj_ansi_strcpy(userinfo, "<--uri too long-->"); else userinfo[len] = '\0'; len = pj_ansi_snprintf(buf, size, "%s[%s] %s", title, (dlg->state==PJSIP_DIALOG_STATE_NULL ? " - " : "est"), userinfo); if (len < 1 || len >= (int)size) { pj_ansi_strcpy(buf, "<--uri too long-->"); } else buf[len] = '\0'; } #endif /* * Dump user agent contents (e.g. all dialogs). */ PJ_DEF(void) pjsip_ua_dump(pj_bool_t detail) { #if PJ_LOG_MAX_LEVEL >= 3 pj_hash_iterator_t itbuf, *it; char dlginfo[128]; pj_mutex_lock(mod_ua.mutex); PJ_LOG(3, (THIS_FILE, "Number of dialog sets: %u", pj_hash_count(mod_ua.dlg_table))); if (detail && pj_hash_count(mod_ua.dlg_table)) { PJ_LOG(3, (THIS_FILE, "Dumping dialog sets:")); it = pj_hash_first(mod_ua.dlg_table, &itbuf); for (; it != NULL; it = pj_hash_next(mod_ua.dlg_table, it)) { struct dlg_set *dlg_set; pjsip_dialog *dlg; const char *title; dlg_set = (struct dlg_set*) pj_hash_this(mod_ua.dlg_table, it); if (!dlg_set || pj_list_empty(&dlg_set->dlg_list)) continue; /* First dialog in dialog set. */ dlg = dlg_set->dlg_list.next; if (dlg->role == PJSIP_ROLE_UAC) title = " [out] "; else title = " [in] "; print_dialog(title, dlg, dlginfo, sizeof(dlginfo)); PJ_LOG(3,(THIS_FILE, "%s", dlginfo)); /* Next dialog in dialog set (forked) */ dlg = dlg->next; while (dlg != (pjsip_dialog*) &dlg_set->dlg_list) { print_dialog(" [forked] ", dlg, dlginfo, sizeof(dlginfo)); dlg = dlg->next; } } } pj_mutex_unlock(mod_ua.mutex); #endif }
/* $Id$ */ /* * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com) * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <pjsip/sip_ua_layer.h> #include <pjsip/sip_module.h> #include <pjsip/sip_dialog.h> #include <pjsip/sip_endpoint.h> #include <pjsip/sip_errno.h> #include <pjsip/sip_transaction.h> #include <pj/os.h> #include <pj/hash.h> #include <pj/assert.h> #include <pj/string.h> #include <pj/pool.h> #include <pj/log.h> #define THIS_FILE "sip_ua_layer.c" /* * Static prototypes. */ static pj_status_t mod_ua_load(pjsip_endpoint *endpt); static pj_status_t mod_ua_unload(void); static pj_bool_t mod_ua_on_rx_request(pjsip_rx_data *rdata); static pj_bool_t mod_ua_on_rx_response(pjsip_rx_data *rdata); static void mod_ua_on_tsx_state(pjsip_transaction*, pjsip_event*); extern long pjsip_dlg_lock_tls_id; /* defined in sip_dialog.c */ /* This struct is used to represent list of dialog inside a dialog set. * We don't want to use pjsip_dialog for this purpose, to save some * memory (about 100 bytes per dialog set). */ struct dlg_set_head { PJ_DECL_LIST_MEMBER(pjsip_dialog); }; /* This struct represents a dialog set. * This is the value that will be put in the UA's hash table. */ struct dlg_set { /* To put this node in free dlg_set nodes in UA. */ PJ_DECL_LIST_MEMBER(struct dlg_set); /* This is the buffer to store this entry in the hash table. */ pj_hash_entry_buf ht_entry; /* Entry key in the hash table */ pj_str_t ht_key; /* List of dialog in this dialog set. */ struct dlg_set_head dlg_list; }; /* * Module interface. */ static struct user_agent { pjsip_module mod; pj_pool_t *pool; pjsip_endpoint *endpt; pj_mutex_t *mutex; pj_hash_table_t *dlg_table; pjsip_ua_init_param param; struct dlg_set free_dlgset_nodes; } mod_ua = { { NULL, NULL, /* prev, next. */ { "mod-ua", 6 }, /* Name. */ -1, /* Id */ PJSIP_MOD_PRIORITY_UA_PROXY_LAYER, /* Priority */ &mod_ua_load, /* load() */ NULL, /* start() */ NULL, /* stop() */ &mod_ua_unload, /* unload() */ &mod_ua_on_rx_request, /* on_rx_request() */ &mod_ua_on_rx_response, /* on_rx_response() */ NULL, /* on_tx_request. */ NULL, /* on_tx_response() */ &mod_ua_on_tsx_state, /* on_tsx_state() */ } }; /* * mod_ua_load() * * Called when module is being loaded by endpoint. */ static pj_status_t mod_ua_load(pjsip_endpoint *endpt) { pj_status_t status; /* Initialize the user agent. */ mod_ua.endpt = endpt; mod_ua.pool = pjsip_endpt_create_pool( endpt, "ua%p", PJSIP_POOL_LEN_UA, PJSIP_POOL_INC_UA); if (mod_ua.pool == NULL) return PJ_ENOMEM; status = pj_mutex_create_recursive(mod_ua.pool, " ua%p", &mod_ua.mutex); if (status != PJ_SUCCESS) return status; mod_ua.dlg_table = pj_hash_create(mod_ua.pool, PJSIP_MAX_DIALOG_COUNT); if (mod_ua.dlg_table == NULL) return PJ_ENOMEM; pj_list_init(&mod_ua.free_dlgset_nodes); /* Initialize dialog lock. 
*/ status = pj_thread_local_alloc(&pjsip_dlg_lock_tls_id); if (status != PJ_SUCCESS) return status; pj_thread_local_set(pjsip_dlg_lock_tls_id, NULL); return PJ_SUCCESS; } /* * mod_ua_unload() * * Called when module is being unloaded. */ static pj_status_t mod_ua_unload(void) { pj_thread_local_free(pjsip_dlg_lock_tls_id); pj_mutex_destroy(mod_ua.mutex); /* Release pool */ if (mod_ua.pool) { pjsip_endpt_release_pool( mod_ua.endpt, mod_ua.pool ); } return PJ_SUCCESS; } /* * mod_ua_on_tsx_stats() * * Called on changed on transaction state. */ static void mod_ua_on_tsx_state( pjsip_transaction *tsx, pjsip_event *e) { pjsip_dialog *dlg; /* If the module id is -1, it could mean that the module has been * destroyed. */ if (mod_ua.mod.id == -1) return; /* Get the dialog where this transaction belongs. */ dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; /* If dialog instance has gone, it could mean that the dialog * may has been destroyed. */ if (dlg == NULL) return; /* Hand over the event to the dialog. */ pjsip_dlg_on_tsx_state(dlg, tsx, e); } /* * Init user agent module and register it to the endpoint. */ PJ_DEF(pj_status_t) pjsip_ua_init_module( pjsip_endpoint *endpt, const pjsip_ua_init_param *prm) { pj_status_t status; /* Check if module already registered. */ PJ_ASSERT_RETURN(mod_ua.mod.id == -1, PJ_EINVALIDOP); /* Copy param, if exists. */ if (prm) pj_memcpy(&mod_ua.param, prm, sizeof(pjsip_ua_init_param)); /* Register the module. */ status = pjsip_endpt_register_module(endpt, &mod_ua.mod); return status; } /* * Get the instance of the user agent. * */ PJ_DEF(pjsip_user_agent*) pjsip_ua_instance(void) { return &mod_ua.mod; } /* * Get the endpoint where this UA is currently registered. */ PJ_DEF(pjsip_endpoint*) pjsip_ua_get_endpt(pjsip_user_agent *ua) { PJ_UNUSED_ARG(ua); pj_assert(ua == &mod_ua.mod); return mod_ua.endpt; } /* * Destroy the user agent layer. */ PJ_DEF(pj_status_t) pjsip_ua_destroy(void) { /* Check if module already destroyed. */ PJ_ASSERT_RETURN(mod_ua.mod.id != -1, PJ_EINVALIDOP); return pjsip_endpt_unregister_module(mod_ua.endpt, &mod_ua.mod); } /* * Create key to identify dialog set. */ /* PJ_DEF(void) pjsip_ua_create_dlg_set_key( pj_pool_t *pool, pj_str_t *set_key, const pj_str_t *call_id, const pj_str_t *local_tag) { PJ_ASSERT_ON_FAIL(pool && set_key && call_id && local_tag, return;); set_key->slen = call_id->slen + local_tag->slen + 1; set_key->ptr = (char*) pj_pool_alloc(pool, set_key->slen); pj_assert(set_key->ptr != NULL); pj_memcpy(set_key->ptr, call_id->ptr, call_id->slen); set_key->ptr[call_id->slen] = '$'; pj_memcpy(set_key->ptr + call_id->slen + 1, local_tag->ptr, local_tag->slen); } */ /* * Acquire one dlg_set node to be put in the hash table. * This will first look in the free nodes list, then allocate * a new one from UA's pool when one is not available. */ static struct dlg_set *alloc_dlgset_node(void) { struct dlg_set *set; if (!pj_list_empty(&mod_ua.free_dlgset_nodes)) { set = mod_ua.free_dlgset_nodes.next; pj_list_erase(set); return set; } else { set = PJ_POOL_ALLOC_T(mod_ua.pool, struct dlg_set); return set; } } /* * Register new dialog. Called by pjsip_dlg_create_uac() and * pjsip_dlg_create_uas_and_inc_lock(); */ PJ_DEF(pj_status_t) pjsip_ua_register_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { /* Sanity check. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* For all dialogs, local tag (inc hash) must has been initialized. 
*/ PJ_ASSERT_RETURN(dlg->local.info && dlg->local.info->tag.slen && dlg->local.tag_hval != 0, PJ_EBUG); /* For UAS dialog, remote tag (inc hash) must have been initialized. */ //PJ_ASSERT_RETURN(dlg->role==PJSIP_ROLE_UAC || // (dlg->role==PJSIP_ROLE_UAS && dlg->remote.info->tag.slen // && dlg->remote.tag_hval != 0), PJ_EBUG); /* Lock the user agent. */ pj_mutex_lock(mod_ua.mutex); /* For UAC, check if there is existing dialog in the same set. */ if (dlg->role == PJSIP_ROLE_UAC) { struct dlg_set *dlg_set; dlg_set = (struct dlg_set*) pj_hash_get_lower( mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, &dlg->local.tag_hval); if (dlg_set) { /* This is NOT the first dialog in the dialog set. * Just add this dialog in the list. */ pj_assert(dlg_set->dlg_list.next != (void*)&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; } else { /* This is the first dialog in the dialog set. * Create the dialog set and add this dialog to it. */ dlg_set = alloc_dlgset_node(); dlg_set->ht_key = dlg->local.info->tag; pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; /* Register the dialog set in the hash table. */ pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } else { /* For UAS, create the dialog set with a single dialog as member. */ struct dlg_set *dlg_set; dlg_set = alloc_dlgset_node(); dlg_set->ht_key = dlg->local.info->tag; pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; } PJ_DEF(pj_status_t) pjsip_ua_unregister_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { struct dlg_set *dlg_set; pjsip_dialog *d; /* Sanity-check arguments. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* Check that dialog has been registered. */ PJ_ASSERT_RETURN(dlg->dlg_set, PJ_EINVALIDOP); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Find this dialog from the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; d = dlg_set->dlg_list.next; while (d != (pjsip_dialog*)&dlg_set->dlg_list && d != dlg) { d = d->next; } if (d != dlg) { pj_assert(!"Dialog is not registered!"); pj_mutex_unlock(mod_ua.mutex); return PJ_EINVALIDOP; } /* Remove this dialog from the list. */ pj_list_erase(dlg); /* If dialog list is empty, remove the dialog set from the hash table. */ if (pj_list_empty(&dlg_set->dlg_list)) { /* Verify that the dialog set is valid */ pj_assert(pj_hash_get_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, &dlg->local.tag_hval) == dlg_set); pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, NULL); /* Return dlg_set to free nodes. */ pj_list_push_back(&mod_ua.free_dlgset_nodes, dlg_set); } else { /* If the just unregistered dialog is being used as hash key, * reset the dlg_set entry with a new key (i.e: from the first dialog * in dlg_set). 
*/ if (dlg_set->ht_key.ptr == dlg->local.info->tag.ptr && dlg_set->ht_key.slen == dlg->local.info->tag.slen) { pjsip_dialog* key_dlg = dlg_set->dlg_list.next; /* Verify that the old & new keys share the hash value */ pj_assert(key_dlg->local.tag_hval == dlg->local.tag_hval); pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, NULL); dlg_set->ht_key = key_dlg->local.info->tag; pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, key_dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; } PJ_DEF(pjsip_dialog*) pjsip_rdata_get_dlg( pjsip_rx_data *rdata ) { return (pjsip_dialog*) rdata->endpt_info.mod_data[mod_ua.mod.id]; } PJ_DEF(pjsip_dialog*) pjsip_tdata_get_dlg( pjsip_tx_data *tdata ) { return (pjsip_dialog*) tdata->mod_data[mod_ua.mod.id]; } PJ_DEF(pjsip_dialog*) pjsip_tsx_get_dlg( pjsip_transaction *tsx ) { return (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; } /* * Retrieve the current number of dialog-set currently registered * in the hash table. */ PJ_DEF(unsigned) pjsip_ua_get_dlg_set_count(void) { unsigned count; PJ_ASSERT_RETURN(mod_ua.endpt, 0); pj_mutex_lock(mod_ua.mutex); count = pj_hash_count(mod_ua.dlg_table); pj_mutex_unlock(mod_ua.mutex); return count; } /* * Find a dialog. */ PJ_DEF(pjsip_dialog*) pjsip_ua_find_dialog(const pj_str_t *call_id, const pj_str_t *local_tag, const pj_str_t *remote_tag, pj_bool_t lock_dialog) { struct dlg_set *dlg_set; pjsip_dialog *dlg; PJ_ASSERT_RETURN(call_id && local_tag && remote_tag, NULL); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Lookup the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, local_tag->ptr, (unsigned)local_tag->slen, NULL); if (dlg_set == NULL) { /* Not found */ pj_mutex_unlock(mod_ua.mutex); return NULL; } /* Dialog set is found, now find the matching dialog based on the * remote tag. */ dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { if (pj_stricmp(&dlg->remote.info->tag, remote_tag) == 0) break; dlg = dlg->next; } if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { /* Not found */ pj_mutex_unlock(mod_ua.mutex); return NULL; } /* Dialog has been found. It SHOULD have the right Call-ID!! */ if (pj_strcmp(&dlg->call_id->id, call_id)!=0) { PJ_LOG(6, (THIS_FILE, "Dialog not found: local and remote tags " "matched but not call id")); pj_mutex_unlock(mod_ua.mutex); return NULL; } if (lock_dialog) { if (pjsip_dlg_try_inc_lock(dlg) != PJ_SUCCESS) { /* * Unable to acquire dialog's lock while holding the user * agent's mutex. Release the UA mutex before retrying once * more. * * THIS MAY CAUSE RACE CONDITION! */ /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Lock dialog */ pjsip_dlg_inc_lock(dlg); } else { /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); } } else { /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); } return dlg; } /* * Find the first dialog in dialog set in hash table for an incoming message. */ static struct dlg_set *find_dlg_set_for_msg( pjsip_rx_data *rdata ) { /* CANCEL message doesn't have To tag, so we must lookup the dialog * by finding the INVITE UAS transaction being cancelled. */ if (rdata->msg_info.cseq->method.id == PJSIP_CANCEL_METHOD) { pjsip_dialog *dlg; /* Create key for the rdata, but this time, use INVITE as the * method. 
*/ pj_str_t key; pjsip_role_e role; pjsip_transaction *tsx; if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) role = PJSIP_ROLE_UAS; else role = PJSIP_ROLE_UAC; pjsip_tsx_create_key(rdata->tp_info.pool, &key, role, pjsip_get_invite_method(), rdata); /* Lookup the INVITE transaction */ tsx = pjsip_tsx_layer_find_tsx2(&key, PJ_TRUE); /* We should find the dialog attached to the INVITE transaction */ if (tsx) { dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; pj_grp_lock_dec_ref(tsx->grp_lock); /* Dlg may be NULL on some extreme condition * (e.g. during debugging where initially there is a dialog) */ return dlg ? (struct dlg_set*) dlg->dlg_set : NULL; } else { return NULL; } } else { pj_str_t *tag; struct dlg_set *dlg_set; if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) tag = &rdata->msg_info.to->tag; else tag = &rdata->msg_info.from->tag; /* Lookup the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, tag->ptr, (unsigned)tag->slen, NULL); return dlg_set; } } /* On received requests. */ static pj_bool_t mod_ua_on_rx_request(pjsip_rx_data *rdata) { struct dlg_set *dlg_set; pj_str_t *from_tag; pjsip_dialog *dlg; pj_status_t status; /* Optimized path: bail out early if request is not CANCEL and it doesn't * have To tag */ if (rdata->msg_info.to->tag.slen == 0 && rdata->msg_info.msg->line.req.method.id != PJSIP_CANCEL_METHOD) { return PJ_FALSE; } /* Incoming REGISTER may have tags in it */ if (rdata->msg_info.msg->line.req.method.id == PJSIP_REGISTER_METHOD) return PJ_FALSE; retry_on_deadlock: /* Lock user agent before looking up the dialog hash table. */ pj_mutex_lock(mod_ua.mutex); /* Lookup the dialog set, based on the To tag header. */ dlg_set = find_dlg_set_for_msg(rdata); /* If dialog is not found, respond with 481 (Call/Transaction * Does Not Exist). */ if (dlg_set == NULL) { /* Unable to find dialog. */ pj_mutex_unlock(mod_ua.mutex); if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) { PJ_LOG(5,(THIS_FILE, "Unable to find dialogset for %s, answering with 481", pjsip_rx_data_get_info(rdata))); /* Respond with 481 . */ pjsip_endpt_respond_stateless( mod_ua.endpt, rdata, 481, NULL, NULL, NULL ); } return PJ_TRUE; } /* Dialog set has been found. * Find the dialog in the dialog set based on the content of the remote * tag. */ from_tag = &rdata->msg_info.from->tag; dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { if (pj_stricmp(&dlg->remote.info->tag, from_tag) == 0) break; dlg = dlg->next; } /* Dialog may not be found, e.g. in this case: * - UAC sends SUBSCRIBE, then UAS sends NOTIFY before answering * SUBSCRIBE request with 2xx. * * In this case, we can accept the request ONLY when the original * dialog still has empty To tag. */ if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { pjsip_dialog *first_dlg = dlg_set->dlg_list.next; if (first_dlg->remote.info->tag.slen != 0) { /* Not found. Mulfunction UAC? */ pj_mutex_unlock(mod_ua.mutex); if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) { PJ_LOG(5,(THIS_FILE, "Unable to find dialog for %s, answering with 481", pjsip_rx_data_get_info(rdata))); pjsip_endpt_respond_stateless(mod_ua.endpt, rdata, PJSIP_SC_CALL_TSX_DOES_NOT_EXIST, NULL, NULL, NULL); } else { PJ_LOG(5,(THIS_FILE, "Unable to find dialog for %s", pjsip_rx_data_get_info(rdata))); } return PJ_TRUE; } dlg = first_dlg; } /* Mark the dialog id of the request. 
*/ rdata->endpt_info.mod_data[mod_ua.mod.id] = dlg; /* Try to lock the dialog */ PJ_LOG(6,(dlg->obj_name, "UA layer acquiring dialog lock for request")); status = pjsip_dlg_try_inc_lock(dlg); if (status != PJ_SUCCESS) { /* Failed to acquire dialog mutex immediately, this could be * because of deadlock. Release UA mutex, yield, and retry * the whole thing once again. */ pj_mutex_unlock(mod_ua.mutex); pj_thread_sleep(0); goto retry_on_deadlock; } /* Done with processing in UA layer, release lock */ pj_mutex_unlock(mod_ua.mutex); /* Pass to dialog. */ pjsip_dlg_on_rx_request(dlg, rdata); /* Unlock the dialog. This may destroy the dialog */ pjsip_dlg_dec_lock(dlg); /* Report as handled. */ return PJ_TRUE; } /* On rx response notification. */ static pj_bool_t mod_ua_on_rx_response(pjsip_rx_data *rdata) { pjsip_transaction *tsx; struct dlg_set *dlg_set; pjsip_dialog *dlg; pj_status_t status; /* * Find the dialog instance for the response. * All outgoing dialog requests are sent statefully, which means * there will be an UAC transaction associated with this response, * and the dialog instance will be recorded in that transaction. * * But even when transaction is found, there is possibility that * the response is a forked response. */ retry_on_deadlock: dlg = NULL; /* Lock user agent dlg table before we're doing anything. */ pj_mutex_lock(mod_ua.mutex); /* Check if transaction is present. */ tsx = pjsip_rdata_get_tsx(rdata); if (tsx) { /* Check if dialog is present in the transaction. */ dlg = pjsip_tsx_get_dlg(tsx); if (!dlg) { /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); return PJ_FALSE; } /* Get the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; /* Even if transaction is found and (candidate) dialog has been * identified, it's possible that the request has forked. */ } else { /* Transaction is not present. * Check if this is a 2xx/OK response to INVITE, which in this * case the response will be handled directly by the * dialog. */ pjsip_cseq_hdr *cseq_hdr = rdata->msg_info.cseq; if (cseq_hdr->method.id != PJSIP_INVITE_METHOD || rdata->msg_info.msg->line.status.code / 100 != 2) { /* Not a 2xx response to INVITE. * This must be some stateless response sent by other modules, * or a very late response. */ /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); return PJ_FALSE; } /* Get the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, rdata->msg_info.from->tag.ptr, (unsigned)rdata->msg_info.from->tag.slen, NULL); if (!dlg_set) { /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); /* Strayed 2xx response!! */ PJ_LOG(4,(THIS_FILE, "Received strayed 2xx response (no dialog is found)" " from %s:%d: %s", rdata->pkt_info.src_name, rdata->pkt_info.src_port, pjsip_rx_data_get_info(rdata))); return PJ_TRUE; } } /* At this point, we must have the dialog set, and the dialog set * must have a dialog in the list. */ pj_assert(dlg_set && !pj_list_empty(&dlg_set->dlg_list)); /* Check for forked response. * Request will fork only for the initial INVITE request. */ //This doesn't work when there is authentication challenge, since //first_cseq evaluation will yield false. 
//if (rdata->msg_info.cseq->method.id == PJSIP_INVITE_METHOD && // rdata->msg_info.cseq->cseq == dlg_set->dlg_list.next->local.first_cseq) if (rdata->msg_info.cseq->method.id == PJSIP_INVITE_METHOD) { int st_code = rdata->msg_info.msg->line.status.code; pj_str_t *to_tag = &rdata->msg_info.to->tag; dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { /* If there is dialog with no remote tag (i.e. dialog has not * been established yet), then send this response to that * dialog. */ if (dlg->remote.info->tag.slen == 0) break; /* Otherwise find the one with matching To tag. */ if (pj_stricmp(to_tag, &dlg->remote.info->tag) == 0) break; dlg = dlg->next; } /* If no dialog with matching remote tag is found, this must be * a forked response. Respond to this ONLY when response is non-100 * provisional response OR a 2xx response. */ if (dlg == (pjsip_dialog*)&dlg_set->dlg_list && ((st_code/100==1 && st_code!=100) || st_code/100==2)) { PJ_LOG(5,(THIS_FILE, "Received forked %s for existing dialog %s", pjsip_rx_data_get_info(rdata), dlg_set->dlg_list.next->obj_name)); /* Report to application about forked condition. * Application can either create a dialog or ignore the response. */ if (mod_ua.param.on_dlg_forked) { dlg = (*mod_ua.param.on_dlg_forked)(dlg_set->dlg_list.next, rdata); if (dlg == NULL) { pj_mutex_unlock(mod_ua.mutex); return PJ_TRUE; } } else { dlg = dlg_set->dlg_list.next; PJ_LOG(4,(THIS_FILE, "Unhandled forked %s from %s:%d, response will be " "handed over to the first dialog", pjsip_rx_data_get_info(rdata), rdata->pkt_info.src_name, rdata->pkt_info.src_port)); } } else if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { /* For 100 or non-2xx response which has different To tag, * pass the response to the first dialog. */ dlg = dlg_set->dlg_list.next; } } else { /* Either this is a non-INVITE response, or subsequent INVITE * within dialog. The dialog should have been identified when * the transaction was found. */ pj_assert(tsx != NULL); pj_assert(dlg != NULL); } /* The dialog must have been found. */ pj_assert(dlg != NULL); /* Put the dialog instance in the rdata. */ rdata->endpt_info.mod_data[mod_ua.mod.id] = dlg; /* Attempt to acquire lock to the dialog. */ PJ_LOG(6,(dlg->obj_name, "UA layer acquiring dialog lock for response")); status = pjsip_dlg_try_inc_lock(dlg); if (status != PJ_SUCCESS) { /* Failed to acquire dialog mutex. This could indicate a deadlock * situation, and for safety, try to avoid deadlock by releasing * UA mutex, yield, and retry the whole processing once again. */ pj_mutex_unlock(mod_ua.mutex); pj_thread_sleep(0); goto retry_on_deadlock; } /* We're done with processing in the UA layer, we can release the mutex */ pj_mutex_unlock(mod_ua.mutex); /* Pass the response to the dialog. */ pjsip_dlg_on_rx_response(dlg, rdata); /* Unlock the dialog. This may destroy the dialog. */ pjsip_dlg_dec_lock(dlg); /* Done. */ return PJ_TRUE; } #if PJ_LOG_MAX_LEVEL >= 3 static void print_dialog( const char *title, pjsip_dialog *dlg, char *buf, pj_size_t size) { int len; char userinfo[PJSIP_MAX_URL_SIZE]; len = pjsip_hdr_print_on(dlg->remote.info, userinfo, sizeof(userinfo)); if (len < 0) pj_ansi_strcpy(userinfo, "<--uri too long-->"); else userinfo[len] = '\0'; len = pj_ansi_snprintf(buf, size, "%s[%s] %s", title, (dlg->state==PJSIP_DIALOG_STATE_NULL ? " - " : "est"), userinfo); if (len < 1 || len >= (int)size) { pj_ansi_strcpy(buf, "<--uri too long-->"); } else buf[len] = '\0'; } #endif /* * Dump user agent contents (e.g. all dialogs). 
*/ PJ_DEF(void) pjsip_ua_dump(pj_bool_t detail) { #if PJ_LOG_MAX_LEVEL >= 3 pj_hash_iterator_t itbuf, *it; char dlginfo[128]; pj_mutex_lock(mod_ua.mutex); PJ_LOG(3, (THIS_FILE, "Number of dialog sets: %u", pj_hash_count(mod_ua.dlg_table))); if (detail && pj_hash_count(mod_ua.dlg_table)) { PJ_LOG(3, (THIS_FILE, "Dumping dialog sets:")); it = pj_hash_first(mod_ua.dlg_table, &itbuf); for (; it != NULL; it = pj_hash_next(mod_ua.dlg_table, it)) { struct dlg_set *dlg_set; pjsip_dialog *dlg; const char *title; dlg_set = (struct dlg_set*) pj_hash_this(mod_ua.dlg_table, it); if (!dlg_set || pj_list_empty(&dlg_set->dlg_list)) continue; /* First dialog in dialog set. */ dlg = dlg_set->dlg_list.next; if (dlg->role == PJSIP_ROLE_UAC) title = " [out] "; else title = " [in] "; print_dialog(title, dlg, dlginfo, sizeof(dlginfo)); PJ_LOG(3,(THIS_FILE, "%s", dlginfo)); /* Next dialog in dialog set (forked) */ dlg = dlg->next; while (dlg != (pjsip_dialog*) &dlg_set->dlg_list) { print_dialog(" [forked] ", dlg, dlginfo, sizeof(dlginfo)); dlg = dlg->next; } } } pj_mutex_unlock(mod_ua.mutex); #endif }
PJ_DEF(pj_status_t) pjsip_ua_register_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { /* Sanity check. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* For all dialogs, local tag (inc hash) must has been initialized. */ PJ_ASSERT_RETURN(dlg->local.info && dlg->local.info->tag.slen && dlg->local.tag_hval != 0, PJ_EBUG); /* For UAS dialog, remote tag (inc hash) must have been initialized. */ //PJ_ASSERT_RETURN(dlg->role==PJSIP_ROLE_UAC || // (dlg->role==PJSIP_ROLE_UAS && dlg->remote.info->tag.slen // && dlg->remote.tag_hval != 0), PJ_EBUG); /* Lock the user agent. */ pj_mutex_lock(mod_ua.mutex); /* For UAC, check if there is existing dialog in the same set. */ if (dlg->role == PJSIP_ROLE_UAC) { struct dlg_set *dlg_set; dlg_set = (struct dlg_set*) pj_hash_get_lower( mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, &dlg->local.tag_hval); if (dlg_set) { /* This is NOT the first dialog in the dialog set. * Just add this dialog in the list. */ pj_assert(dlg_set->dlg_list.next != (void*)&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; } else { /* This is the first dialog in the dialog set. * Create the dialog set and add this dialog to it. */ dlg_set = alloc_dlgset_node(); pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; /* Register the dialog set in the hash table. */ pj_hash_set_np_lower(mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } else { /* For UAS, create the dialog set with a single dialog as member. */ struct dlg_set *dlg_set; dlg_set = alloc_dlgset_node(); pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; pj_hash_set_np_lower(mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; }
PJ_DEF(pj_status_t) pjsip_ua_register_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { /* Sanity check. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* For all dialogs, local tag (inc hash) must has been initialized. */ PJ_ASSERT_RETURN(dlg->local.info && dlg->local.info->tag.slen && dlg->local.tag_hval != 0, PJ_EBUG); /* For UAS dialog, remote tag (inc hash) must have been initialized. */ //PJ_ASSERT_RETURN(dlg->role==PJSIP_ROLE_UAC || // (dlg->role==PJSIP_ROLE_UAS && dlg->remote.info->tag.slen // && dlg->remote.tag_hval != 0), PJ_EBUG); /* Lock the user agent. */ pj_mutex_lock(mod_ua.mutex); /* For UAC, check if there is existing dialog in the same set. */ if (dlg->role == PJSIP_ROLE_UAC) { struct dlg_set *dlg_set; dlg_set = (struct dlg_set*) pj_hash_get_lower( mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, &dlg->local.tag_hval); if (dlg_set) { /* This is NOT the first dialog in the dialog set. * Just add this dialog in the list. */ pj_assert(dlg_set->dlg_list.next != (void*)&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; } else { /* This is the first dialog in the dialog set. * Create the dialog set and add this dialog to it. */ dlg_set = alloc_dlgset_node(); dlg_set->ht_key = dlg->local.info->tag; pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; /* Register the dialog set in the hash table. */ pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } else { /* For UAS, create the dialog set with a single dialog as member. */ struct dlg_set *dlg_set; dlg_set = alloc_dlgset_node(); dlg_set->ht_key = dlg->local.info->tag; pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; }
{'added': [(68, ' /* Entry key in the hash table */'), (69, ' pj_str_t ht_key;'), (70, ''), (333, '\t dlg_set->ht_key = dlg->local.info->tag;'), (341, '\t\t\t dlg_set->ht_key.ptr,'), (342, ' (unsigned)dlg_set->ht_key.slen,'), (352, '\tdlg_set->ht_key = dlg->local.info->tag;'), (359, '\t\t dlg_set->ht_key.ptr,'), (360, ' (unsigned)dlg_set->ht_key.slen,'), (405, ''), (406, '\t/* Verify that the dialog set is valid */'), (407, '\tpj_assert(pj_hash_get_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr,'), (408, '\t\t\t\t (unsigned)dlg_set->ht_key.slen,'), (409, '\t\t\t\t &dlg->local.tag_hval) == dlg_set);'), (410, ''), (411, '\tpj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr,'), (412, '\t\t (unsigned)dlg_set->ht_key.slen,'), (417, ' } else {'), (418, '\t/* If the just unregistered dialog is being used as hash key,'), (419, '\t * reset the dlg_set entry with a new key (i.e: from the first dialog'), (420, '\t * in dlg_set).'), (421, '\t */'), (422, '\tif (dlg_set->ht_key.ptr == dlg->local.info->tag.ptr &&'), (423, '\t dlg_set->ht_key.slen == dlg->local.info->tag.slen)'), (424, '\t{'), (425, '\t pjsip_dialog* key_dlg = dlg_set->dlg_list.next;'), (426, ''), (427, '\t /* Verify that the old & new keys share the hash value */'), (428, '\t pj_assert(key_dlg->local.tag_hval == dlg->local.tag_hval);'), (429, ''), (430, '\t pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr,'), (431, '\t\t\t (unsigned)dlg_set->ht_key.slen,'), (432, '\t\t\t dlg->local.tag_hval, NULL);'), (433, ''), (434, '\t dlg_set->ht_key = key_dlg->local.info->tag;'), (435, ''), (436, '\t pj_hash_set_np_lower(mod_ua.dlg_table,'), (437, '\t\t\t\t dlg_set->ht_key.ptr,'), (438, '\t\t\t\t (unsigned)dlg_set->ht_key.slen,'), (439, '\t\t\t\t key_dlg->local.tag_hval, dlg_set->ht_entry,'), (440, '\t\t\t\t dlg_set);'), (441, '\t}')], 'deleted': [(337, '\t\t\t dlg->local.info->tag.ptr,'), (338, ' (unsigned)dlg->local.info->tag.slen,'), (354, '\t\t dlg->local.info->tag.ptr,'), (355, ' (unsigned)dlg->local.info->tag.slen,'), (400, '\tpj_hash_set_lower(NULL, mod_ua.dlg_table, dlg->local.info->tag.ptr,'), (401, '\t\t (unsigned)dlg->local.info->tag.slen,')]}
42
6
538
3384
https://github.com/pjsip/pjproject
CVE-2022-23608
['CWE-416']
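Illustrative note (not a dataset field): the row above records the CVE-2022-23608 / CWE-416 fix, whose diff adds an `ht_key` member to `struct dlg_set` and re-keys the hash entry when the dialog that owned the key string is unregistered while other dialogs remain in the set. The sketch below models that general pattern in plain C under hypothetical names (`tag_set`, `tag_member`, `set_add`, `set_remove`); it is not pjsip API and not part of the dataset, only a minimal illustration of why a container key that aliases member-owned memory must be re-pointed before the owning member is freed.

/*
 * Minimal sketch of the stale-key / use-after-free pattern and its fix.
 * Hypothetical names; plain C, no pjsip dependencies.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct tag_member {
    struct tag_member *next;
    char              *tag;      /* string owned by this member */
} tag_member;

typedef struct tag_set {
    const char *key;             /* lookup key; aliases some member's tag */
    tag_member *head;
} tag_set;

static char *dup_str(const char *s)
{
    char *p = malloc(strlen(s) + 1);
    if (p) strcpy(p, s);
    return p;
}

static void set_add(tag_set *s, const char *tag)
{
    tag_member *m = malloc(sizeof(*m));
    if (!m) return;
    m->tag  = dup_str(tag);
    m->next = s->head;
    s->head = m;
    if (!s->key)                 /* first member's tag becomes the set key */
        s->key = m->tag;
}

/* Remove 'm' from 's'. Re-keying before freeing is the essential step:
 * without it, s->key would keep pointing into freed memory (CWE-416). */
static void set_remove(tag_set *s, tag_member *m)
{
    tag_member **pp = &s->head;
    while (*pp && *pp != m)
        pp = &(*pp)->next;
    if (*pp)
        *pp = m->next;           /* unlink */

    if (s->key == m->tag)        /* key owner is going away: re-point key */
        s->key = s->head ? s->head->tag : NULL;

    free(m->tag);
    free(m);
}

int main(void)
{
    tag_set s = { NULL, NULL };
    set_add(&s, "tag-A");        /* becomes the key owner */
    set_add(&s, "tag-B");

    set_remove(&s, s.head->next);    /* remove "tag-A", the key owner */
    printf("key now: %s\n", s.key ? s.key : "(empty)");
    return 0;
}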
sip_ua_layer.c
pjsip_ua_unregister_dlg
/* $Id$ */ /* * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com) * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <pjsip/sip_ua_layer.h> #include <pjsip/sip_module.h> #include <pjsip/sip_dialog.h> #include <pjsip/sip_endpoint.h> #include <pjsip/sip_errno.h> #include <pjsip/sip_transaction.h> #include <pj/os.h> #include <pj/hash.h> #include <pj/assert.h> #include <pj/string.h> #include <pj/pool.h> #include <pj/log.h> #define THIS_FILE "sip_ua_layer.c" /* * Static prototypes. */ static pj_status_t mod_ua_load(pjsip_endpoint *endpt); static pj_status_t mod_ua_unload(void); static pj_bool_t mod_ua_on_rx_request(pjsip_rx_data *rdata); static pj_bool_t mod_ua_on_rx_response(pjsip_rx_data *rdata); static void mod_ua_on_tsx_state(pjsip_transaction*, pjsip_event*); extern long pjsip_dlg_lock_tls_id; /* defined in sip_dialog.c */ /* This struct is used to represent list of dialog inside a dialog set. * We don't want to use pjsip_dialog for this purpose, to save some * memory (about 100 bytes per dialog set). */ struct dlg_set_head { PJ_DECL_LIST_MEMBER(pjsip_dialog); }; /* This struct represents a dialog set. * This is the value that will be put in the UA's hash table. */ struct dlg_set { /* To put this node in free dlg_set nodes in UA. */ PJ_DECL_LIST_MEMBER(struct dlg_set); /* This is the buffer to store this entry in the hash table. */ pj_hash_entry_buf ht_entry; /* List of dialog in this dialog set. */ struct dlg_set_head dlg_list; }; /* * Module interface. */ static struct user_agent { pjsip_module mod; pj_pool_t *pool; pjsip_endpoint *endpt; pj_mutex_t *mutex; pj_hash_table_t *dlg_table; pjsip_ua_init_param param; struct dlg_set free_dlgset_nodes; } mod_ua = { { NULL, NULL, /* prev, next. */ { "mod-ua", 6 }, /* Name. */ -1, /* Id */ PJSIP_MOD_PRIORITY_UA_PROXY_LAYER, /* Priority */ &mod_ua_load, /* load() */ NULL, /* start() */ NULL, /* stop() */ &mod_ua_unload, /* unload() */ &mod_ua_on_rx_request, /* on_rx_request() */ &mod_ua_on_rx_response, /* on_rx_response() */ NULL, /* on_tx_request. */ NULL, /* on_tx_response() */ &mod_ua_on_tsx_state, /* on_tsx_state() */ } }; /* * mod_ua_load() * * Called when module is being loaded by endpoint. */ static pj_status_t mod_ua_load(pjsip_endpoint *endpt) { pj_status_t status; /* Initialize the user agent. */ mod_ua.endpt = endpt; mod_ua.pool = pjsip_endpt_create_pool( endpt, "ua%p", PJSIP_POOL_LEN_UA, PJSIP_POOL_INC_UA); if (mod_ua.pool == NULL) return PJ_ENOMEM; status = pj_mutex_create_recursive(mod_ua.pool, " ua%p", &mod_ua.mutex); if (status != PJ_SUCCESS) return status; mod_ua.dlg_table = pj_hash_create(mod_ua.pool, PJSIP_MAX_DIALOG_COUNT); if (mod_ua.dlg_table == NULL) return PJ_ENOMEM; pj_list_init(&mod_ua.free_dlgset_nodes); /* Initialize dialog lock. 
*/ status = pj_thread_local_alloc(&pjsip_dlg_lock_tls_id); if (status != PJ_SUCCESS) return status; pj_thread_local_set(pjsip_dlg_lock_tls_id, NULL); return PJ_SUCCESS; } /* * mod_ua_unload() * * Called when module is being unloaded. */ static pj_status_t mod_ua_unload(void) { pj_thread_local_free(pjsip_dlg_lock_tls_id); pj_mutex_destroy(mod_ua.mutex); /* Release pool */ if (mod_ua.pool) { pjsip_endpt_release_pool( mod_ua.endpt, mod_ua.pool ); } return PJ_SUCCESS; } /* * mod_ua_on_tsx_stats() * * Called on changed on transaction state. */ static void mod_ua_on_tsx_state( pjsip_transaction *tsx, pjsip_event *e) { pjsip_dialog *dlg; /* If the module id is -1, it could mean that the module has been * destroyed. */ if (mod_ua.mod.id == -1) return; /* Get the dialog where this transaction belongs. */ dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; /* If dialog instance has gone, it could mean that the dialog * may has been destroyed. */ if (dlg == NULL) return; /* Hand over the event to the dialog. */ pjsip_dlg_on_tsx_state(dlg, tsx, e); } /* * Init user agent module and register it to the endpoint. */ PJ_DEF(pj_status_t) pjsip_ua_init_module( pjsip_endpoint *endpt, const pjsip_ua_init_param *prm) { pj_status_t status; /* Check if module already registered. */ PJ_ASSERT_RETURN(mod_ua.mod.id == -1, PJ_EINVALIDOP); /* Copy param, if exists. */ if (prm) pj_memcpy(&mod_ua.param, prm, sizeof(pjsip_ua_init_param)); /* Register the module. */ status = pjsip_endpt_register_module(endpt, &mod_ua.mod); return status; } /* * Get the instance of the user agent. * */ PJ_DEF(pjsip_user_agent*) pjsip_ua_instance(void) { return &mod_ua.mod; } /* * Get the endpoint where this UA is currently registered. */ PJ_DEF(pjsip_endpoint*) pjsip_ua_get_endpt(pjsip_user_agent *ua) { PJ_UNUSED_ARG(ua); pj_assert(ua == &mod_ua.mod); return mod_ua.endpt; } /* * Destroy the user agent layer. */ PJ_DEF(pj_status_t) pjsip_ua_destroy(void) { /* Check if module already destroyed. */ PJ_ASSERT_RETURN(mod_ua.mod.id != -1, PJ_EINVALIDOP); return pjsip_endpt_unregister_module(mod_ua.endpt, &mod_ua.mod); } /* * Create key to identify dialog set. */ /* PJ_DEF(void) pjsip_ua_create_dlg_set_key( pj_pool_t *pool, pj_str_t *set_key, const pj_str_t *call_id, const pj_str_t *local_tag) { PJ_ASSERT_ON_FAIL(pool && set_key && call_id && local_tag, return;); set_key->slen = call_id->slen + local_tag->slen + 1; set_key->ptr = (char*) pj_pool_alloc(pool, set_key->slen); pj_assert(set_key->ptr != NULL); pj_memcpy(set_key->ptr, call_id->ptr, call_id->slen); set_key->ptr[call_id->slen] = '$'; pj_memcpy(set_key->ptr + call_id->slen + 1, local_tag->ptr, local_tag->slen); } */ /* * Acquire one dlg_set node to be put in the hash table. * This will first look in the free nodes list, then allocate * a new one from UA's pool when one is not available. */ static struct dlg_set *alloc_dlgset_node(void) { struct dlg_set *set; if (!pj_list_empty(&mod_ua.free_dlgset_nodes)) { set = mod_ua.free_dlgset_nodes.next; pj_list_erase(set); return set; } else { set = PJ_POOL_ALLOC_T(mod_ua.pool, struct dlg_set); return set; } } /* * Register new dialog. Called by pjsip_dlg_create_uac() and * pjsip_dlg_create_uas_and_inc_lock(); */ PJ_DEF(pj_status_t) pjsip_ua_register_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { /* Sanity check. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* For all dialogs, local tag (inc hash) must has been initialized. 
*/ PJ_ASSERT_RETURN(dlg->local.info && dlg->local.info->tag.slen && dlg->local.tag_hval != 0, PJ_EBUG); /* For UAS dialog, remote tag (inc hash) must have been initialized. */ //PJ_ASSERT_RETURN(dlg->role==PJSIP_ROLE_UAC || // (dlg->role==PJSIP_ROLE_UAS && dlg->remote.info->tag.slen // && dlg->remote.tag_hval != 0), PJ_EBUG); /* Lock the user agent. */ pj_mutex_lock(mod_ua.mutex); /* For UAC, check if there is existing dialog in the same set. */ if (dlg->role == PJSIP_ROLE_UAC) { struct dlg_set *dlg_set; dlg_set = (struct dlg_set*) pj_hash_get_lower( mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, &dlg->local.tag_hval); if (dlg_set) { /* This is NOT the first dialog in the dialog set. * Just add this dialog in the list. */ pj_assert(dlg_set->dlg_list.next != (void*)&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; } else { /* This is the first dialog in the dialog set. * Create the dialog set and add this dialog to it. */ dlg_set = alloc_dlgset_node(); pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; /* Register the dialog set in the hash table. */ pj_hash_set_np_lower(mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } else { /* For UAS, create the dialog set with a single dialog as member. */ struct dlg_set *dlg_set; dlg_set = alloc_dlgset_node(); pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; pj_hash_set_np_lower(mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; } PJ_DEF(pj_status_t) pjsip_ua_unregister_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { struct dlg_set *dlg_set; pjsip_dialog *d; /* Sanity-check arguments. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* Check that dialog has been registered. */ PJ_ASSERT_RETURN(dlg->dlg_set, PJ_EINVALIDOP); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Find this dialog from the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; d = dlg_set->dlg_list.next; while (d != (pjsip_dialog*)&dlg_set->dlg_list && d != dlg) { d = d->next; } if (d != dlg) { pj_assert(!"Dialog is not registered!"); pj_mutex_unlock(mod_ua.mutex); return PJ_EINVALIDOP; } /* Remove this dialog from the list. */ pj_list_erase(dlg); /* If dialog list is empty, remove the dialog set from the hash table. */ if (pj_list_empty(&dlg_set->dlg_list)) { pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, NULL); /* Return dlg_set to free nodes. */ pj_list_push_back(&mod_ua.free_dlgset_nodes, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; } PJ_DEF(pjsip_dialog*) pjsip_rdata_get_dlg( pjsip_rx_data *rdata ) { return (pjsip_dialog*) rdata->endpt_info.mod_data[mod_ua.mod.id]; } PJ_DEF(pjsip_dialog*) pjsip_tdata_get_dlg( pjsip_tx_data *tdata ) { return (pjsip_dialog*) tdata->mod_data[mod_ua.mod.id]; } PJ_DEF(pjsip_dialog*) pjsip_tsx_get_dlg( pjsip_transaction *tsx ) { return (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; } /* * Retrieve the current number of dialog-set currently registered * in the hash table. 
*/ PJ_DEF(unsigned) pjsip_ua_get_dlg_set_count(void) { unsigned count; PJ_ASSERT_RETURN(mod_ua.endpt, 0); pj_mutex_lock(mod_ua.mutex); count = pj_hash_count(mod_ua.dlg_table); pj_mutex_unlock(mod_ua.mutex); return count; } /* * Find a dialog. */ PJ_DEF(pjsip_dialog*) pjsip_ua_find_dialog(const pj_str_t *call_id, const pj_str_t *local_tag, const pj_str_t *remote_tag, pj_bool_t lock_dialog) { struct dlg_set *dlg_set; pjsip_dialog *dlg; PJ_ASSERT_RETURN(call_id && local_tag && remote_tag, NULL); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Lookup the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, local_tag->ptr, (unsigned)local_tag->slen, NULL); if (dlg_set == NULL) { /* Not found */ pj_mutex_unlock(mod_ua.mutex); return NULL; } /* Dialog set is found, now find the matching dialog based on the * remote tag. */ dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { if (pj_stricmp(&dlg->remote.info->tag, remote_tag) == 0) break; dlg = dlg->next; } if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { /* Not found */ pj_mutex_unlock(mod_ua.mutex); return NULL; } /* Dialog has been found. It SHOULD have the right Call-ID!! */ if (pj_strcmp(&dlg->call_id->id, call_id)!=0) { PJ_LOG(6, (THIS_FILE, "Dialog not found: local and remote tags " "matched but not call id")); pj_mutex_unlock(mod_ua.mutex); return NULL; } if (lock_dialog) { if (pjsip_dlg_try_inc_lock(dlg) != PJ_SUCCESS) { /* * Unable to acquire dialog's lock while holding the user * agent's mutex. Release the UA mutex before retrying once * more. * * THIS MAY CAUSE RACE CONDITION! */ /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Lock dialog */ pjsip_dlg_inc_lock(dlg); } else { /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); } } else { /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); } return dlg; } /* * Find the first dialog in dialog set in hash table for an incoming message. */ static struct dlg_set *find_dlg_set_for_msg( pjsip_rx_data *rdata ) { /* CANCEL message doesn't have To tag, so we must lookup the dialog * by finding the INVITE UAS transaction being cancelled. */ if (rdata->msg_info.cseq->method.id == PJSIP_CANCEL_METHOD) { pjsip_dialog *dlg; /* Create key for the rdata, but this time, use INVITE as the * method. */ pj_str_t key; pjsip_role_e role; pjsip_transaction *tsx; if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) role = PJSIP_ROLE_UAS; else role = PJSIP_ROLE_UAC; pjsip_tsx_create_key(rdata->tp_info.pool, &key, role, pjsip_get_invite_method(), rdata); /* Lookup the INVITE transaction */ tsx = pjsip_tsx_layer_find_tsx2(&key, PJ_TRUE); /* We should find the dialog attached to the INVITE transaction */ if (tsx) { dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; pj_grp_lock_dec_ref(tsx->grp_lock); /* Dlg may be NULL on some extreme condition * (e.g. during debugging where initially there is a dialog) */ return dlg ? (struct dlg_set*) dlg->dlg_set : NULL; } else { return NULL; } } else { pj_str_t *tag; struct dlg_set *dlg_set; if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) tag = &rdata->msg_info.to->tag; else tag = &rdata->msg_info.from->tag; /* Lookup the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, tag->ptr, (unsigned)tag->slen, NULL); return dlg_set; } } /* On received requests. 
*/ static pj_bool_t mod_ua_on_rx_request(pjsip_rx_data *rdata) { struct dlg_set *dlg_set; pj_str_t *from_tag; pjsip_dialog *dlg; pj_status_t status; /* Optimized path: bail out early if request is not CANCEL and it doesn't * have To tag */ if (rdata->msg_info.to->tag.slen == 0 && rdata->msg_info.msg->line.req.method.id != PJSIP_CANCEL_METHOD) { return PJ_FALSE; } /* Incoming REGISTER may have tags in it */ if (rdata->msg_info.msg->line.req.method.id == PJSIP_REGISTER_METHOD) return PJ_FALSE; retry_on_deadlock: /* Lock user agent before looking up the dialog hash table. */ pj_mutex_lock(mod_ua.mutex); /* Lookup the dialog set, based on the To tag header. */ dlg_set = find_dlg_set_for_msg(rdata); /* If dialog is not found, respond with 481 (Call/Transaction * Does Not Exist). */ if (dlg_set == NULL) { /* Unable to find dialog. */ pj_mutex_unlock(mod_ua.mutex); if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) { PJ_LOG(5,(THIS_FILE, "Unable to find dialogset for %s, answering with 481", pjsip_rx_data_get_info(rdata))); /* Respond with 481 . */ pjsip_endpt_respond_stateless( mod_ua.endpt, rdata, 481, NULL, NULL, NULL ); } return PJ_TRUE; } /* Dialog set has been found. * Find the dialog in the dialog set based on the content of the remote * tag. */ from_tag = &rdata->msg_info.from->tag; dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { if (pj_stricmp(&dlg->remote.info->tag, from_tag) == 0) break; dlg = dlg->next; } /* Dialog may not be found, e.g. in this case: * - UAC sends SUBSCRIBE, then UAS sends NOTIFY before answering * SUBSCRIBE request with 2xx. * * In this case, we can accept the request ONLY when the original * dialog still has empty To tag. */ if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { pjsip_dialog *first_dlg = dlg_set->dlg_list.next; if (first_dlg->remote.info->tag.slen != 0) { /* Not found. Mulfunction UAC? */ pj_mutex_unlock(mod_ua.mutex); if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) { PJ_LOG(5,(THIS_FILE, "Unable to find dialog for %s, answering with 481", pjsip_rx_data_get_info(rdata))); pjsip_endpt_respond_stateless(mod_ua.endpt, rdata, PJSIP_SC_CALL_TSX_DOES_NOT_EXIST, NULL, NULL, NULL); } else { PJ_LOG(5,(THIS_FILE, "Unable to find dialog for %s", pjsip_rx_data_get_info(rdata))); } return PJ_TRUE; } dlg = first_dlg; } /* Mark the dialog id of the request. */ rdata->endpt_info.mod_data[mod_ua.mod.id] = dlg; /* Try to lock the dialog */ PJ_LOG(6,(dlg->obj_name, "UA layer acquiring dialog lock for request")); status = pjsip_dlg_try_inc_lock(dlg); if (status != PJ_SUCCESS) { /* Failed to acquire dialog mutex immediately, this could be * because of deadlock. Release UA mutex, yield, and retry * the whole thing once again. */ pj_mutex_unlock(mod_ua.mutex); pj_thread_sleep(0); goto retry_on_deadlock; } /* Done with processing in UA layer, release lock */ pj_mutex_unlock(mod_ua.mutex); /* Pass to dialog. */ pjsip_dlg_on_rx_request(dlg, rdata); /* Unlock the dialog. This may destroy the dialog */ pjsip_dlg_dec_lock(dlg); /* Report as handled. */ return PJ_TRUE; } /* On rx response notification. */ static pj_bool_t mod_ua_on_rx_response(pjsip_rx_data *rdata) { pjsip_transaction *tsx; struct dlg_set *dlg_set; pjsip_dialog *dlg; pj_status_t status; /* * Find the dialog instance for the response. * All outgoing dialog requests are sent statefully, which means * there will be an UAC transaction associated with this response, * and the dialog instance will be recorded in that transaction. 
* * But even when transaction is found, there is possibility that * the response is a forked response. */ retry_on_deadlock: dlg = NULL; /* Lock user agent dlg table before we're doing anything. */ pj_mutex_lock(mod_ua.mutex); /* Check if transaction is present. */ tsx = pjsip_rdata_get_tsx(rdata); if (tsx) { /* Check if dialog is present in the transaction. */ dlg = pjsip_tsx_get_dlg(tsx); if (!dlg) { /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); return PJ_FALSE; } /* Get the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; /* Even if transaction is found and (candidate) dialog has been * identified, it's possible that the request has forked. */ } else { /* Transaction is not present. * Check if this is a 2xx/OK response to INVITE, which in this * case the response will be handled directly by the * dialog. */ pjsip_cseq_hdr *cseq_hdr = rdata->msg_info.cseq; if (cseq_hdr->method.id != PJSIP_INVITE_METHOD || rdata->msg_info.msg->line.status.code / 100 != 2) { /* Not a 2xx response to INVITE. * This must be some stateless response sent by other modules, * or a very late response. */ /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); return PJ_FALSE; } /* Get the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, rdata->msg_info.from->tag.ptr, (unsigned)rdata->msg_info.from->tag.slen, NULL); if (!dlg_set) { /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); /* Strayed 2xx response!! */ PJ_LOG(4,(THIS_FILE, "Received strayed 2xx response (no dialog is found)" " from %s:%d: %s", rdata->pkt_info.src_name, rdata->pkt_info.src_port, pjsip_rx_data_get_info(rdata))); return PJ_TRUE; } } /* At this point, we must have the dialog set, and the dialog set * must have a dialog in the list. */ pj_assert(dlg_set && !pj_list_empty(&dlg_set->dlg_list)); /* Check for forked response. * Request will fork only for the initial INVITE request. */ //This doesn't work when there is authentication challenge, since //first_cseq evaluation will yield false. //if (rdata->msg_info.cseq->method.id == PJSIP_INVITE_METHOD && // rdata->msg_info.cseq->cseq == dlg_set->dlg_list.next->local.first_cseq) if (rdata->msg_info.cseq->method.id == PJSIP_INVITE_METHOD) { int st_code = rdata->msg_info.msg->line.status.code; pj_str_t *to_tag = &rdata->msg_info.to->tag; dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { /* If there is dialog with no remote tag (i.e. dialog has not * been established yet), then send this response to that * dialog. */ if (dlg->remote.info->tag.slen == 0) break; /* Otherwise find the one with matching To tag. */ if (pj_stricmp(to_tag, &dlg->remote.info->tag) == 0) break; dlg = dlg->next; } /* If no dialog with matching remote tag is found, this must be * a forked response. Respond to this ONLY when response is non-100 * provisional response OR a 2xx response. */ if (dlg == (pjsip_dialog*)&dlg_set->dlg_list && ((st_code/100==1 && st_code!=100) || st_code/100==2)) { PJ_LOG(5,(THIS_FILE, "Received forked %s for existing dialog %s", pjsip_rx_data_get_info(rdata), dlg_set->dlg_list.next->obj_name)); /* Report to application about forked condition. * Application can either create a dialog or ignore the response. 
*/ if (mod_ua.param.on_dlg_forked) { dlg = (*mod_ua.param.on_dlg_forked)(dlg_set->dlg_list.next, rdata); if (dlg == NULL) { pj_mutex_unlock(mod_ua.mutex); return PJ_TRUE; } } else { dlg = dlg_set->dlg_list.next; PJ_LOG(4,(THIS_FILE, "Unhandled forked %s from %s:%d, response will be " "handed over to the first dialog", pjsip_rx_data_get_info(rdata), rdata->pkt_info.src_name, rdata->pkt_info.src_port)); } } else if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { /* For 100 or non-2xx response which has different To tag, * pass the response to the first dialog. */ dlg = dlg_set->dlg_list.next; } } else { /* Either this is a non-INVITE response, or subsequent INVITE * within dialog. The dialog should have been identified when * the transaction was found. */ pj_assert(tsx != NULL); pj_assert(dlg != NULL); } /* The dialog must have been found. */ pj_assert(dlg != NULL); /* Put the dialog instance in the rdata. */ rdata->endpt_info.mod_data[mod_ua.mod.id] = dlg; /* Attempt to acquire lock to the dialog. */ PJ_LOG(6,(dlg->obj_name, "UA layer acquiring dialog lock for response")); status = pjsip_dlg_try_inc_lock(dlg); if (status != PJ_SUCCESS) { /* Failed to acquire dialog mutex. This could indicate a deadlock * situation, and for safety, try to avoid deadlock by releasing * UA mutex, yield, and retry the whole processing once again. */ pj_mutex_unlock(mod_ua.mutex); pj_thread_sleep(0); goto retry_on_deadlock; } /* We're done with processing in the UA layer, we can release the mutex */ pj_mutex_unlock(mod_ua.mutex); /* Pass the response to the dialog. */ pjsip_dlg_on_rx_response(dlg, rdata); /* Unlock the dialog. This may destroy the dialog. */ pjsip_dlg_dec_lock(dlg); /* Done. */ return PJ_TRUE; } #if PJ_LOG_MAX_LEVEL >= 3 static void print_dialog( const char *title, pjsip_dialog *dlg, char *buf, pj_size_t size) { int len; char userinfo[PJSIP_MAX_URL_SIZE]; len = pjsip_hdr_print_on(dlg->remote.info, userinfo, sizeof(userinfo)); if (len < 0) pj_ansi_strcpy(userinfo, "<--uri too long-->"); else userinfo[len] = '\0'; len = pj_ansi_snprintf(buf, size, "%s[%s] %s", title, (dlg->state==PJSIP_DIALOG_STATE_NULL ? " - " : "est"), userinfo); if (len < 1 || len >= (int)size) { pj_ansi_strcpy(buf, "<--uri too long-->"); } else buf[len] = '\0'; } #endif /* * Dump user agent contents (e.g. all dialogs). */ PJ_DEF(void) pjsip_ua_dump(pj_bool_t detail) { #if PJ_LOG_MAX_LEVEL >= 3 pj_hash_iterator_t itbuf, *it; char dlginfo[128]; pj_mutex_lock(mod_ua.mutex); PJ_LOG(3, (THIS_FILE, "Number of dialog sets: %u", pj_hash_count(mod_ua.dlg_table))); if (detail && pj_hash_count(mod_ua.dlg_table)) { PJ_LOG(3, (THIS_FILE, "Dumping dialog sets:")); it = pj_hash_first(mod_ua.dlg_table, &itbuf); for (; it != NULL; it = pj_hash_next(mod_ua.dlg_table, it)) { struct dlg_set *dlg_set; pjsip_dialog *dlg; const char *title; dlg_set = (struct dlg_set*) pj_hash_this(mod_ua.dlg_table, it); if (!dlg_set || pj_list_empty(&dlg_set->dlg_list)) continue; /* First dialog in dialog set. */ dlg = dlg_set->dlg_list.next; if (dlg->role == PJSIP_ROLE_UAC) title = " [out] "; else title = " [in] "; print_dialog(title, dlg, dlginfo, sizeof(dlginfo)); PJ_LOG(3,(THIS_FILE, "%s", dlginfo)); /* Next dialog in dialog set (forked) */ dlg = dlg->next; while (dlg != (pjsip_dialog*) &dlg_set->dlg_list) { print_dialog(" [forked] ", dlg, dlginfo, sizeof(dlginfo)); dlg = dlg->next; } } } pj_mutex_unlock(mod_ua.mutex); #endif }
/* $Id$ */ /* * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com) * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <pjsip/sip_ua_layer.h> #include <pjsip/sip_module.h> #include <pjsip/sip_dialog.h> #include <pjsip/sip_endpoint.h> #include <pjsip/sip_errno.h> #include <pjsip/sip_transaction.h> #include <pj/os.h> #include <pj/hash.h> #include <pj/assert.h> #include <pj/string.h> #include <pj/pool.h> #include <pj/log.h> #define THIS_FILE "sip_ua_layer.c" /* * Static prototypes. */ static pj_status_t mod_ua_load(pjsip_endpoint *endpt); static pj_status_t mod_ua_unload(void); static pj_bool_t mod_ua_on_rx_request(pjsip_rx_data *rdata); static pj_bool_t mod_ua_on_rx_response(pjsip_rx_data *rdata); static void mod_ua_on_tsx_state(pjsip_transaction*, pjsip_event*); extern long pjsip_dlg_lock_tls_id; /* defined in sip_dialog.c */ /* This struct is used to represent list of dialog inside a dialog set. * We don't want to use pjsip_dialog for this purpose, to save some * memory (about 100 bytes per dialog set). */ struct dlg_set_head { PJ_DECL_LIST_MEMBER(pjsip_dialog); }; /* This struct represents a dialog set. * This is the value that will be put in the UA's hash table. */ struct dlg_set { /* To put this node in free dlg_set nodes in UA. */ PJ_DECL_LIST_MEMBER(struct dlg_set); /* This is the buffer to store this entry in the hash table. */ pj_hash_entry_buf ht_entry; /* Entry key in the hash table */ pj_str_t ht_key; /* List of dialog in this dialog set. */ struct dlg_set_head dlg_list; }; /* * Module interface. */ static struct user_agent { pjsip_module mod; pj_pool_t *pool; pjsip_endpoint *endpt; pj_mutex_t *mutex; pj_hash_table_t *dlg_table; pjsip_ua_init_param param; struct dlg_set free_dlgset_nodes; } mod_ua = { { NULL, NULL, /* prev, next. */ { "mod-ua", 6 }, /* Name. */ -1, /* Id */ PJSIP_MOD_PRIORITY_UA_PROXY_LAYER, /* Priority */ &mod_ua_load, /* load() */ NULL, /* start() */ NULL, /* stop() */ &mod_ua_unload, /* unload() */ &mod_ua_on_rx_request, /* on_rx_request() */ &mod_ua_on_rx_response, /* on_rx_response() */ NULL, /* on_tx_request. */ NULL, /* on_tx_response() */ &mod_ua_on_tsx_state, /* on_tsx_state() */ } }; /* * mod_ua_load() * * Called when module is being loaded by endpoint. */ static pj_status_t mod_ua_load(pjsip_endpoint *endpt) { pj_status_t status; /* Initialize the user agent. */ mod_ua.endpt = endpt; mod_ua.pool = pjsip_endpt_create_pool( endpt, "ua%p", PJSIP_POOL_LEN_UA, PJSIP_POOL_INC_UA); if (mod_ua.pool == NULL) return PJ_ENOMEM; status = pj_mutex_create_recursive(mod_ua.pool, " ua%p", &mod_ua.mutex); if (status != PJ_SUCCESS) return status; mod_ua.dlg_table = pj_hash_create(mod_ua.pool, PJSIP_MAX_DIALOG_COUNT); if (mod_ua.dlg_table == NULL) return PJ_ENOMEM; pj_list_init(&mod_ua.free_dlgset_nodes); /* Initialize dialog lock. 
*/ status = pj_thread_local_alloc(&pjsip_dlg_lock_tls_id); if (status != PJ_SUCCESS) return status; pj_thread_local_set(pjsip_dlg_lock_tls_id, NULL); return PJ_SUCCESS; } /* * mod_ua_unload() * * Called when module is being unloaded. */ static pj_status_t mod_ua_unload(void) { pj_thread_local_free(pjsip_dlg_lock_tls_id); pj_mutex_destroy(mod_ua.mutex); /* Release pool */ if (mod_ua.pool) { pjsip_endpt_release_pool( mod_ua.endpt, mod_ua.pool ); } return PJ_SUCCESS; } /* * mod_ua_on_tsx_stats() * * Called on changed on transaction state. */ static void mod_ua_on_tsx_state( pjsip_transaction *tsx, pjsip_event *e) { pjsip_dialog *dlg; /* If the module id is -1, it could mean that the module has been * destroyed. */ if (mod_ua.mod.id == -1) return; /* Get the dialog where this transaction belongs. */ dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; /* If dialog instance has gone, it could mean that the dialog * may has been destroyed. */ if (dlg == NULL) return; /* Hand over the event to the dialog. */ pjsip_dlg_on_tsx_state(dlg, tsx, e); } /* * Init user agent module and register it to the endpoint. */ PJ_DEF(pj_status_t) pjsip_ua_init_module( pjsip_endpoint *endpt, const pjsip_ua_init_param *prm) { pj_status_t status; /* Check if module already registered. */ PJ_ASSERT_RETURN(mod_ua.mod.id == -1, PJ_EINVALIDOP); /* Copy param, if exists. */ if (prm) pj_memcpy(&mod_ua.param, prm, sizeof(pjsip_ua_init_param)); /* Register the module. */ status = pjsip_endpt_register_module(endpt, &mod_ua.mod); return status; } /* * Get the instance of the user agent. * */ PJ_DEF(pjsip_user_agent*) pjsip_ua_instance(void) { return &mod_ua.mod; } /* * Get the endpoint where this UA is currently registered. */ PJ_DEF(pjsip_endpoint*) pjsip_ua_get_endpt(pjsip_user_agent *ua) { PJ_UNUSED_ARG(ua); pj_assert(ua == &mod_ua.mod); return mod_ua.endpt; } /* * Destroy the user agent layer. */ PJ_DEF(pj_status_t) pjsip_ua_destroy(void) { /* Check if module already destroyed. */ PJ_ASSERT_RETURN(mod_ua.mod.id != -1, PJ_EINVALIDOP); return pjsip_endpt_unregister_module(mod_ua.endpt, &mod_ua.mod); } /* * Create key to identify dialog set. */ /* PJ_DEF(void) pjsip_ua_create_dlg_set_key( pj_pool_t *pool, pj_str_t *set_key, const pj_str_t *call_id, const pj_str_t *local_tag) { PJ_ASSERT_ON_FAIL(pool && set_key && call_id && local_tag, return;); set_key->slen = call_id->slen + local_tag->slen + 1; set_key->ptr = (char*) pj_pool_alloc(pool, set_key->slen); pj_assert(set_key->ptr != NULL); pj_memcpy(set_key->ptr, call_id->ptr, call_id->slen); set_key->ptr[call_id->slen] = '$'; pj_memcpy(set_key->ptr + call_id->slen + 1, local_tag->ptr, local_tag->slen); } */ /* * Acquire one dlg_set node to be put in the hash table. * This will first look in the free nodes list, then allocate * a new one from UA's pool when one is not available. */ static struct dlg_set *alloc_dlgset_node(void) { struct dlg_set *set; if (!pj_list_empty(&mod_ua.free_dlgset_nodes)) { set = mod_ua.free_dlgset_nodes.next; pj_list_erase(set); return set; } else { set = PJ_POOL_ALLOC_T(mod_ua.pool, struct dlg_set); return set; } } /* * Register new dialog. Called by pjsip_dlg_create_uac() and * pjsip_dlg_create_uas_and_inc_lock(); */ PJ_DEF(pj_status_t) pjsip_ua_register_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { /* Sanity check. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* For all dialogs, local tag (inc hash) must has been initialized. 
*/ PJ_ASSERT_RETURN(dlg->local.info && dlg->local.info->tag.slen && dlg->local.tag_hval != 0, PJ_EBUG); /* For UAS dialog, remote tag (inc hash) must have been initialized. */ //PJ_ASSERT_RETURN(dlg->role==PJSIP_ROLE_UAC || // (dlg->role==PJSIP_ROLE_UAS && dlg->remote.info->tag.slen // && dlg->remote.tag_hval != 0), PJ_EBUG); /* Lock the user agent. */ pj_mutex_lock(mod_ua.mutex); /* For UAC, check if there is existing dialog in the same set. */ if (dlg->role == PJSIP_ROLE_UAC) { struct dlg_set *dlg_set; dlg_set = (struct dlg_set*) pj_hash_get_lower( mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, &dlg->local.tag_hval); if (dlg_set) { /* This is NOT the first dialog in the dialog set. * Just add this dialog in the list. */ pj_assert(dlg_set->dlg_list.next != (void*)&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; } else { /* This is the first dialog in the dialog set. * Create the dialog set and add this dialog to it. */ dlg_set = alloc_dlgset_node(); dlg_set->ht_key = dlg->local.info->tag; pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; /* Register the dialog set in the hash table. */ pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } else { /* For UAS, create the dialog set with a single dialog as member. */ struct dlg_set *dlg_set; dlg_set = alloc_dlgset_node(); dlg_set->ht_key = dlg->local.info->tag; pj_list_init(&dlg_set->dlg_list); pj_list_push_back(&dlg_set->dlg_list, dlg); dlg->dlg_set = dlg_set; pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; } PJ_DEF(pj_status_t) pjsip_ua_unregister_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { struct dlg_set *dlg_set; pjsip_dialog *d; /* Sanity-check arguments. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* Check that dialog has been registered. */ PJ_ASSERT_RETURN(dlg->dlg_set, PJ_EINVALIDOP); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Find this dialog from the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; d = dlg_set->dlg_list.next; while (d != (pjsip_dialog*)&dlg_set->dlg_list && d != dlg) { d = d->next; } if (d != dlg) { pj_assert(!"Dialog is not registered!"); pj_mutex_unlock(mod_ua.mutex); return PJ_EINVALIDOP; } /* Remove this dialog from the list. */ pj_list_erase(dlg); /* If dialog list is empty, remove the dialog set from the hash table. */ if (pj_list_empty(&dlg_set->dlg_list)) { /* Verify that the dialog set is valid */ pj_assert(pj_hash_get_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, &dlg->local.tag_hval) == dlg_set); pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, NULL); /* Return dlg_set to free nodes. */ pj_list_push_back(&mod_ua.free_dlgset_nodes, dlg_set); } else { /* If the just unregistered dialog is being used as hash key, * reset the dlg_set entry with a new key (i.e: from the first dialog * in dlg_set). 
*/ if (dlg_set->ht_key.ptr == dlg->local.info->tag.ptr && dlg_set->ht_key.slen == dlg->local.info->tag.slen) { pjsip_dialog* key_dlg = dlg_set->dlg_list.next; /* Verify that the old & new keys share the hash value */ pj_assert(key_dlg->local.tag_hval == dlg->local.tag_hval); pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, NULL); dlg_set->ht_key = key_dlg->local.info->tag; pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, key_dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; } PJ_DEF(pjsip_dialog*) pjsip_rdata_get_dlg( pjsip_rx_data *rdata ) { return (pjsip_dialog*) rdata->endpt_info.mod_data[mod_ua.mod.id]; } PJ_DEF(pjsip_dialog*) pjsip_tdata_get_dlg( pjsip_tx_data *tdata ) { return (pjsip_dialog*) tdata->mod_data[mod_ua.mod.id]; } PJ_DEF(pjsip_dialog*) pjsip_tsx_get_dlg( pjsip_transaction *tsx ) { return (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; } /* * Retrieve the current number of dialog-set currently registered * in the hash table. */ PJ_DEF(unsigned) pjsip_ua_get_dlg_set_count(void) { unsigned count; PJ_ASSERT_RETURN(mod_ua.endpt, 0); pj_mutex_lock(mod_ua.mutex); count = pj_hash_count(mod_ua.dlg_table); pj_mutex_unlock(mod_ua.mutex); return count; } /* * Find a dialog. */ PJ_DEF(pjsip_dialog*) pjsip_ua_find_dialog(const pj_str_t *call_id, const pj_str_t *local_tag, const pj_str_t *remote_tag, pj_bool_t lock_dialog) { struct dlg_set *dlg_set; pjsip_dialog *dlg; PJ_ASSERT_RETURN(call_id && local_tag && remote_tag, NULL); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Lookup the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, local_tag->ptr, (unsigned)local_tag->slen, NULL); if (dlg_set == NULL) { /* Not found */ pj_mutex_unlock(mod_ua.mutex); return NULL; } /* Dialog set is found, now find the matching dialog based on the * remote tag. */ dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { if (pj_stricmp(&dlg->remote.info->tag, remote_tag) == 0) break; dlg = dlg->next; } if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { /* Not found */ pj_mutex_unlock(mod_ua.mutex); return NULL; } /* Dialog has been found. It SHOULD have the right Call-ID!! */ if (pj_strcmp(&dlg->call_id->id, call_id)!=0) { PJ_LOG(6, (THIS_FILE, "Dialog not found: local and remote tags " "matched but not call id")); pj_mutex_unlock(mod_ua.mutex); return NULL; } if (lock_dialog) { if (pjsip_dlg_try_inc_lock(dlg) != PJ_SUCCESS) { /* * Unable to acquire dialog's lock while holding the user * agent's mutex. Release the UA mutex before retrying once * more. * * THIS MAY CAUSE RACE CONDITION! */ /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Lock dialog */ pjsip_dlg_inc_lock(dlg); } else { /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); } } else { /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); } return dlg; } /* * Find the first dialog in dialog set in hash table for an incoming message. */ static struct dlg_set *find_dlg_set_for_msg( pjsip_rx_data *rdata ) { /* CANCEL message doesn't have To tag, so we must lookup the dialog * by finding the INVITE UAS transaction being cancelled. */ if (rdata->msg_info.cseq->method.id == PJSIP_CANCEL_METHOD) { pjsip_dialog *dlg; /* Create key for the rdata, but this time, use INVITE as the * method. 
*/ pj_str_t key; pjsip_role_e role; pjsip_transaction *tsx; if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) role = PJSIP_ROLE_UAS; else role = PJSIP_ROLE_UAC; pjsip_tsx_create_key(rdata->tp_info.pool, &key, role, pjsip_get_invite_method(), rdata); /* Lookup the INVITE transaction */ tsx = pjsip_tsx_layer_find_tsx2(&key, PJ_TRUE); /* We should find the dialog attached to the INVITE transaction */ if (tsx) { dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; pj_grp_lock_dec_ref(tsx->grp_lock); /* Dlg may be NULL on some extreme condition * (e.g. during debugging where initially there is a dialog) */ return dlg ? (struct dlg_set*) dlg->dlg_set : NULL; } else { return NULL; } } else { pj_str_t *tag; struct dlg_set *dlg_set; if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) tag = &rdata->msg_info.to->tag; else tag = &rdata->msg_info.from->tag; /* Lookup the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, tag->ptr, (unsigned)tag->slen, NULL); return dlg_set; } } /* On received requests. */ static pj_bool_t mod_ua_on_rx_request(pjsip_rx_data *rdata) { struct dlg_set *dlg_set; pj_str_t *from_tag; pjsip_dialog *dlg; pj_status_t status; /* Optimized path: bail out early if request is not CANCEL and it doesn't * have To tag */ if (rdata->msg_info.to->tag.slen == 0 && rdata->msg_info.msg->line.req.method.id != PJSIP_CANCEL_METHOD) { return PJ_FALSE; } /* Incoming REGISTER may have tags in it */ if (rdata->msg_info.msg->line.req.method.id == PJSIP_REGISTER_METHOD) return PJ_FALSE; retry_on_deadlock: /* Lock user agent before looking up the dialog hash table. */ pj_mutex_lock(mod_ua.mutex); /* Lookup the dialog set, based on the To tag header. */ dlg_set = find_dlg_set_for_msg(rdata); /* If dialog is not found, respond with 481 (Call/Transaction * Does Not Exist). */ if (dlg_set == NULL) { /* Unable to find dialog. */ pj_mutex_unlock(mod_ua.mutex); if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) { PJ_LOG(5,(THIS_FILE, "Unable to find dialogset for %s, answering with 481", pjsip_rx_data_get_info(rdata))); /* Respond with 481 . */ pjsip_endpt_respond_stateless( mod_ua.endpt, rdata, 481, NULL, NULL, NULL ); } return PJ_TRUE; } /* Dialog set has been found. * Find the dialog in the dialog set based on the content of the remote * tag. */ from_tag = &rdata->msg_info.from->tag; dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { if (pj_stricmp(&dlg->remote.info->tag, from_tag) == 0) break; dlg = dlg->next; } /* Dialog may not be found, e.g. in this case: * - UAC sends SUBSCRIBE, then UAS sends NOTIFY before answering * SUBSCRIBE request with 2xx. * * In this case, we can accept the request ONLY when the original * dialog still has empty To tag. */ if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { pjsip_dialog *first_dlg = dlg_set->dlg_list.next; if (first_dlg->remote.info->tag.slen != 0) { /* Not found. Mulfunction UAC? */ pj_mutex_unlock(mod_ua.mutex); if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) { PJ_LOG(5,(THIS_FILE, "Unable to find dialog for %s, answering with 481", pjsip_rx_data_get_info(rdata))); pjsip_endpt_respond_stateless(mod_ua.endpt, rdata, PJSIP_SC_CALL_TSX_DOES_NOT_EXIST, NULL, NULL, NULL); } else { PJ_LOG(5,(THIS_FILE, "Unable to find dialog for %s", pjsip_rx_data_get_info(rdata))); } return PJ_TRUE; } dlg = first_dlg; } /* Mark the dialog id of the request. 
*/ rdata->endpt_info.mod_data[mod_ua.mod.id] = dlg; /* Try to lock the dialog */ PJ_LOG(6,(dlg->obj_name, "UA layer acquiring dialog lock for request")); status = pjsip_dlg_try_inc_lock(dlg); if (status != PJ_SUCCESS) { /* Failed to acquire dialog mutex immediately, this could be * because of deadlock. Release UA mutex, yield, and retry * the whole thing once again. */ pj_mutex_unlock(mod_ua.mutex); pj_thread_sleep(0); goto retry_on_deadlock; } /* Done with processing in UA layer, release lock */ pj_mutex_unlock(mod_ua.mutex); /* Pass to dialog. */ pjsip_dlg_on_rx_request(dlg, rdata); /* Unlock the dialog. This may destroy the dialog */ pjsip_dlg_dec_lock(dlg); /* Report as handled. */ return PJ_TRUE; } /* On rx response notification. */ static pj_bool_t mod_ua_on_rx_response(pjsip_rx_data *rdata) { pjsip_transaction *tsx; struct dlg_set *dlg_set; pjsip_dialog *dlg; pj_status_t status; /* * Find the dialog instance for the response. * All outgoing dialog requests are sent statefully, which means * there will be an UAC transaction associated with this response, * and the dialog instance will be recorded in that transaction. * * But even when transaction is found, there is possibility that * the response is a forked response. */ retry_on_deadlock: dlg = NULL; /* Lock user agent dlg table before we're doing anything. */ pj_mutex_lock(mod_ua.mutex); /* Check if transaction is present. */ tsx = pjsip_rdata_get_tsx(rdata); if (tsx) { /* Check if dialog is present in the transaction. */ dlg = pjsip_tsx_get_dlg(tsx); if (!dlg) { /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); return PJ_FALSE; } /* Get the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; /* Even if transaction is found and (candidate) dialog has been * identified, it's possible that the request has forked. */ } else { /* Transaction is not present. * Check if this is a 2xx/OK response to INVITE, which in this * case the response will be handled directly by the * dialog. */ pjsip_cseq_hdr *cseq_hdr = rdata->msg_info.cseq; if (cseq_hdr->method.id != PJSIP_INVITE_METHOD || rdata->msg_info.msg->line.status.code / 100 != 2) { /* Not a 2xx response to INVITE. * This must be some stateless response sent by other modules, * or a very late response. */ /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); return PJ_FALSE; } /* Get the dialog set. */ dlg_set = (struct dlg_set*) pj_hash_get_lower(mod_ua.dlg_table, rdata->msg_info.from->tag.ptr, (unsigned)rdata->msg_info.from->tag.slen, NULL); if (!dlg_set) { /* Unlock dialog hash table. */ pj_mutex_unlock(mod_ua.mutex); /* Strayed 2xx response!! */ PJ_LOG(4,(THIS_FILE, "Received strayed 2xx response (no dialog is found)" " from %s:%d: %s", rdata->pkt_info.src_name, rdata->pkt_info.src_port, pjsip_rx_data_get_info(rdata))); return PJ_TRUE; } } /* At this point, we must have the dialog set, and the dialog set * must have a dialog in the list. */ pj_assert(dlg_set && !pj_list_empty(&dlg_set->dlg_list)); /* Check for forked response. * Request will fork only for the initial INVITE request. */ //This doesn't work when there is authentication challenge, since //first_cseq evaluation will yield false. 
//if (rdata->msg_info.cseq->method.id == PJSIP_INVITE_METHOD && // rdata->msg_info.cseq->cseq == dlg_set->dlg_list.next->local.first_cseq) if (rdata->msg_info.cseq->method.id == PJSIP_INVITE_METHOD) { int st_code = rdata->msg_info.msg->line.status.code; pj_str_t *to_tag = &rdata->msg_info.to->tag; dlg = dlg_set->dlg_list.next; while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) { /* If there is dialog with no remote tag (i.e. dialog has not * been established yet), then send this response to that * dialog. */ if (dlg->remote.info->tag.slen == 0) break; /* Otherwise find the one with matching To tag. */ if (pj_stricmp(to_tag, &dlg->remote.info->tag) == 0) break; dlg = dlg->next; } /* If no dialog with matching remote tag is found, this must be * a forked response. Respond to this ONLY when response is non-100 * provisional response OR a 2xx response. */ if (dlg == (pjsip_dialog*)&dlg_set->dlg_list && ((st_code/100==1 && st_code!=100) || st_code/100==2)) { PJ_LOG(5,(THIS_FILE, "Received forked %s for existing dialog %s", pjsip_rx_data_get_info(rdata), dlg_set->dlg_list.next->obj_name)); /* Report to application about forked condition. * Application can either create a dialog or ignore the response. */ if (mod_ua.param.on_dlg_forked) { dlg = (*mod_ua.param.on_dlg_forked)(dlg_set->dlg_list.next, rdata); if (dlg == NULL) { pj_mutex_unlock(mod_ua.mutex); return PJ_TRUE; } } else { dlg = dlg_set->dlg_list.next; PJ_LOG(4,(THIS_FILE, "Unhandled forked %s from %s:%d, response will be " "handed over to the first dialog", pjsip_rx_data_get_info(rdata), rdata->pkt_info.src_name, rdata->pkt_info.src_port)); } } else if (dlg == (pjsip_dialog*)&dlg_set->dlg_list) { /* For 100 or non-2xx response which has different To tag, * pass the response to the first dialog. */ dlg = dlg_set->dlg_list.next; } } else { /* Either this is a non-INVITE response, or subsequent INVITE * within dialog. The dialog should have been identified when * the transaction was found. */ pj_assert(tsx != NULL); pj_assert(dlg != NULL); } /* The dialog must have been found. */ pj_assert(dlg != NULL); /* Put the dialog instance in the rdata. */ rdata->endpt_info.mod_data[mod_ua.mod.id] = dlg; /* Attempt to acquire lock to the dialog. */ PJ_LOG(6,(dlg->obj_name, "UA layer acquiring dialog lock for response")); status = pjsip_dlg_try_inc_lock(dlg); if (status != PJ_SUCCESS) { /* Failed to acquire dialog mutex. This could indicate a deadlock * situation, and for safety, try to avoid deadlock by releasing * UA mutex, yield, and retry the whole processing once again. */ pj_mutex_unlock(mod_ua.mutex); pj_thread_sleep(0); goto retry_on_deadlock; } /* We're done with processing in the UA layer, we can release the mutex */ pj_mutex_unlock(mod_ua.mutex); /* Pass the response to the dialog. */ pjsip_dlg_on_rx_response(dlg, rdata); /* Unlock the dialog. This may destroy the dialog. */ pjsip_dlg_dec_lock(dlg); /* Done. */ return PJ_TRUE; } #if PJ_LOG_MAX_LEVEL >= 3 static void print_dialog( const char *title, pjsip_dialog *dlg, char *buf, pj_size_t size) { int len; char userinfo[PJSIP_MAX_URL_SIZE]; len = pjsip_hdr_print_on(dlg->remote.info, userinfo, sizeof(userinfo)); if (len < 0) pj_ansi_strcpy(userinfo, "<--uri too long-->"); else userinfo[len] = '\0'; len = pj_ansi_snprintf(buf, size, "%s[%s] %s", title, (dlg->state==PJSIP_DIALOG_STATE_NULL ? " - " : "est"), userinfo); if (len < 1 || len >= (int)size) { pj_ansi_strcpy(buf, "<--uri too long-->"); } else buf[len] = '\0'; } #endif /* * Dump user agent contents (e.g. all dialogs). 
*/ PJ_DEF(void) pjsip_ua_dump(pj_bool_t detail) { #if PJ_LOG_MAX_LEVEL >= 3 pj_hash_iterator_t itbuf, *it; char dlginfo[128]; pj_mutex_lock(mod_ua.mutex); PJ_LOG(3, (THIS_FILE, "Number of dialog sets: %u", pj_hash_count(mod_ua.dlg_table))); if (detail && pj_hash_count(mod_ua.dlg_table)) { PJ_LOG(3, (THIS_FILE, "Dumping dialog sets:")); it = pj_hash_first(mod_ua.dlg_table, &itbuf); for (; it != NULL; it = pj_hash_next(mod_ua.dlg_table, it)) { struct dlg_set *dlg_set; pjsip_dialog *dlg; const char *title; dlg_set = (struct dlg_set*) pj_hash_this(mod_ua.dlg_table, it); if (!dlg_set || pj_list_empty(&dlg_set->dlg_list)) continue; /* First dialog in dialog set. */ dlg = dlg_set->dlg_list.next; if (dlg->role == PJSIP_ROLE_UAC) title = " [out] "; else title = " [in] "; print_dialog(title, dlg, dlginfo, sizeof(dlginfo)); PJ_LOG(3,(THIS_FILE, "%s", dlginfo)); /* Next dialog in dialog set (forked) */ dlg = dlg->next; while (dlg != (pjsip_dialog*) &dlg_set->dlg_list) { print_dialog(" [forked] ", dlg, dlginfo, sizeof(dlginfo)); dlg = dlg->next; } } } pj_mutex_unlock(mod_ua.mutex); #endif }
PJ_DEF(pj_status_t) pjsip_ua_unregister_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { struct dlg_set *dlg_set; pjsip_dialog *d; /* Sanity-check arguments. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* Check that dialog has been registered. */ PJ_ASSERT_RETURN(dlg->dlg_set, PJ_EINVALIDOP); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Find this dialog from the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; d = dlg_set->dlg_list.next; while (d != (pjsip_dialog*)&dlg_set->dlg_list && d != dlg) { d = d->next; } if (d != dlg) { pj_assert(!"Dialog is not registered!"); pj_mutex_unlock(mod_ua.mutex); return PJ_EINVALIDOP; } /* Remove this dialog from the list. */ pj_list_erase(dlg); /* If dialog list is empty, remove the dialog set from the hash table. */ if (pj_list_empty(&dlg_set->dlg_list)) { pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg->local.info->tag.ptr, (unsigned)dlg->local.info->tag.slen, dlg->local.tag_hval, NULL); /* Return dlg_set to free nodes. */ pj_list_push_back(&mod_ua.free_dlgset_nodes, dlg_set); } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; }
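The function above is the pre-fix pjsip_ua_unregister_dlg(): the hash entry is keyed directly by dlg->local.info->tag, and the entry is only removed once the dialog list becomes empty. Read against the fixed version that follows, this appears to be the spot where a dialog whose tag string backs the hash key can be unregistered (and its storage later released) while the dlg_set remains registered, leaving the table holding a dangling key; a toy sketch of that hazard and of the re-keying fix is given after the fixed version below.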
PJ_DEF(pj_status_t) pjsip_ua_unregister_dlg( pjsip_user_agent *ua, pjsip_dialog *dlg ) { struct dlg_set *dlg_set; pjsip_dialog *d; /* Sanity-check arguments. */ PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL); /* Check that dialog has been registered. */ PJ_ASSERT_RETURN(dlg->dlg_set, PJ_EINVALIDOP); /* Lock user agent. */ pj_mutex_lock(mod_ua.mutex); /* Find this dialog from the dialog set. */ dlg_set = (struct dlg_set*) dlg->dlg_set; d = dlg_set->dlg_list.next; while (d != (pjsip_dialog*)&dlg_set->dlg_list && d != dlg) { d = d->next; } if (d != dlg) { pj_assert(!"Dialog is not registered!"); pj_mutex_unlock(mod_ua.mutex); return PJ_EINVALIDOP; } /* Remove this dialog from the list. */ pj_list_erase(dlg); /* If dialog list is empty, remove the dialog set from the hash table. */ if (pj_list_empty(&dlg_set->dlg_list)) { /* Verify that the dialog set is valid */ pj_assert(pj_hash_get_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, &dlg->local.tag_hval) == dlg_set); pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, NULL); /* Return dlg_set to free nodes. */ pj_list_push_back(&mod_ua.free_dlgset_nodes, dlg_set); } else { /* If the just unregistered dialog is being used as hash key, * reset the dlg_set entry with a new key (i.e: from the first dialog * in dlg_set). */ if (dlg_set->ht_key.ptr == dlg->local.info->tag.ptr && dlg_set->ht_key.slen == dlg->local.info->tag.slen) { pjsip_dialog* key_dlg = dlg_set->dlg_list.next; /* Verify that the old & new keys share the hash value */ pj_assert(key_dlg->local.tag_hval == dlg->local.tag_hval); pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, dlg->local.tag_hval, NULL); dlg_set->ht_key = key_dlg->local.info->tag; pj_hash_set_np_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr, (unsigned)dlg_set->ht_key.slen, key_dlg->local.tag_hval, dlg_set->ht_entry, dlg_set); } } /* Unlock user agent. */ pj_mutex_unlock(mod_ua.mutex); /* Done. */ return PJ_SUCCESS; }
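The fixed version keeps the key in dlg_set->ht_key and, when the dialog that owns the key string leaves a non-empty set, re-registers the entry under the tag of the first surviving dialog. Below is a minimal, self-contained sketch of that idea in plain C. It is a toy model rather than pjlib code: struct member, struct set, and unregister_member() are hypothetical stand-ins for pjsip_dialog, struct dlg_set, and the unregister path, and the hash table itself is elided so the key-ownership issue stays in focus.

/*
 * Toy model (not pjlib code): a "set" keyed by a string owned by one of
 * its members. Mirrors the hazard fixed above: if the member that owns
 * the key string is removed and freed while the set stays registered,
 * the lookup key dangles (use-after-free). The fix keeps a key reference
 * in the set and re-keys it from a surviving member, as the patched
 * pjsip_ua_unregister_dlg() does with dlg_set->ht_key.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct member {                 /* stands in for pjsip_dialog */
    char *tag;                  /* stands in for dlg->local.info->tag */
    struct member *next;
};

struct set {                    /* stands in for struct dlg_set */
    const char *key;            /* stands in for dlg_set->ht_key */
    struct member *head;
};

/* Unregister 'm' from 's'; re-key the set if 'm' owned the key string. */
static void unregister_member(struct set *s, struct member *m)
{
    struct member **pp = &s->head;

    while (*pp && *pp != m)
        pp = &(*pp)->next;
    if (*pp)
        *pp = m->next;

    if (s->head && s->key == m->tag) {
        /* Key pointed into the departing member: switch to a survivor. */
        s->key = s->head->tag;
    } else if (!s->head) {
        s->key = NULL;          /* set is empty; it would leave the table */
    }

    free(m->tag);               /* the member's storage goes away here */
    free(m);
}

int main(void)
{
    struct member *a = calloc(1, sizeof(*a));
    struct member *b = calloc(1, sizeof(*b));
    struct set s;

    a->tag = strdup("tag-A");
    b->tag = strdup("tag-B");
    a->next = b;

    s.key = a->tag;             /* key borrowed from member 'a' */
    s.head = a;

    unregister_member(&s, a);   /* without re-keying, s.key would dangle */
    printf("set key is now: %s\n", s.key);   /* prints "tag-B" */

    unregister_member(&s, b);
    return 0;
}

Running the sketch prints "set key is now: tag-B": the set survives removal of the member that originally supplied its key without ever dereferencing freed memory, which is exactly the property the re-keying branch in the patched function provides.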
{'added': [(68, ' /* Entry key in the hash table */'), (69, ' pj_str_t ht_key;'), (70, ''), (333, '\t dlg_set->ht_key = dlg->local.info->tag;'), (341, '\t\t\t dlg_set->ht_key.ptr,'), (342, ' (unsigned)dlg_set->ht_key.slen,'), (352, '\tdlg_set->ht_key = dlg->local.info->tag;'), (359, '\t\t dlg_set->ht_key.ptr,'), (360, ' (unsigned)dlg_set->ht_key.slen,'), (405, ''), (406, '\t/* Verify that the dialog set is valid */'), (407, '\tpj_assert(pj_hash_get_lower(mod_ua.dlg_table, dlg_set->ht_key.ptr,'), (408, '\t\t\t\t (unsigned)dlg_set->ht_key.slen,'), (409, '\t\t\t\t &dlg->local.tag_hval) == dlg_set);'), (410, ''), (411, '\tpj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr,'), (412, '\t\t (unsigned)dlg_set->ht_key.slen,'), (417, ' } else {'), (418, '\t/* If the just unregistered dialog is being used as hash key,'), (419, '\t * reset the dlg_set entry with a new key (i.e: from the first dialog'), (420, '\t * in dlg_set).'), (421, '\t */'), (422, '\tif (dlg_set->ht_key.ptr == dlg->local.info->tag.ptr &&'), (423, '\t dlg_set->ht_key.slen == dlg->local.info->tag.slen)'), (424, '\t{'), (425, '\t pjsip_dialog* key_dlg = dlg_set->dlg_list.next;'), (426, ''), (427, '\t /* Verify that the old & new keys share the hash value */'), (428, '\t pj_assert(key_dlg->local.tag_hval == dlg->local.tag_hval);'), (429, ''), (430, '\t pj_hash_set_lower(NULL, mod_ua.dlg_table, dlg_set->ht_key.ptr,'), (431, '\t\t\t (unsigned)dlg_set->ht_key.slen,'), (432, '\t\t\t dlg->local.tag_hval, NULL);'), (433, ''), (434, '\t dlg_set->ht_key = key_dlg->local.info->tag;'), (435, ''), (436, '\t pj_hash_set_np_lower(mod_ua.dlg_table,'), (437, '\t\t\t\t dlg_set->ht_key.ptr,'), (438, '\t\t\t\t (unsigned)dlg_set->ht_key.slen,'), (439, '\t\t\t\t key_dlg->local.tag_hval, dlg_set->ht_entry,'), (440, '\t\t\t\t dlg_set);'), (441, '\t}')], 'deleted': [(337, '\t\t\t dlg->local.info->tag.ptr,'), (338, ' (unsigned)dlg->local.info->tag.slen,'), (354, '\t\t dlg->local.info->tag.ptr,'), (355, ' (unsigned)dlg->local.info->tag.slen,'), (400, '\tpj_hash_set_lower(NULL, mod_ua.dlg_table, dlg->local.info->tag.ptr,'), (401, '\t\t (unsigned)dlg->local.info->tag.slen,')]}
42
6
538
3384
https://github.com/pjsip/pjproject
CVE-2022-23608
['CWE-416']
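For context, CWE-416 denotes use-after-free, which is consistent with the dangling hash-key scenario that the pjproject patch above (CVE-2022-23608) addresses.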
scrub.c
btrfs_scrub_dev
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2011, 2012 STRATO. All rights reserved. */ #include <linux/blkdev.h> #include <linux/ratelimit.h> #include <linux/sched/mm.h> #include "ctree.h" #include "volumes.h" #include "disk-io.h" #include "ordered-data.h" #include "transaction.h" #include "backref.h" #include "extent_io.h" #include "dev-replace.h" #include "check-integrity.h" #include "rcu-string.h" #include "raid56.h" /* * This is only the first step towards a full-features scrub. It reads all * extent and super block and verifies the checksums. In case a bad checksum * is found or the extent cannot be read, good data will be written back if * any can be found. * * Future enhancements: * - In case an unrepairable extent is encountered, track which files are * affected and report them * - track and record media errors, throw out bad devices * - add a mode to also read unallocated space */ struct scrub_block; struct scrub_ctx; /* * the following three values only influence the performance. * The last one configures the number of parallel and outstanding I/O * operations. The first two values configure an upper limit for the number * of (dynamically allocated) pages that are added to a bio. */ #define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */ #define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */ #define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */ /* * the following value times PAGE_SIZE needs to be large enough to match the * largest node/leaf/sector size that shall be supported. * Values larger than BTRFS_STRIPE_LEN are not supported. */ #define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */ struct scrub_recover { refcount_t refs; struct btrfs_bio *bbio; u64 map_length; }; struct scrub_page { struct scrub_block *sblock; struct page *page; struct btrfs_device *dev; struct list_head list; u64 flags; /* extent flags */ u64 generation; u64 logical; u64 physical; u64 physical_for_dev_replace; atomic_t refs; struct { unsigned int mirror_num:8; unsigned int have_csum:1; unsigned int io_error:1; }; u8 csum[BTRFS_CSUM_SIZE]; struct scrub_recover *recover; }; struct scrub_bio { int index; struct scrub_ctx *sctx; struct btrfs_device *dev; struct bio *bio; blk_status_t status; u64 logical; u64 physical; #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO]; #else struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO]; #endif int page_count; int next_free; struct btrfs_work work; }; struct scrub_block { struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK]; int page_count; atomic_t outstanding_pages; refcount_t refs; /* free mem on transition to zero */ struct scrub_ctx *sctx; struct scrub_parity *sparity; struct { unsigned int header_error:1; unsigned int checksum_error:1; unsigned int no_io_error_seen:1; unsigned int generation_error:1; /* also sets header_error */ /* The following is for the data used to check parity */ /* It is for the data with checksum */ unsigned int data_corrected:1; }; struct btrfs_work work; }; /* Used for the chunks with parity stripe such RAID5/6 */ struct scrub_parity { struct scrub_ctx *sctx; struct btrfs_device *scrub_dev; u64 logic_start; u64 logic_end; int nsectors; u64 stripe_len; refcount_t refs; struct list_head spages; /* Work of parity check and repair */ struct btrfs_work work; /* Mark the parity blocks which have data */ unsigned long *dbitmap; /* * Mark the parity blocks which have data, but errors happen when * read data or check data */ unsigned long *ebitmap; unsigned long bitmap[0]; 
}; struct scrub_ctx { struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX]; struct btrfs_fs_info *fs_info; int first_free; int curr; atomic_t bios_in_flight; atomic_t workers_pending; spinlock_t list_lock; wait_queue_head_t list_wait; u16 csum_size; struct list_head csum_list; atomic_t cancel_req; int readonly; int pages_per_rd_bio; int is_dev_replace; struct scrub_bio *wr_curr_bio; struct mutex wr_lock; int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */ struct btrfs_device *wr_tgtdev; bool flush_all_writes; /* * statistics */ struct btrfs_scrub_progress stat; spinlock_t stat_lock; /* * Use a ref counter to avoid use-after-free issues. Scrub workers * decrement bios_in_flight and workers_pending and then do a wakeup * on the list_wait wait queue. We must ensure the main scrub task * doesn't free the scrub context before or while the workers are * doing the wakeup() call. */ refcount_t refs; }; struct scrub_warning { struct btrfs_path *path; u64 extent_item_size; const char *errstr; u64 physical; u64 logical; struct btrfs_device *dev; }; struct full_stripe_lock { struct rb_node node; u64 logical; u64 refs; struct mutex mutex; }; static void scrub_pending_bio_inc(struct scrub_ctx *sctx); static void scrub_pending_bio_dec(struct scrub_ctx *sctx); static int scrub_handle_errored_block(struct scrub_block *sblock_to_check); static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck); static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int retry_failed_mirror); static void scrub_recheck_block_checksum(struct scrub_block *sblock); static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good); static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write); static void scrub_write_block_to_dev_replace(struct scrub_block *sblock); static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, int page_num); static int scrub_checksum_data(struct scrub_block *sblock); static int scrub_checksum_tree_block(struct scrub_block *sblock); static int scrub_checksum_super(struct scrub_block *sblock); static void scrub_block_get(struct scrub_block *sblock); static void scrub_block_put(struct scrub_block *sblock); static void scrub_page_get(struct scrub_page *spage); static void scrub_page_put(struct scrub_page *spage); static void scrub_parity_get(struct scrub_parity *sparity); static void scrub_parity_put(struct scrub_parity *sparity); static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace); static void scrub_bio_end_io(struct bio *bio); static void scrub_bio_end_io_worker(struct btrfs_work *work); static void scrub_block_complete(struct scrub_block *sblock); static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u64 extent_len, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num); static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static void scrub_wr_submit(struct scrub_ctx *sctx); static void scrub_wr_bio_end_io(struct bio *bio); static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void 
scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_put_ctx(struct scrub_ctx *sctx); static inline int scrub_is_page_on_raid56(struct scrub_page *page) { return page->recover && (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); } static void scrub_pending_bio_inc(struct scrub_ctx *sctx) { refcount_inc(&sctx->refs); atomic_inc(&sctx->bios_in_flight); } static void scrub_pending_bio_dec(struct scrub_ctx *sctx) { atomic_dec(&sctx->bios_in_flight); wake_up(&sctx->list_wait); scrub_put_ctx(sctx); } static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { while (atomic_read(&fs_info->scrub_pause_req)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrub_pause_req) == 0); mutex_lock(&fs_info->scrub_lock); } } static void scrub_pause_on(struct btrfs_fs_info *fs_info) { atomic_inc(&fs_info->scrubs_paused); wake_up(&fs_info->scrub_pause_wait); } static void scrub_pause_off(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); __scrub_blocked_if_needed(fs_info); atomic_dec(&fs_info->scrubs_paused); mutex_unlock(&fs_info->scrub_lock); wake_up(&fs_info->scrub_pause_wait); } static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { scrub_pause_on(fs_info); scrub_pause_off(fs_info); } /* * Insert new full stripe lock into full stripe locks tree * * Return pointer to existing or newly inserted full_stripe_lock structure if * everything works well. * Return ERR_PTR(-ENOMEM) if we failed to allocate memory * * NOTE: caller must hold full_stripe_locks_root->lock before calling this * function */ static struct full_stripe_lock *insert_full_stripe_lock( struct btrfs_full_stripe_locks_tree *locks_root, u64 fstripe_logical) { struct rb_node **p; struct rb_node *parent = NULL; struct full_stripe_lock *entry; struct full_stripe_lock *ret; lockdep_assert_held(&locks_root->lock); p = &locks_root->root.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct full_stripe_lock, node); if (fstripe_logical < entry->logical) { p = &(*p)->rb_left; } else if (fstripe_logical > entry->logical) { p = &(*p)->rb_right; } else { entry->refs++; return entry; } } /* * Insert new lock. */ ret = kmalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return ERR_PTR(-ENOMEM); ret->logical = fstripe_logical; ret->refs = 1; mutex_init(&ret->mutex); rb_link_node(&ret->node, parent, p); rb_insert_color(&ret->node, &locks_root->root); return ret; } /* * Search for a full stripe lock of a block group * * Return pointer to existing full stripe lock if found * Return NULL if not found */ static struct full_stripe_lock *search_full_stripe_lock( struct btrfs_full_stripe_locks_tree *locks_root, u64 fstripe_logical) { struct rb_node *node; struct full_stripe_lock *entry; lockdep_assert_held(&locks_root->lock); node = locks_root->root.rb_node; while (node) { entry = rb_entry(node, struct full_stripe_lock, node); if (fstripe_logical < entry->logical) node = node->rb_left; else if (fstripe_logical > entry->logical) node = node->rb_right; else return entry; } return NULL; } /* * Helper to get full stripe logical from a normal bytenr. * * Caller must ensure @cache is a RAID56 block group. */ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, u64 bytenr) { u64 ret; /* * Due to chunk item size limit, full stripe length should not be * larger than U32_MAX. Just a sanity check here. 
*/ WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX); /* * round_down() can only handle power of 2, while RAID56 full * stripe length can be 64KiB * n, so we need to manually round down. */ ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) * cache->full_stripe_len + cache->key.objectid; return ret; } /* * Lock a full stripe to avoid concurrency of recovery and read * * It's only used for profiles with parities (RAID5/6), for other profiles it * does nothing. * * Return 0 if we locked full stripe covering @bytenr, with a mutex held. * So caller must call unlock_full_stripe() at the same context. * * Return <0 if encounters error. */ static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool *locked_ret) { struct btrfs_block_group_cache *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *existing; u64 fstripe_start; int ret = 0; *locked_ret = false; bg_cache = btrfs_lookup_block_group(fs_info, bytenr); if (!bg_cache) { ASSERT(0); return -ENOENT; } /* Profiles not based on parity don't need full stripe lock */ if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) goto out; locks_root = &bg_cache->full_stripe_locks_root; fstripe_start = get_full_stripe_logical(bg_cache, bytenr); /* Now insert the full stripe lock */ mutex_lock(&locks_root->lock); existing = insert_full_stripe_lock(locks_root, fstripe_start); mutex_unlock(&locks_root->lock); if (IS_ERR(existing)) { ret = PTR_ERR(existing); goto out; } mutex_lock(&existing->mutex); *locked_ret = true; out: btrfs_put_block_group(bg_cache); return ret; } /* * Unlock a full stripe. * * NOTE: Caller must ensure it's the same context calling corresponding * lock_full_stripe(). * * Return 0 if we unlock full stripe without problem. * Return <0 for error */ static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool locked) { struct btrfs_block_group_cache *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *fstripe_lock; u64 fstripe_start; bool freeit = false; int ret = 0; /* If we didn't acquire full stripe lock, no need to continue */ if (!locked) return 0; bg_cache = btrfs_lookup_block_group(fs_info, bytenr); if (!bg_cache) { ASSERT(0); return -ENOENT; } if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) goto out; locks_root = &bg_cache->full_stripe_locks_root; fstripe_start = get_full_stripe_logical(bg_cache, bytenr); mutex_lock(&locks_root->lock); fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start); /* Unpaired unlock_full_stripe() detected */ if (!fstripe_lock) { WARN_ON(1); ret = -ENOENT; mutex_unlock(&locks_root->lock); goto out; } if (fstripe_lock->refs == 0) { WARN_ON(1); btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow", fstripe_lock->logical); } else { fstripe_lock->refs--; } if (fstripe_lock->refs == 0) { rb_erase(&fstripe_lock->node, &locks_root->root); freeit = true; } mutex_unlock(&locks_root->lock); mutex_unlock(&fstripe_lock->mutex); if (freeit) kfree(fstripe_lock); out: btrfs_put_block_group(bg_cache); return ret; } static void scrub_free_csums(struct scrub_ctx *sctx) { while (!list_empty(&sctx->csum_list)) { struct btrfs_ordered_sum *sum; sum = list_first_entry(&sctx->csum_list, struct btrfs_ordered_sum, list); list_del(&sum->list); kfree(sum); } } static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) { int i; if (!sctx) return; /* this can happen when scrub is cancelled */ if (sctx->curr != -1) { struct scrub_bio *sbio = sctx->bios[sctx->curr]; for (i = 0; i < 
sbio->page_count; i++) { WARN_ON(!sbio->pagev[i]->page); scrub_block_put(sbio->pagev[i]->sblock); } bio_put(sbio->bio); } for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { struct scrub_bio *sbio = sctx->bios[i]; if (!sbio) break; kfree(sbio); } kfree(sctx->wr_curr_bio); scrub_free_csums(sctx); kfree(sctx); } static void scrub_put_ctx(struct scrub_ctx *sctx) { if (refcount_dec_and_test(&sctx->refs)) scrub_free_ctx(sctx); } static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( struct btrfs_fs_info *fs_info, int is_dev_replace) { struct scrub_ctx *sctx; int i; sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); if (!sctx) goto nomem; refcount_set(&sctx->refs, 1); sctx->is_dev_replace = is_dev_replace; sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; sctx->curr = -1; sctx->fs_info = fs_info; for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { struct scrub_bio *sbio; sbio = kzalloc(sizeof(*sbio), GFP_KERNEL); if (!sbio) goto nomem; sctx->bios[i] = sbio; sbio->index = i; sbio->sctx = sctx; sbio->page_count = 0; btrfs_init_work(&sbio->work, btrfs_scrub_helper, scrub_bio_end_io_worker, NULL, NULL); if (i != SCRUB_BIOS_PER_SCTX - 1) sctx->bios[i]->next_free = i + 1; else sctx->bios[i]->next_free = -1; } sctx->first_free = 0; atomic_set(&sctx->bios_in_flight, 0); atomic_set(&sctx->workers_pending, 0); atomic_set(&sctx->cancel_req, 0); sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); INIT_LIST_HEAD(&sctx->csum_list); spin_lock_init(&sctx->list_lock); spin_lock_init(&sctx->stat_lock); init_waitqueue_head(&sctx->list_wait); WARN_ON(sctx->wr_curr_bio != NULL); mutex_init(&sctx->wr_lock); sctx->wr_curr_bio = NULL; if (is_dev_replace) { WARN_ON(!fs_info->dev_replace.tgtdev); sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; sctx->flush_all_writes = false; } return sctx; nomem: scrub_free_ctx(sctx); return ERR_PTR(-ENOMEM); } static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *warn_ctx) { u64 isize; u32 nlink; int ret; int i; unsigned nofs_flag; struct extent_buffer *eb; struct btrfs_inode_item *inode_item; struct scrub_warning *swarn = warn_ctx; struct btrfs_fs_info *fs_info = swarn->dev->fs_info; struct inode_fs_paths *ipath = NULL; struct btrfs_root *local_root; struct btrfs_key root_key; struct btrfs_key key; root_key.objectid = root; root_key.type = BTRFS_ROOT_ITEM_KEY; root_key.offset = (u64)-1; local_root = btrfs_read_fs_root_no_name(fs_info, &root_key); if (IS_ERR(local_root)) { ret = PTR_ERR(local_root); goto err; } /* * this makes the path point to (inum INODE_ITEM ioff) */ key.objectid = inum; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); if (ret) { btrfs_release_path(swarn->path); goto err; } eb = swarn->path->nodes[0]; inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], struct btrfs_inode_item); isize = btrfs_inode_size(eb, inode_item); nlink = btrfs_inode_nlink(eb, inode_item); btrfs_release_path(swarn->path); /* * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub * uses GFP_NOFS in this context, so we keep it consistent but it does * not seem to be strictly necessary. 
*/ nofs_flag = memalloc_nofs_save(); ipath = init_ipath(4096, local_root, swarn->path); memalloc_nofs_restore(nofs_flag); if (IS_ERR(ipath)) { ret = PTR_ERR(ipath); ipath = NULL; goto err; } ret = paths_from_inode(inum, ipath); if (ret < 0) goto err; /* * we deliberately ignore the bit ipath might have been too small to * hold all of the paths here */ for (i = 0; i < ipath->fspath->elem_cnt; ++i) btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)", swarn->errstr, swarn->logical, rcu_str_deref(swarn->dev->name), swarn->physical, root, inum, offset, min(isize - offset, (u64)PAGE_SIZE), nlink, (char *)(unsigned long)ipath->fspath->val[i]); free_ipath(ipath); return 0; err: btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d", swarn->errstr, swarn->logical, rcu_str_deref(swarn->dev->name), swarn->physical, root, inum, offset, ret); free_ipath(ipath); return 0; } static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) { struct btrfs_device *dev; struct btrfs_fs_info *fs_info; struct btrfs_path *path; struct btrfs_key found_key; struct extent_buffer *eb; struct btrfs_extent_item *ei; struct scrub_warning swarn; unsigned long ptr = 0; u64 extent_item_pos; u64 flags = 0; u64 ref_root; u32 item_size; u8 ref_level = 0; int ret; WARN_ON(sblock->page_count < 1); dev = sblock->pagev[0]->dev; fs_info = sblock->sctx->fs_info; path = btrfs_alloc_path(); if (!path) return; swarn.physical = sblock->pagev[0]->physical; swarn.logical = sblock->pagev[0]->logical; swarn.errstr = errstr; swarn.dev = NULL; ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, &flags); if (ret < 0) goto out; extent_item_pos = swarn.logical - found_key.objectid; swarn.extent_item_size = found_key.offset; eb = path->nodes[0]; ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); item_size = btrfs_item_size_nr(eb, path->slots[0]); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { do { ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, item_size, &ref_root, &ref_level); btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu", errstr, swarn.logical, rcu_str_deref(dev->name), swarn.physical, ref_level ? "node" : "leaf", ret < 0 ? -1 : ref_level, ret < 0 ? -1 : ref_root); } while (ret != 1); btrfs_release_path(path); } else { btrfs_release_path(path); swarn.path = path; swarn.dev = dev; iterate_extent_inodes(fs_info, found_key.objectid, extent_item_pos, 1, scrub_print_warning_inode, &swarn, false); } out: btrfs_free_path(path); } static inline void scrub_get_recover(struct scrub_recover *recover) { refcount_inc(&recover->refs); } static inline void scrub_put_recover(struct btrfs_fs_info *fs_info, struct scrub_recover *recover) { if (refcount_dec_and_test(&recover->refs)) { btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(recover->bbio); kfree(recover); } } /* * scrub_handle_errored_block gets called when either verification of the * pages failed or the bio failed to read, e.g. with EIO. In the latter * case, this function handles all pages in the bio, even though only one * may be bad. * The goal of this function is to repair the errored block by using the * contents of one of the mirrors. 
*/ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) { struct scrub_ctx *sctx = sblock_to_check->sctx; struct btrfs_device *dev; struct btrfs_fs_info *fs_info; u64 logical; unsigned int failed_mirror_index; unsigned int is_metadata; unsigned int have_csum; struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */ struct scrub_block *sblock_bad; int ret; int mirror_index; int page_num; int success; bool full_stripe_locked; unsigned int nofs_flag; static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); BUG_ON(sblock_to_check->page_count < 1); fs_info = sctx->fs_info; if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { /* * if we find an error in a super block, we just report it. * They will get written with the next transaction commit * anyway */ spin_lock(&sctx->stat_lock); ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); return 0; } logical = sblock_to_check->pagev[0]->logical; BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1); failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1; is_metadata = !(sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA); have_csum = sblock_to_check->pagev[0]->have_csum; dev = sblock_to_check->pagev[0]->dev; /* * We must use GFP_NOFS because the scrub task might be waiting for a * worker task executing this function and in turn a transaction commit * might be waiting the scrub task to pause (which needs to wait for all * the worker tasks to complete before pausing). * We do allocations in the workers through insert_full_stripe_lock() * and scrub_add_page_to_wr_bio(), which happens down the call chain of * this function. */ nofs_flag = memalloc_nofs_save(); /* * For RAID5/6, race can happen for a different device scrub thread. * For data corruption, Parity and Data threads will both try * to recovery the data. * Race can lead to doubly added csum error, or even unrecoverable * error. */ ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); if (ret < 0) { memalloc_nofs_restore(nofs_flag); spin_lock(&sctx->stat_lock); if (ret == -ENOMEM) sctx->stat.malloc_errors++; sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); return ret; } /* * read all mirrors one after the other. This includes to * re-read the extent or metadata block that failed (that was * the cause that this fixup code is called) another time, * page by page this time in order to know which pages * caused I/O errors and which ones are good (for all mirrors). * It is the goal to handle the situation when more than one * mirror contains I/O errors, but the errors do not * overlap, i.e. the data can be repaired by selecting the * pages from those mirrors without I/O error on the * particular pages. One example (with blocks >= 2 * PAGE_SIZE) * would be that mirror #1 has an I/O error on the first page, * the second page is good, and mirror #2 has an I/O error on * the second page, but the first page is good. * Then the first page of the first mirror can be repaired by * taking the first page of the second mirror, and the * second page of the second mirror can be repaired by * copying the contents of the 2nd page of the 1st mirror. * One more note: if the pages of one mirror contain I/O * errors, the checksum cannot be verified. In order to get * the best data for repairing, the first attempt is to find * a mirror without I/O errors and with a validated checksum. 
* Only if this is not possible, the pages are picked from * mirrors with I/O errors without considering the checksum. * If the latter is the case, at the end, the checksum of the * repaired area is verified in order to correctly maintain * the statistics. */ sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS, sizeof(*sblocks_for_recheck), GFP_KERNEL); if (!sblocks_for_recheck) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); goto out; } /* setup the context, map the logical blocks and alloc the pages */ ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); if (ret) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); goto out; } BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); sblock_bad = sblocks_for_recheck + failed_mirror_index; /* build and submit the bios for the failed mirror, check checksums */ scrub_recheck_block(fs_info, sblock_bad, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) { /* * the error disappeared after reading page by page, or * the area was part of a huge bio and other parts of the * bio caused I/O errors, or the block layer merged several * read requests into one and the error is caused by a * different bio (usually one of the two latter cases is * the cause) */ spin_lock(&sctx->stat_lock); sctx->stat.unverified_errors++; sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); if (sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock_bad); goto out; } if (!sblock_bad->no_io_error_seen) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("i/o error", sblock_to_check); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); } else if (sblock_bad->checksum_error) { spin_lock(&sctx->stat_lock); sctx->stat.csum_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("checksum error", sblock_to_check); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); } else if (sblock_bad->header_error) { spin_lock(&sctx->stat_lock); sctx->stat.verify_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("checksum/header error", sblock_to_check); if (sblock_bad->generation_error) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_GENERATION_ERRS); else btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); } if (sctx->readonly) { ASSERT(!sctx->is_dev_replace); goto out; } /* * now build and submit the bios for the other mirrors, check * checksums. * First try to pick the mirror which is completely without I/O * errors and also does not have a checksum error. * If one is found, and if a checksum is present, the full block * that is known to contain an error is rewritten. Afterwards * the block is known to be corrected. * If a mirror is found which is completely correct, and no * checksum is present, only those pages are rewritten that had * an I/O error in the block to be repaired, since it cannot be * determined, which copy of the other pages is better (and it * could happen otherwise that a correct page would be * overwritten by a bad one). 
*/ for (mirror_index = 0; ;mirror_index++) { struct scrub_block *sblock_other; if (mirror_index == failed_mirror_index) continue; /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */ if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) { if (mirror_index >= BTRFS_MAX_MIRRORS) break; if (!sblocks_for_recheck[mirror_index].page_count) break; sblock_other = sblocks_for_recheck + mirror_index; } else { struct scrub_recover *r = sblock_bad->pagev[0]->recover; int max_allowed = r->bbio->num_stripes - r->bbio->num_tgtdevs; if (mirror_index >= max_allowed) break; if (!sblocks_for_recheck[1].page_count) break; ASSERT(failed_mirror_index == 0); sblock_other = sblocks_for_recheck + 1; sblock_other->pagev[0]->mirror_num = 1 + mirror_index; } /* build and submit the bios, check checksums */ scrub_recheck_block(fs_info, sblock_other, 0); if (!sblock_other->header_error && !sblock_other->checksum_error && sblock_other->no_io_error_seen) { if (sctx->is_dev_replace) { scrub_write_block_to_dev_replace(sblock_other); goto corrected_error; } else { ret = scrub_repair_block_from_good_copy( sblock_bad, sblock_other); if (!ret) goto corrected_error; } } } if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) goto did_not_correct_error; /* * In case of I/O errors in the area that is supposed to be * repaired, continue by picking good copies of those pages. * Select the good pages from mirrors to rewrite bad pages from * the area to fix. Afterwards verify the checksum of the block * that is supposed to be repaired. This verification step is * only done for the purpose of statistic counting and for the * final scrub report, whether errors remain. * A perfect algorithm could make use of the checksum and try * all possible combinations of pages from the different mirrors * until the checksum verification succeeds. For example, when * the 2nd page of mirror #1 faces I/O errors, and the 2nd page * of mirror #2 is readable but the final checksum test fails, * then the 2nd page of mirror #3 could be tried, whether now * the final checksum succeeds. But this would be a rare * exception and is therefore not implemented. At least it is * avoided that the good copy is overwritten. * A more useful improvement would be to pick the sectors * without I/O error based on sector sizes (512 bytes on legacy * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one * mirror could be repaired by taking 512 byte of a different * mirror, even if other 512 byte sectors in the same PAGE_SIZE * area are unreadable. */ success = 1; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_block *sblock_other = NULL; /* skip no-io-error page in scrub */ if (!page_bad->io_error && !sctx->is_dev_replace) continue; if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) { /* * In case of dev replace, if raid56 rebuild process * didn't work out correct data, then copy the content * in sblock_bad to make sure target device is identical * to source device, instead of writing garbage data in * sblock_for_recheck array to target device. */ sblock_other = NULL; } else if (page_bad->io_error) { /* try to find no-io-error page in mirrors */ for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS && sblocks_for_recheck[mirror_index].page_count > 0; mirror_index++) { if (!sblocks_for_recheck[mirror_index]. 
pagev[page_num]->io_error) { sblock_other = sblocks_for_recheck + mirror_index; break; } } if (!sblock_other) success = 0; } if (sctx->is_dev_replace) { /* * did not find a mirror to fetch the page * from. scrub_write_page_to_dev_replace() * handles this case (page->io_error), by * filling the block with zeros before * submitting the write request */ if (!sblock_other) sblock_other = sblock_bad; if (scrub_write_page_to_dev_replace(sblock_other, page_num) != 0) { atomic64_inc( &fs_info->dev_replace.num_write_errors); success = 0; } } else if (sblock_other) { ret = scrub_repair_page_from_good_copy(sblock_bad, sblock_other, page_num, 0); if (0 == ret) page_bad->io_error = 0; else success = 0; } } if (success && !sctx->is_dev_replace) { if (is_metadata || have_csum) { /* * need to verify the checksum now that all * sectors on disk are repaired (the write * request for data to be repaired is on its way). * Just be lazy and use scrub_recheck_block() * which re-reads the data before the checksum * is verified, but most likely the data comes out * of the page cache. */ scrub_recheck_block(fs_info, sblock_bad, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) goto corrected_error; else goto did_not_correct_error; } else { corrected_error: spin_lock(&sctx->stat_lock); sctx->stat.corrected_errors++; sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "fixed up error at logical %llu on dev %s", logical, rcu_str_deref(dev->name)); } } else { did_not_correct_error: spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "unable to fixup (regular) error at logical %llu on dev %s", logical, rcu_str_deref(dev->name)); } out: if (sblocks_for_recheck) { for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) { struct scrub_block *sblock = sblocks_for_recheck + mirror_index; struct scrub_recover *recover; int page_index; for (page_index = 0; page_index < sblock->page_count; page_index++) { sblock->pagev[page_index]->sblock = NULL; recover = sblock->pagev[page_index]->recover; if (recover) { scrub_put_recover(fs_info, recover); sblock->pagev[page_index]->recover = NULL; } scrub_page_put(sblock->pagev[page_index]); } } kfree(sblocks_for_recheck); } ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); memalloc_nofs_restore(nofs_flag); if (ret < 0) return ret; return 0; } static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) { if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) return 2; else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) return 3; else return (int)bbio->num_stripes; } static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, u64 *raid_map, u64 mapped_length, int nstripes, int mirror, int *stripe_index, u64 *stripe_offset) { int i; if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* RAID5/6 */ for (i = 0; i < nstripes; i++) { if (raid_map[i] == RAID6_Q_STRIPE || raid_map[i] == RAID5_P_STRIPE) continue; if (logical >= raid_map[i] && logical < raid_map[i] + mapped_length) break; } *stripe_index = i; *stripe_offset = logical - raid_map[i]; } else { /* The other RAID type */ *stripe_index = mirror; *stripe_offset = 0; } } static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck) { struct scrub_ctx *sctx = original_sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 length = original_sblock->page_count * PAGE_SIZE; u64 logical 
= original_sblock->pagev[0]->logical; u64 generation = original_sblock->pagev[0]->generation; u64 flags = original_sblock->pagev[0]->flags; u64 have_csum = original_sblock->pagev[0]->have_csum; struct scrub_recover *recover; struct btrfs_bio *bbio; u64 sublen; u64 mapped_length; u64 stripe_offset; int stripe_index; int page_index = 0; int mirror_index; int nmirrors; int ret; /* * note: the two members refs and outstanding_pages * are not used (and not set) in the blocks that are used for * the recheck procedure */ while (length > 0) { sublen = min_t(u64, length, PAGE_SIZE); mapped_length = sublen; bbio = NULL; /* * with a length of PAGE_SIZE, each returned stripe * represents one mirror */ btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &mapped_length, &bbio); if (ret || !bbio || mapped_length < sublen) { btrfs_put_bbio(bbio); btrfs_bio_counter_dec(fs_info); return -EIO; } recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); if (!recover) { btrfs_put_bbio(bbio); btrfs_bio_counter_dec(fs_info); return -ENOMEM; } refcount_set(&recover->refs, 1); recover->bbio = bbio; recover->map_length = mapped_length; BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK); nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; struct scrub_page *page; sblock = sblocks_for_recheck + mirror_index; sblock->sctx = sctx; page = kzalloc(sizeof(*page), GFP_NOFS); if (!page) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_put_recover(fs_info, recover); return -ENOMEM; } scrub_page_get(page); sblock->pagev[page_index] = page; page->sblock = sblock; page->flags = flags; page->generation = generation; page->logical = logical; page->have_csum = have_csum; if (have_csum) memcpy(page->csum, original_sblock->pagev[0]->csum, sctx->csum_size); scrub_stripe_index_and_offset(logical, bbio->map_type, bbio->raid_map, mapped_length, bbio->num_stripes - bbio->num_tgtdevs, mirror_index, &stripe_index, &stripe_offset); page->physical = bbio->stripes[stripe_index].physical + stripe_offset; page->dev = bbio->stripes[stripe_index].dev; BUG_ON(page_index >= original_sblock->page_count); page->physical_for_dev_replace = original_sblock->pagev[page_index]-> physical_for_dev_replace; /* for missing devices, dev->bdev is NULL */ page->mirror_num = mirror_index + 1; sblock->page_count++; page->page = alloc_page(GFP_NOFS); if (!page->page) goto leave_nomem; scrub_get_recover(recover); page->recover = recover; } scrub_put_recover(fs_info, recover); length -= sublen; logical += sublen; page_index++; } return 0; } static void scrub_bio_wait_endio(struct bio *bio) { complete(bio->bi_private); } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, struct bio *bio, struct scrub_page *page) { DECLARE_COMPLETION_ONSTACK(done); int ret; int mirror_num; bio->bi_iter.bi_sector = page->logical >> 9; bio->bi_private = &done; bio->bi_end_io = scrub_bio_wait_endio; mirror_num = page->sblock->pagev[0]->mirror_num; ret = raid56_parity_recover(fs_info, bio, page->recover->bbio, page->recover->map_length, mirror_num, 0); if (ret) return ret; wait_for_completion_io(&done); return blk_status_to_errno(bio->bi_status); } static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info, struct scrub_block *sblock) { struct scrub_page *first_page = sblock->pagev[0]; struct bio *bio; int page_num; /* All pages in sblock 
belong to the same stripe on the same device. */ ASSERT(first_page->dev); if (!first_page->dev->bdev) goto out; bio = btrfs_io_bio_alloc(BIO_MAX_PAGES); bio_set_dev(bio, first_page->dev->bdev); for (page_num = 0; page_num < sblock->page_count; page_num++) { struct scrub_page *page = sblock->pagev[page_num]; WARN_ON(!page->page); bio_add_page(bio, page->page, PAGE_SIZE, 0); } if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) { bio_put(bio); goto out; } bio_put(bio); scrub_recheck_block_checksum(sblock); return; out: for (page_num = 0; page_num < sblock->page_count; page_num++) sblock->pagev[page_num]->io_error = 1; sblock->no_io_error_seen = 0; } /* * this function will check the on disk data for checksum errors, header * errors and read I/O errors. If any I/O errors happen, the exact pages * which are errored are marked as being bad. The goal is to enable scrub * to take those pages that are not errored from all the mirrors so that * the pages that are errored in the just handled mirror can be repaired. */ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int retry_failed_mirror) { int page_num; sblock->no_io_error_seen = 1; /* short cut for raid56 */ if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0])) return scrub_recheck_block_on_raid56(fs_info, sblock); for (page_num = 0; page_num < sblock->page_count; page_num++) { struct bio *bio; struct scrub_page *page = sblock->pagev[page_num]; if (page->dev->bdev == NULL) { page->io_error = 1; sblock->no_io_error_seen = 0; continue; } WARN_ON(!page->page); bio = btrfs_io_bio_alloc(1); bio_set_dev(bio, page->dev->bdev); bio_add_page(bio, page->page, PAGE_SIZE, 0); bio->bi_iter.bi_sector = page->physical >> 9; bio->bi_opf = REQ_OP_READ; if (btrfsic_submit_bio_wait(bio)) { page->io_error = 1; sblock->no_io_error_seen = 0; } bio_put(bio); } if (sblock->no_io_error_seen) scrub_recheck_block_checksum(sblock); } static inline int scrub_check_fsid(u8 fsid[], struct scrub_page *spage) { struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices; int ret; ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE); return !ret; } static void scrub_recheck_block_checksum(struct scrub_block *sblock) { sblock->header_error = 0; sblock->checksum_error = 0; sblock->generation_error = 0; if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA) scrub_checksum_data(sblock); else scrub_checksum_tree_block(sblock); } static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good) { int page_num; int ret = 0; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { int ret_sub; ret_sub = scrub_repair_page_from_good_copy(sblock_bad, sblock_good, page_num, 1); if (ret_sub) ret = ret_sub; } return ret; } static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_page *page_good = sblock_good->pagev[page_num]; struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; BUG_ON(page_bad->page == NULL); BUG_ON(page_good->page == NULL); if (force_write || sblock_bad->header_error || sblock_bad->checksum_error || page_bad->io_error) { struct bio *bio; int ret; if (!page_bad->dev->bdev) { btrfs_warn_rl(fs_info, "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected"); return -EIO; } bio = btrfs_io_bio_alloc(1); bio_set_dev(bio, page_bad->dev->bdev); bio->bi_iter.bi_sector = page_bad->physical >> 9; 
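/* Write the good mirror's copy over the bad mirror's physical location; if the submit below fails, the device write-error stat and the dev-replace write error counter are bumped. */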
bio->bi_opf = REQ_OP_WRITE; ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); if (PAGE_SIZE != ret) { bio_put(bio); return -EIO; } if (btrfsic_submit_bio_wait(bio)) { btrfs_dev_stat_inc_and_print(page_bad->dev, BTRFS_DEV_STAT_WRITE_ERRS); atomic64_inc(&fs_info->dev_replace.num_write_errors); bio_put(bio); return -EIO; } bio_put(bio); } return 0; } static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) { struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; int page_num; /* * This block is used for the check of the parity on the source device, * so the data needn't be written into the destination device. */ if (sblock->sparity) return; for (page_num = 0; page_num < sblock->page_count; page_num++) { int ret; ret = scrub_write_page_to_dev_replace(sblock, page_num); if (ret) atomic64_inc(&fs_info->dev_replace.num_write_errors); } } static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, int page_num) { struct scrub_page *spage = sblock->pagev[page_num]; BUG_ON(spage->page == NULL); if (spage->io_error) { void *mapped_buffer = kmap_atomic(spage->page); clear_page(mapped_buffer); flush_dcache_page(spage->page); kunmap_atomic(mapped_buffer); } return scrub_add_page_to_wr_bio(sblock->sctx, spage); } static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { struct scrub_bio *sbio; int ret; mutex_lock(&sctx->wr_lock); again: if (!sctx->wr_curr_bio) { sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), GFP_KERNEL); if (!sctx->wr_curr_bio) { mutex_unlock(&sctx->wr_lock); return -ENOMEM; } sctx->wr_curr_bio->sctx = sctx; sctx->wr_curr_bio->page_count = 0; } sbio = sctx->wr_curr_bio; if (sbio->page_count == 0) { struct bio *bio; sbio->physical = spage->physical_for_dev_replace; sbio->logical = spage->logical; sbio->dev = sctx->wr_tgtdev; bio = sbio->bio; if (!bio) { bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio); sbio->bio = bio; } bio->bi_private = sbio; bio->bi_end_io = scrub_wr_bio_end_io; bio_set_dev(bio, sbio->dev->bdev); bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_opf = REQ_OP_WRITE; sbio->status = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical_for_dev_replace || sbio->logical + sbio->page_count * PAGE_SIZE != spage->logical) { scrub_wr_submit(sctx); goto again; } ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { if (sbio->page_count < 1) { bio_put(sbio->bio); sbio->bio = NULL; mutex_unlock(&sctx->wr_lock); return -EIO; } scrub_wr_submit(sctx); goto again; } sbio->pagev[sbio->page_count] = spage; scrub_page_get(spage); sbio->page_count++; if (sbio->page_count == sctx->pages_per_wr_bio) scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); return 0; } static void scrub_wr_submit(struct scrub_ctx *sctx) { struct scrub_bio *sbio; if (!sctx->wr_curr_bio) return; sbio = sctx->wr_curr_bio; sctx->wr_curr_bio = NULL; WARN_ON(!sbio->bio->bi_disk); scrub_pending_bio_inc(sctx); /* process all writes in a single worker thread. 
Then the block layer * orders the requests before sending them to the driver which * doubled the write performance on spinning disks when measured * with Linux 3.5 */ btrfsic_submit_bio(sbio->bio); } static void scrub_wr_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->fs_info; sbio->status = bio->bi_status; sbio->bio = bio; btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, scrub_wr_bio_end_io_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); } static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; int i; WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); if (sbio->status) { struct btrfs_dev_replace *dev_replace = &sbio->sctx->fs_info->dev_replace; for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; spage->io_error = 1; atomic64_inc(&dev_replace->num_write_errors); } } for (i = 0; i < sbio->page_count; i++) scrub_page_put(sbio->pagev[i]); bio_put(sbio->bio); kfree(sbio); scrub_pending_bio_dec(sctx); } static int scrub_checksum(struct scrub_block *sblock) { u64 flags; int ret; /* * No need to initialize these stats currently, * because this function only use return value * instead of these stats value. * * Todo: * always use stats */ sblock->header_error = 0; sblock->generation_error = 0; sblock->checksum_error = 0; WARN_ON(sblock->page_count < 1); flags = sblock->pagev[0]->flags; ret = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) ret = scrub_checksum_data(sblock); else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) ret = scrub_checksum_tree_block(sblock); else if (flags & BTRFS_EXTENT_FLAG_SUPER) (void)scrub_checksum_super(sblock); else WARN_ON(1); if (ret) scrub_handle_errored_block(sblock); return ret; } static int scrub_checksum_data(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; u8 csum[BTRFS_CSUM_SIZE]; u8 *on_disk_csum; struct page *page; void *buffer; u32 crc = ~(u32)0; u64 len; int index; BUG_ON(sblock->page_count < 1); if (!sblock->pagev[0]->have_csum) return 0; on_disk_csum = sblock->pagev[0]->csum; page = sblock->pagev[0]->page; buffer = kmap_atomic(page); len = sctx->fs_info->sectorsize; index = 0; for (;;) { u64 l = min_t(u64, len, PAGE_SIZE); crc = btrfs_csum_data(buffer, crc, l); kunmap_atomic(buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; buffer = kmap_atomic(page); } btrfs_csum_final(crc, csum); if (memcmp(csum, on_disk_csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->checksum_error; } static int scrub_checksum_tree_block(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; struct btrfs_header *h; struct btrfs_fs_info *fs_info = sctx->fs_info; u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; struct page *page; void *mapped_buffer; u64 mapped_size; void *p; u32 crc = ~(u32)0; u64 len; int index; BUG_ON(sblock->page_count < 1); page = sblock->pagev[0]->page; mapped_buffer = kmap_atomic(page); h = (struct btrfs_header *)mapped_buffer; memcpy(on_disk_csum, h->csum, sctx->csum_size); /* * we don't use the getter functions here, as we * a) don't have an extent buffer and * b) the page is already kmapped */ if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h)) sblock->header_error = 1; if (sblock->pagev[0]->generation != 
btrfs_stack_header_generation(h)) { sblock->header_error = 1; sblock->generation_error = 1; } if (!scrub_check_fsid(h->fsid, sblock->pagev[0])) sblock->header_error = 1; if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) sblock->header_error = 1; len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE; mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; index = 0; for (;;) { u64 l = min_t(u64, len, mapped_size); crc = btrfs_csum_data(p, crc, l); kunmap_atomic(mapped_buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; mapped_buffer = kmap_atomic(page); mapped_size = PAGE_SIZE; p = mapped_buffer; } btrfs_csum_final(crc, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->header_error || sblock->checksum_error; } static int scrub_checksum_super(struct scrub_block *sblock) { struct btrfs_super_block *s; struct scrub_ctx *sctx = sblock->sctx; u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; struct page *page; void *mapped_buffer; u64 mapped_size; void *p; u32 crc = ~(u32)0; int fail_gen = 0; int fail_cor = 0; u64 len; int index; BUG_ON(sblock->page_count < 1); page = sblock->pagev[0]->page; mapped_buffer = kmap_atomic(page); s = (struct btrfs_super_block *)mapped_buffer; memcpy(on_disk_csum, s->csum, sctx->csum_size); if (sblock->pagev[0]->logical != btrfs_super_bytenr(s)) ++fail_cor; if (sblock->pagev[0]->generation != btrfs_super_generation(s)) ++fail_gen; if (!scrub_check_fsid(s->fsid, sblock->pagev[0])) ++fail_cor; len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE; mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; index = 0; for (;;) { u64 l = min_t(u64, len, mapped_size); crc = btrfs_csum_data(p, crc, l); kunmap_atomic(mapped_buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; mapped_buffer = kmap_atomic(page); mapped_size = PAGE_SIZE; p = mapped_buffer; } btrfs_csum_final(crc, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) ++fail_cor; if (fail_cor + fail_gen) { /* * if we find an error in a super block, we just report it. 
* They will get written with the next transaction commit * anyway */ spin_lock(&sctx->stat_lock); ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); if (fail_cor) btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); else btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, BTRFS_DEV_STAT_GENERATION_ERRS); } return fail_cor + fail_gen; } static void scrub_block_get(struct scrub_block *sblock) { refcount_inc(&sblock->refs); } static void scrub_block_put(struct scrub_block *sblock) { if (refcount_dec_and_test(&sblock->refs)) { int i; if (sblock->sparity) scrub_parity_put(sblock->sparity); for (i = 0; i < sblock->page_count; i++) scrub_page_put(sblock->pagev[i]); kfree(sblock); } } static void scrub_page_get(struct scrub_page *spage) { atomic_inc(&spage->refs); } static void scrub_page_put(struct scrub_page *spage) { if (atomic_dec_and_test(&spage->refs)) { if (spage->page) __free_page(spage->page); kfree(spage); } } static void scrub_submit(struct scrub_ctx *sctx) { struct scrub_bio *sbio; if (sctx->curr == -1) return; sbio = sctx->bios[sctx->curr]; sctx->curr = -1; scrub_pending_bio_inc(sctx); btrfsic_submit_bio(sbio->bio); } static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { struct scrub_block *sblock = spage->sblock; struct scrub_bio *sbio; int ret; again: /* * grab a fresh bio or wait for one to become available */ while (sctx->curr == -1) { spin_lock(&sctx->list_lock); sctx->curr = sctx->first_free; if (sctx->curr != -1) { sctx->first_free = sctx->bios[sctx->curr]->next_free; sctx->bios[sctx->curr]->next_free = -1; sctx->bios[sctx->curr]->page_count = 0; spin_unlock(&sctx->list_lock); } else { spin_unlock(&sctx->list_lock); wait_event(sctx->list_wait, sctx->first_free != -1); } } sbio = sctx->bios[sctx->curr]; if (sbio->page_count == 0) { struct bio *bio; sbio->physical = spage->physical; sbio->logical = spage->logical; sbio->dev = spage->dev; bio = sbio->bio; if (!bio) { bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio); sbio->bio = bio; } bio->bi_private = sbio; bio->bi_end_io = scrub_bio_end_io; bio_set_dev(bio, sbio->dev->bdev); bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_opf = REQ_OP_READ; sbio->status = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical || sbio->logical + sbio->page_count * PAGE_SIZE != spage->logical || sbio->dev != spage->dev) { scrub_submit(sctx); goto again; } sbio->pagev[sbio->page_count] = spage; ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { if (sbio->page_count < 1) { bio_put(sbio->bio); sbio->bio = NULL; return -EIO; } scrub_submit(sctx); goto again; } scrub_block_get(sblock); /* one for the page added to the bio */ atomic_inc(&sblock->outstanding_pages); sbio->page_count++; if (sbio->page_count == sctx->pages_per_rd_bio) scrub_submit(sctx); return 0; } static void scrub_missing_raid56_end_io(struct bio *bio) { struct scrub_block *sblock = bio->bi_private; struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; if (bio->bi_status) sblock->no_io_error_seen = 0; bio_put(bio); btrfs_queue_work(fs_info->scrub_workers, &sblock->work); } static void scrub_missing_raid56_worker(struct btrfs_work *work) { struct scrub_block *sblock = container_of(work, struct scrub_block, work); struct scrub_ctx *sctx = sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 logical; struct btrfs_device *dev; logical = sblock->pagev[0]->logical; dev = sblock->pagev[0]->dev; if (sblock->no_io_error_seen) 
scrub_recheck_block_checksum(sblock); if (!sblock->no_io_error_seen) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "IO error rebuilding logical %llu for dev %s", logical, rcu_str_deref(dev->name)); } else if (sblock->header_error || sblock->checksum_error) { spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "failed to rebuild valid logical %llu for dev %s", logical, rcu_str_deref(dev->name)); } else { scrub_write_block_to_dev_replace(sblock); } scrub_block_put(sblock); if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } scrub_pending_bio_dec(sctx); } static void scrub_missing_raid56_pages(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 length = sblock->page_count * PAGE_SIZE; u64 logical = sblock->pagev[0]->logical; struct btrfs_bio *bbio = NULL; struct bio *bio; struct btrfs_raid_bio *rbio; int ret; int i; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &length, &bbio); if (ret || !bbio || !bbio->raid_map) goto bbio_out; if (WARN_ON(!sctx->is_dev_replace || !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { /* * We shouldn't be scrubbing a missing device. Even for dev * replace, we should only get here for RAID 5/6. We either * managed to mount something with no mirrors remaining or * there's a bug in scrub_remap_extent()/btrfs_map_block(). */ goto bbio_out; } bio = btrfs_io_bio_alloc(0); bio->bi_iter.bi_sector = logical >> 9; bio->bi_private = sblock; bio->bi_end_io = scrub_missing_raid56_end_io; rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length); if (!rbio) goto rbio_out; for (i = 0; i < sblock->page_count; i++) { struct scrub_page *spage = sblock->pagev[i]; raid56_add_scrub_pages(rbio, spage->page, spage->logical); } btrfs_init_work(&sblock->work, btrfs_scrub_helper, scrub_missing_raid56_worker, NULL, NULL); scrub_block_get(sblock); scrub_pending_bio_inc(sctx); raid56_submit_missing_rbio(rbio); return; rbio_out: bio_put(bio); bbio_out: btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(bbio); spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); } static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace) { struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); scrub_page_get(spage); sblock->pagev[index] = spage; spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->physical_for_dev_replace = 
physical_for_dev_replace; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } WARN_ON(sblock->page_count == 0); if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { /* * This case should only be hit for RAID 5/6 device replace. See * the comment in scrub_missing_raid56_pages() for details. */ scrub_missing_raid56_pages(sblock); } else { for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } if (force) scrub_submit(sctx); } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; } static void scrub_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->fs_info; sbio->status = bio->bi_status; sbio->bio = bio; btrfs_queue_work(fs_info->scrub_workers, &sbio->work); } static void scrub_bio_end_io_worker(struct btrfs_work *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; int i; BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); if (sbio->status) { for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; spage->io_error = 1; spage->sblock->no_io_error_seen = 0; } } /* now complete the scrub_block items that have all pages completed */ for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; struct scrub_block *sblock = spage->sblock; if (atomic_dec_and_test(&sblock->outstanding_pages)) scrub_block_complete(sblock); scrub_block_put(sblock); } bio_put(sbio->bio); sbio->bio = NULL; spin_lock(&sctx->list_lock); sbio->next_free = sctx->first_free; sctx->first_free = sbio->index; spin_unlock(&sctx->list_lock); if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } scrub_pending_bio_dec(sctx); } static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, unsigned long *bitmap, u64 start, u64 len) { u64 offset; u64 nsectors64; u32 nsectors; int sectorsize = sparity->sctx->fs_info->sectorsize; if (len >= sparity->stripe_len) { bitmap_set(bitmap, 0, sparity->nsectors); return; } start -= sparity->logic_start; start = div64_u64_rem(start, sparity->stripe_len, &offset); offset = div_u64(offset, sectorsize); nsectors64 = div_u64(len, sectorsize); ASSERT(nsectors64 < UINT_MAX); nsectors = (u32)nsectors64; if (offset + nsectors <= sparity->nsectors) { bitmap_set(bitmap, offset, nsectors); return; } bitmap_set(bitmap, offset, sparity->nsectors - offset); bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); } static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, u64 start, u64 len) { __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len); } static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, u64 start, u64 len) { __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len); } static void scrub_block_complete(struct scrub_block *sblock) { int corrupted = 0; if (!sblock->no_io_error_seen) { corrupted = 1; scrub_handle_errored_block(sblock); } else { /* * if has checksum error, write via repair mechanism in * dev replace case, 
otherwise write here in dev replace * case. */ corrupted = scrub_checksum(sblock); if (!corrupted && sblock->sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock); } if (sblock->sparity && corrupted && !sblock->data_corrected) { u64 start = sblock->pagev[0]->logical; u64 end = sblock->pagev[sblock->page_count - 1]->logical + PAGE_SIZE; scrub_parity_mark_sectors_error(sblock->sparity, start, end - start); } } static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) { struct btrfs_ordered_sum *sum = NULL; unsigned long index; unsigned long num_sectors; while (!list_empty(&sctx->csum_list)) { sum = list_first_entry(&sctx->csum_list, struct btrfs_ordered_sum, list); if (sum->bytenr > logical) return 0; if (sum->bytenr + sum->len > logical) break; ++sctx->stat.csum_discards; list_del(&sum->list); kfree(sum); sum = NULL; } if (!sum) return 0; index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize); ASSERT(index < UINT_MAX); num_sectors = sum->len / sctx->fs_info->sectorsize; memcpy(csum, sum->sums + index, sctx->csum_size); if (index == num_sectors - 1) { list_del(&sum->list); kfree(sum); } return 1; } /* scrub extent tries to collect up to 64 kB for each bio */ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u64 physical_for_dev_replace) { int ret; u8 csum[BTRFS_CSUM_SIZE]; u32 blocksize; if (flags & BTRFS_EXTENT_FLAG_DATA) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) blocksize = map->stripe_len; else blocksize = sctx->fs_info->sectorsize; spin_lock(&sctx->stat_lock); sctx->stat.data_extents_scrubbed++; sctx->stat.data_bytes_scrubbed += len; spin_unlock(&sctx->stat_lock); } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) blocksize = map->stripe_len; else blocksize = sctx->fs_info->nodesize; spin_lock(&sctx->stat_lock); sctx->stat.tree_extents_scrubbed++; sctx->stat.tree_bytes_scrubbed += len; spin_unlock(&sctx->stat_lock); } else { blocksize = sctx->fs_info->sectorsize; WARN_ON(1); } while (len) { u64 l = min_t(u64, len, blocksize); int have_csum = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) { /* push csums to sbio */ have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) ++sctx->stat.no_csum; } ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? 
csum : NULL, 0, physical_for_dev_replace); if (ret) return ret; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } return 0; } static int scrub_pages_for_parity(struct scrub_parity *sparity, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum) { struct scrub_ctx *sctx = sparity->sctx; struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; sblock->sparity = sparity; scrub_parity_get(sparity); for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); /* For scrub block */ scrub_page_get(spage); sblock->pagev[index] = spage; /* For scrub parity */ scrub_page_get(spage); list_add_tail(&spage->list, &sparity->spages); spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; } WARN_ON(sblock->page_count == 0); for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; } static int scrub_extent_for_parity(struct scrub_parity *sparity, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num) { struct scrub_ctx *sctx = sparity->sctx; int ret; u8 csum[BTRFS_CSUM_SIZE]; u32 blocksize; if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { scrub_parity_mark_sectors_error(sparity, logical, len); return 0; } if (flags & BTRFS_EXTENT_FLAG_DATA) { blocksize = sparity->stripe_len; } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { blocksize = sparity->stripe_len; } else { blocksize = sctx->fs_info->sectorsize; WARN_ON(1); } while (len) { u64 l = min_t(u64, len, blocksize); int have_csum = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) { /* push csums to sbio */ have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) goto skip; } ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? csum : NULL); if (ret) return ret; skip: len -= l; logical += l; physical += l; } return 0; } /* * Given a physical address, this will calculate it's * logical offset. if this is a parity stripe, it will return * the most left data stripe's logical offset. * * return 0 if it is a data stripe, 1 means parity stripe. 
*/ static int get_raid56_logic_offset(u64 physical, int num, struct map_lookup *map, u64 *offset, u64 *stripe_start) { int i; int j = 0; u64 stripe_nr; u64 last_offset; u32 stripe_index; u32 rot; last_offset = (physical - map->stripes[num].physical) * nr_data_stripes(map); if (stripe_start) *stripe_start = last_offset; *offset = last_offset; for (i = 0; i < nr_data_stripes(map); i++) { *offset = last_offset + i * map->stripe_len; stripe_nr = div64_u64(*offset, map->stripe_len); stripe_nr = div_u64(stripe_nr, nr_data_stripes(map)); /* Work out the disk rotation on this stripe-set */ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); /* calculate which stripe this data locates */ rot += i; stripe_index = rot % map->num_stripes; if (stripe_index == num) return 0; if (stripe_index < num) j++; } *offset = last_offset + j * map->stripe_len; return 1; } static void scrub_free_parity(struct scrub_parity *sparity) { struct scrub_ctx *sctx = sparity->sctx; struct scrub_page *curr, *next; int nbits; nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors); if (nbits) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors += nbits; sctx->stat.uncorrectable_errors += nbits; spin_unlock(&sctx->stat_lock); } list_for_each_entry_safe(curr, next, &sparity->spages, list) { list_del_init(&curr->list); scrub_page_put(curr); } kfree(sparity); } static void scrub_parity_bio_endio_worker(struct btrfs_work *work) { struct scrub_parity *sparity = container_of(work, struct scrub_parity, work); struct scrub_ctx *sctx = sparity->sctx; scrub_free_parity(sparity); scrub_pending_bio_dec(sctx); } static void scrub_parity_bio_endio(struct bio *bio) { struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; if (bio->bi_status) bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); bio_put(bio); btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, scrub_parity_bio_endio_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); } static void scrub_parity_check_and_repair(struct scrub_parity *sparity) { struct scrub_ctx *sctx = sparity->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; struct bio *bio; struct btrfs_raid_bio *rbio; struct btrfs_bio *bbio = NULL; u64 length; int ret; if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap, sparity->nsectors)) goto out; length = sparity->logic_end - sparity->logic_start; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, &length, &bbio); if (ret || !bbio || !bbio->raid_map) goto bbio_out; bio = btrfs_io_bio_alloc(0); bio->bi_iter.bi_sector = sparity->logic_start >> 9; bio->bi_private = sparity; bio->bi_end_io = scrub_parity_bio_endio; rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, length, sparity->scrub_dev, sparity->dbitmap, sparity->nsectors); if (!rbio) goto rbio_out; scrub_pending_bio_inc(sctx); raid56_parity_submit_scrub_rbio(rbio); return; rbio_out: bio_put(bio); bbio_out: btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(bbio); bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); out: scrub_free_parity(sparity); } static inline int scrub_calc_parity_bitmap_len(int nsectors) { return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long); } static void scrub_parity_get(struct scrub_parity *sparity) { refcount_inc(&sparity->refs); } 
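/* Dropping the last reference triggers the parity check and repair of the whole stripe via scrub_parity_check_and_repair() above. */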
static void scrub_parity_put(struct scrub_parity *sparity) { if (!refcount_dec_and_test(&sparity->refs)) return; scrub_parity_check_and_repair(sparity); } static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *sdev, struct btrfs_path *path, u64 logic_start, u64 logic_end) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; struct btrfs_bio *bbio = NULL; u64 flags; int ret; int slot; struct extent_buffer *l; struct btrfs_key key; u64 generation; u64 extent_logical; u64 extent_physical; u64 extent_len; u64 mapped_length; struct btrfs_device *extent_dev; struct scrub_parity *sparity; int nsectors; int bitmap_len; int extent_mirror_num; int stop_loop = 0; nsectors = div_u64(map->stripe_len, fs_info->sectorsize); bitmap_len = scrub_calc_parity_bitmap_len(nsectors); sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, GFP_NOFS); if (!sparity) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } sparity->stripe_len = map->stripe_len; sparity->nsectors = nsectors; sparity->sctx = sctx; sparity->scrub_dev = sdev; sparity->logic_start = logic_start; sparity->logic_end = logic_end; refcount_set(&sparity->refs, 1); INIT_LIST_HEAD(&sparity->spages); sparity->dbitmap = sparity->bitmap; sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; ret = 0; while (logic_start < logic_end) { if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logic_start; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_extent_item(root, path, 0); if (ret < 0) goto out; if (ret > 0) { btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; } } stop_loop = 0; while (1) { u64 bytes; l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; stop_loop = 1; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) goto next; if (key.type == BTRFS_METADATA_ITEM_KEY) bytes = fs_info->nodesize; else bytes = key.offset; if (key.objectid + bytes <= logic_start) goto next; if (key.objectid >= logic_end) { stop_loop = 1; break; } while (key.objectid >= logic_start + map->stripe_len) logic_start += map->stripe_len; extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); generation = btrfs_extent_generation(l, extent); if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && (key.objectid < logic_start || key.objectid + bytes > logic_start + map->stripe_len)) { btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", key.objectid, logic_start); spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); goto next; } again: extent_logical = key.objectid; extent_len = bytes; if (extent_logical < logic_start) { extent_len -= logic_start - extent_logical; extent_logical = logic_start; } if (extent_logical + extent_len > logic_start + map->stripe_len) extent_len = logic_start + map->stripe_len - extent_logical; scrub_parity_mark_sectors_data(sparity, extent_logical, extent_len); mapped_length = extent_len; bbio = NULL; ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, &mapped_length, &bbio, 0); if (!ret) { if (!bbio || mapped_length < extent_len) ret = -EIO; } if (ret) { btrfs_put_bbio(bbio); goto out; } extent_physical = bbio->stripes[0].physical; extent_mirror_num = bbio->mirror_num; extent_dev = bbio->stripes[0].dev; btrfs_put_bbio(bbio); ret = btrfs_lookup_csums_range(csum_root, extent_logical, extent_logical + extent_len - 1, &sctx->csum_list, 1); if (ret) goto out; ret = scrub_extent_for_parity(sparity, extent_logical, extent_len, extent_physical, extent_dev, flags, generation, extent_mirror_num); scrub_free_csums(sctx); if (ret) goto out; if (extent_logical + extent_len < key.objectid + bytes) { logic_start += map->stripe_len; if (logic_start >= logic_end) { stop_loop = 1; break; } if (logic_start < key.objectid + bytes) { cond_resched(); goto again; } } next: path->slots[0]++; } btrfs_release_path(path); if (stop_loop) break; logic_start += map->stripe_len; } out: if (ret < 0) scrub_parity_mark_sectors_error(sparity, logic_start, logic_end - logic_start); scrub_parity_put(sparity); scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); btrfs_release_path(path); return ret < 0 ? 
ret : 0; } static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *scrub_dev, int num, u64 base, u64 length) { struct btrfs_path *path, *ppath; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; struct blk_plug plug; u64 flags; int ret; int slot; u64 nstripes; struct extent_buffer *l; u64 physical; u64 logical; u64 logic_end; u64 physical_end; u64 generation; int mirror_num; struct reada_control *reada1; struct reada_control *reada2; struct btrfs_key key; struct btrfs_key key_end; u64 increment = map->stripe_len; u64 offset; u64 extent_logical; u64 extent_physical; u64 extent_len; u64 stripe_logical; u64 stripe_end; struct btrfs_device *extent_dev; int extent_mirror_num; int stop_loop = 0; physical = map->stripes[num].physical; offset = 0; nstripes = div64_u64(length, map->stripe_len); if (map->type & BTRFS_BLOCK_GROUP_RAID0) { offset = map->stripe_len * num; increment = map->stripe_len * map->num_stripes; mirror_num = 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { int factor = map->num_stripes / map->sub_stripes; offset = map->stripe_len * (num / map->sub_stripes); increment = map->stripe_len * factor; mirror_num = num % map->sub_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical, num, map, &offset, NULL); increment = map->stripe_len * nr_data_stripes(map); mirror_num = 1; } else { increment = map->stripe_len; mirror_num = 1; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; ppath = btrfs_alloc_path(); if (!ppath) { btrfs_free_path(path); return -ENOMEM; } /* * work on commit root. The related disk blocks are static as * long as COW is applied. This means, it is save to rewrite * them to repair disk errors without any race conditions */ path->search_commit_root = 1; path->skip_locking = 1; ppath->search_commit_root = 1; ppath->skip_locking = 1; /* * trigger the readahead for extent tree csum tree and wait for * completion. During readahead, the scrub is officially paused * to not hold off transaction commits */ logical = base + offset; physical_end = physical + nstripes * map->stripe_len; if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical_end, num, map, &logic_end, NULL); logic_end += base; } else { logic_end = logical + increment * nstripes; } wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_blocked_if_needed(fs_info); /* FIXME it might be better to start readahead at commit root */ key.objectid = logical; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = (u64)0; key_end.objectid = logic_end; key_end.type = BTRFS_METADATA_ITEM_KEY; key_end.offset = (u64)-1; reada1 = btrfs_reada_add(root, &key, &key_end); key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.type = BTRFS_EXTENT_CSUM_KEY; key.offset = logical; key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key_end.type = BTRFS_EXTENT_CSUM_KEY; key_end.offset = logic_end; reada2 = btrfs_reada_add(csum_root, &key, &key_end); if (!IS_ERR(reada1)) btrfs_reada_wait(reada1); if (!IS_ERR(reada2)) btrfs_reada_wait(reada2); /* * collect all data csums for the stripe to avoid seeking during * the scrub. 
This might currently (crc32) end up to be about 1MB */ blk_start_plug(&plug); /* * now find all extents for each stripe and scrub them */ ret = 0; while (physical < physical_end) { /* * canceled? */ if (atomic_read(&fs_info->scrub_cancel_req) || atomic_read(&sctx->cancel_req)) { ret = -ECANCELED; goto out; } /* * check to see if we have to pause */ if (atomic_read(&fs_info->scrub_pause_req)) { /* push queued extents */ sctx->flush_all_writes = true; scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); sctx->flush_all_writes = false; scrub_blocked_if_needed(fs_info); } if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; if (ret) { /* it is parity strip */ stripe_logical += base; stripe_end = stripe_logical + increment; ret = scrub_raid56_parity(sctx, map, scrub_dev, ppath, stripe_logical, stripe_end); if (ret) goto out; goto skip; } } if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logical; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_extent_item(root, path, 0); if (ret < 0) goto out; if (ret > 0) { /* there's no smaller item, so stick with the * larger one */ btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; } } stop_loop = 0; while (1) { u64 bytes; l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; stop_loop = 1; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) goto next; if (key.type == BTRFS_METADATA_ITEM_KEY) bytes = fs_info->nodesize; else bytes = key.offset; if (key.objectid + bytes <= logical) goto next; if (key.objectid >= logical + map->stripe_len) { /* out of this device extent */ if (key.objectid >= logic_end) stop_loop = 1; break; } extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); generation = btrfs_extent_generation(l, extent); if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && (key.objectid < logical || key.objectid + bytes > logical + map->stripe_len)) { btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", key.objectid, logical); spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); goto next; } again: extent_logical = key.objectid; extent_len = bytes; /* * trim extent to this stripe */ if (extent_logical < logical) { extent_len -= logical - extent_logical; extent_logical = logical; } if (extent_logical + extent_len > logical + map->stripe_len) { extent_len = logical + map->stripe_len - extent_logical; } extent_physical = extent_logical - logical + physical; extent_dev = scrub_dev; extent_mirror_num = mirror_num; if (sctx->is_dev_replace) scrub_remap_extent(fs_info, extent_logical, extent_len, &extent_physical, &extent_dev, &extent_mirror_num); ret = btrfs_lookup_csums_range(csum_root, extent_logical, extent_logical + extent_len - 1, &sctx->csum_list, 1); if (ret) goto out; ret = scrub_extent(sctx, map, extent_logical, extent_len, extent_physical, extent_dev, flags, generation, extent_mirror_num, extent_logical - logical + physical); scrub_free_csums(sctx); if (ret) goto out; if (extent_logical + extent_len < key.objectid + bytes) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* * loop until we find next data stripe * or we have finished all stripes. */ loop: physical += map->stripe_len; ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; if (ret && physical < physical_end) { stripe_logical += base; stripe_end = stripe_logical + increment; ret = scrub_raid56_parity(sctx, map, scrub_dev, ppath, stripe_logical, stripe_end); if (ret) goto out; goto loop; } } else { physical += map->stripe_len; logical += increment; } if (logical < key.objectid + bytes) { cond_resched(); goto again; } if (physical >= physical_end) { stop_loop = 1; break; } } next: path->slots[0]++; } btrfs_release_path(path); skip: logical += increment; physical += map->stripe_len; spin_lock(&sctx->stat_lock); if (stop_loop) sctx->stat.last_physical = map->stripes[num].physical + length; else sctx->stat.last_physical = physical; spin_unlock(&sctx->stat_lock); if (stop_loop) break; } out: /* push queued extents */ scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); blk_finish_plug(&plug); btrfs_free_path(path); btrfs_free_path(ppath); return ret < 0 ? ret : 0; } static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 chunk_offset, u64 length, u64 dev_offset, struct btrfs_block_group_cache *cache) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; int i; int ret = 0; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); read_unlock(&map_tree->map_tree.lock); if (!em) { /* * Might have been an unused block group deleted by the cleaner * kthread or relocation. 
*/ spin_lock(&cache->lock); if (!cache->removed) ret = -EINVAL; spin_unlock(&cache->lock); return ret; } map = em->map_lookup; if (em->start != chunk_offset) goto out; if (em->len < length) goto out; for (i = 0; i < map->num_stripes; ++i) { if (map->stripes[i].dev->bdev == scrub_dev->bdev && map->stripes[i].physical == dev_offset) { ret = scrub_stripe(sctx, map, scrub_dev, i, chunk_offset, length); if (ret) goto out; } } out: free_extent_map(em); return ret; } static noinline_for_stack int scrub_enumerate_chunks(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 start, u64 end) { struct btrfs_dev_extent *dev_extent = NULL; struct btrfs_path *path; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->dev_root; u64 length; u64 chunk_offset; int ret = 0; int ro_set; int slot; struct extent_buffer *l; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_block_group_cache *cache; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = READA_FORWARD; path->search_commit_root = 1; path->skip_locking = 1; key.objectid = scrub_dev->devid; key.offset = 0ull; key.type = BTRFS_DEV_EXTENT_KEY; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) break; if (ret > 0) { if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(root, path); if (ret < 0) break; if (ret > 0) { ret = 0; break; } } else { ret = 0; } } l = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(l, &found_key, slot); if (found_key.objectid != scrub_dev->devid) break; if (found_key.type != BTRFS_DEV_EXTENT_KEY) break; if (found_key.offset >= end) break; if (found_key.offset < key.offset) break; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); length = btrfs_dev_extent_length(l, dev_extent); if (found_key.offset + length <= start) goto skip; chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); /* * get a reference on the corresponding block group to prevent * the chunk from going away while we scrub it */ cache = btrfs_lookup_block_group(fs_info, chunk_offset); /* some chunks are removed but not committed to disk yet, * continue scrubbing */ if (!cache) goto skip; /* * we need call btrfs_inc_block_group_ro() with scrubs_paused, * to avoid deadlock caused by: * btrfs_inc_block_group_ro() * -> btrfs_wait_for_commit() * -> btrfs_commit_transaction() * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); ret = btrfs_inc_block_group_ro(cache); if (!ret && sctx->is_dev_replace) { /* * If we are doing a device replace wait for any tasks * that started delalloc right before we set the block * group to RO mode, as they might have just allocated * an extent from it or decided they could do a nocow * write. And if any such tasks did that, wait for their * ordered extents to complete and then commit the * current transaction, so that we can later see the new * extent items in the extent tree - the ordered extents * create delayed data references (for cow writes) when * they complete, which will be run and insert the * corresponding extent items into the extent tree when * we commit the transaction they used when running * inode.c:btrfs_finish_ordered_io(). We later use * the commit root of the extent tree to find extents * to copy from the srcdev into the tgtdev, and we don't * want to miss any new extents. 
*/ btrfs_wait_block_group_reservations(cache); btrfs_wait_nocow_writers(cache); ret = btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->key.objectid, cache->key.offset); if (ret > 0) { struct btrfs_trans_handle *trans; trans = btrfs_join_transaction(root); if (IS_ERR(trans)) ret = PTR_ERR(trans); else ret = btrfs_commit_transaction(trans); if (ret) { scrub_pause_off(fs_info); btrfs_put_block_group(cache); break; } } } scrub_pause_off(fs_info); if (ret == 0) { ro_set = 1; } else if (ret == -ENOSPC) { /* * btrfs_inc_block_group_ro return -ENOSPC when it * failed in creating new chunk for metadata. * It is not a problem for scrub/replace, because * metadata are always cowed, and our scrub paused * commit_transactions. */ ro_set = 0; } else { btrfs_warn(fs_info, "failed setting block group ro: %d", ret); btrfs_put_block_group(cache); break; } down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_right = found_key.offset + length; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; up_write(&dev_replace->rwsem); ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, found_key.offset, cache); /* * flush, submit all pending read and write bios, afterwards * wait for them. * Note that in the dev replace case, a read request causes * write requests that are submitted in the read completion * worker. Therefore in the current situation, it is required * that all write requests are flushed, so that all read and * write requests are really completed when bios_in_flight * changes to 0. */ sctx->flush_all_writes = true; scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_pause_on(fs_info); /* * must be called before we decrease @scrub_paused. * make sure we don't block transaction commit while * we are waiting pending workers finished. */ wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); sctx->flush_all_writes = false; scrub_pause_off(fs_info); down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_left = dev_replace->cursor_right; dev_replace->item_needs_writeback = 1; up_write(&fs_info->dev_replace.rwsem); if (ro_set) btrfs_dec_block_group_ro(cache); /* * We might have prevented the cleaner kthread from deleting * this block group if it was already unused because we raced * and set it to RO mode first. So add it back to the unused * list, otherwise it might not ever be deleted unless a manual * balance is triggered or it becomes used and unused again. */ spin_lock(&cache->lock); if (!cache->removed && !cache->ro && cache->reserved == 0 && btrfs_block_group_used(&cache->item) == 0) { spin_unlock(&cache->lock); btrfs_mark_bg_unused(cache); } else { spin_unlock(&cache->lock); } btrfs_put_block_group(cache); if (ret) break; if (sctx->is_dev_replace && atomic64_read(&dev_replace->num_write_errors) > 0) { ret = -EIO; break; } if (sctx->stat.malloc_errors > 0) { ret = -ENOMEM; break; } skip: key.offset = found_key.offset + length; btrfs_release_path(path); } btrfs_free_path(path); return ret; } static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev) { int i; u64 bytenr; u64 gen; int ret; struct btrfs_fs_info *fs_info = sctx->fs_info; if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) return -EIO; /* Seed devices of a new filesystem has their own generation. 
*/ if (scrub_dev->fs_devices != fs_info->fs_devices) gen = scrub_dev->generation; else gen = fs_info->last_trans_committed; for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->commit_total_bytes) break; ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1, bytenr); if (ret) return ret; } wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); return 0; } /* * get a reference count on fs_info->scrub_workers. start worker if necessary */ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; if (fs_info->scrub_workers_refcnt == 0) { fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags, is_dev_replace ? 1 : max_active, 4); if (!fs_info->scrub_workers) goto fail_scrub_workers; fs_info->scrub_wr_completion_workers = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, max_active, 2); if (!fs_info->scrub_wr_completion_workers) goto fail_scrub_wr_completion_workers; fs_info->scrub_parity_workers = btrfs_alloc_workqueue(fs_info, "scrubparity", flags, max_active, 2); if (!fs_info->scrub_parity_workers) goto fail_scrub_parity_workers; } ++fs_info->scrub_workers_refcnt; return 0; fail_scrub_parity_workers: btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); fail_scrub_wr_completion_workers: btrfs_destroy_workqueue(fs_info->scrub_workers); fail_scrub_workers: return -ENOMEM; } static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) { if (--fs_info->scrub_workers_refcnt == 0) { btrfs_destroy_workqueue(fs_info->scrub_workers); btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); btrfs_destroy_workqueue(fs_info->scrub_parity_workers); } WARN_ON(fs_info->scrub_workers_refcnt < 0); } int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, u64 end, struct btrfs_scrub_progress *progress, int readonly, int is_dev_replace) { struct scrub_ctx *sctx; int ret; struct btrfs_device *dev; unsigned int nofs_flag; if (btrfs_fs_closing(fs_info)) return -EINVAL; if (fs_info->nodesize > BTRFS_STRIPE_LEN) { /* * in this case scrub is unable to calculate the checksum * the way scrub is implemented. Do not handle this * situation at all because it won't ever happen. 
*/ btrfs_err(fs_info, "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails", fs_info->nodesize, BTRFS_STRIPE_LEN); return -EINVAL; } if (fs_info->sectorsize != PAGE_SIZE) { /* not supported for data w/o checksums */ btrfs_err_rl(fs_info, "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails", fs_info->sectorsize, PAGE_SIZE); return -EINVAL; } if (fs_info->nodesize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK || fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { /* * would exhaust the array bounds of pagev member in * struct scrub_block */ btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails", fs_info->nodesize, SCRUB_MAX_PAGES_PER_BLOCK, fs_info->sectorsize, SCRUB_MAX_PAGES_PER_BLOCK); return -EINVAL; } /* Allocate outside of device_list_mutex */ sctx = scrub_setup_ctx(fs_info, is_dev_replace); if (IS_ERR(sctx)) return PTR_ERR(sctx); mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; goto out_free_ctx; } if (!is_dev_replace && !readonly && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; goto out_free_ctx; } mutex_lock(&fs_info->scrub_lock); if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; goto out_free_ctx; } down_read(&fs_info->dev_replace.rwsem); if (dev->scrub_ctx || (!is_dev_replace && btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { up_read(&fs_info->dev_replace.rwsem); mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; goto out_free_ctx; } up_read(&fs_info->dev_replace.rwsem); ret = scrub_workers_get(fs_info, is_dev_replace); if (ret) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); goto out_free_ctx; } sctx->readonly = readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); /* * checking @scrub_pause_req here, we can avoid * race between committing transaction and scrubbing. */ __scrub_blocked_if_needed(fs_info); atomic_inc(&fs_info->scrubs_running); mutex_unlock(&fs_info->scrub_lock); /* * In order to avoid deadlock with reclaim when there is a transaction * trying to pause scrub, make sure we use GFP_NOFS for all the * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity() * invoked by our callees. The pausing request is done when the * transaction commit starts, and it blocks the transaction until scrub * is paused (done at specific points at scrub_stripe() or right above * before incrementing fs_info->scrubs_running). */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { /* * by holding device list mutex, we can * kick off writing super in log tree sync. 
*/ mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); } if (!ret) ret = scrub_enumerate_chunks(sctx, dev, start, end); memalloc_nofs_restore(nofs_flag); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); atomic_dec(&fs_info->scrubs_running); wake_up(&fs_info->scrub_pause_wait); wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); if (progress) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; scrub_workers_put(fs_info); mutex_unlock(&fs_info->scrub_lock); scrub_put_ctx(sctx); return ret; out_free_ctx: scrub_free_ctx(sctx); return ret; } void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); atomic_inc(&fs_info->scrub_pause_req); while (atomic_read(&fs_info->scrubs_paused) != atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_paused) == atomic_read(&fs_info->scrubs_running)); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); } void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) { atomic_dec(&fs_info->scrub_pause_req); wake_up(&fs_info->scrub_pause_wait); } int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); if (!atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&fs_info->scrub_cancel_req); while (atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_running) == 0); mutex_lock(&fs_info->scrub_lock); } atomic_dec(&fs_info->scrub_cancel_req); mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info, struct btrfs_device *dev) { struct scrub_ctx *sctx; mutex_lock(&fs_info->scrub_lock); sctx = dev->scrub_ctx; if (!sctx) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&sctx->cancel_req); while (dev->scrub_ctx) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, dev->scrub_ctx == NULL); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, struct btrfs_scrub_progress *progress) { struct btrfs_device *dev; struct scrub_ctx *sctx = NULL; mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL); if (dev) sctx = dev->scrub_ctx; if (sctx) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_unlock(&fs_info->fs_devices->device_list_mutex); return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; } static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u64 extent_len, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num) { u64 mapped_length; struct btrfs_bio *bbio = NULL; int ret; mapped_length = extent_len; ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, &mapped_length, &bbio, 0); if (ret || !bbio || mapped_length < extent_len || !bbio->stripes[0].dev->bdev) { btrfs_put_bbio(bbio); return; } *extent_physical = bbio->stripes[0].physical; *extent_mirror_num = bbio->mirror_num; *extent_dev = bbio->stripes[0].dev; btrfs_put_bbio(bbio); }
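/*
 * Editor's illustrative sketch (not part of the kernel sources above or
 * below): the entry points btrfs_scrub_dev(), btrfs_scrub_cancel() and
 * btrfs_scrub_progress() shown above are normally reached from user space
 * through the btrfs scrub ioctls. The snippet assumes the UAPI names
 * (BTRFS_IOC_SCRUB, struct btrfs_ioctl_scrub_args, BTRFS_SCRUB_READONLY)
 * from <linux/btrfs.h>; error handling is intentionally minimal and the
 * helper name scrub_one_device() is made up for this example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Scrub one device of the btrfs filesystem that @path belongs to. */
static int scrub_one_device(const char *path, __u64 devid)
{
	struct btrfs_ioctl_scrub_args sa;
	int fd;
	int ret;

	fd = open(path, O_RDONLY);	/* any file or directory on the fs */
	if (fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.devid = devid;
	sa.start = 0;
	sa.end = (__u64)-1;	/* scrub the device's whole address range */
	sa.flags = 0;		/* or BTRFS_SCRUB_READONLY for a read-only pass */

	/* Blocks until the scrub finishes; the kernel side is btrfs_scrub_dev(). */
	ret = ioctl(fd, BTRFS_IOC_SCRUB, &sa);
	if (ret == 0)
		printf("scrubbed %llu data bytes, %llu csum errors, %llu corrected\n",
		       (unsigned long long)sa.progress.data_bytes_scrubbed,
		       (unsigned long long)sa.progress.csum_errors,
		       (unsigned long long)sa.progress.corrected_errors);

	close(fd);
	return ret;
}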
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2011, 2012 STRATO. All rights reserved. */ #include <linux/blkdev.h> #include <linux/ratelimit.h> #include <linux/sched/mm.h> #include "ctree.h" #include "volumes.h" #include "disk-io.h" #include "ordered-data.h" #include "transaction.h" #include "backref.h" #include "extent_io.h" #include "dev-replace.h" #include "check-integrity.h" #include "rcu-string.h" #include "raid56.h" /* * This is only the first step towards a full-features scrub. It reads all * extent and super block and verifies the checksums. In case a bad checksum * is found or the extent cannot be read, good data will be written back if * any can be found. * * Future enhancements: * - In case an unrepairable extent is encountered, track which files are * affected and report them * - track and record media errors, throw out bad devices * - add a mode to also read unallocated space */ struct scrub_block; struct scrub_ctx; /* * the following three values only influence the performance. * The last one configures the number of parallel and outstanding I/O * operations. The first two values configure an upper limit for the number * of (dynamically allocated) pages that are added to a bio. */ #define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */ #define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */ #define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */ /* * the following value times PAGE_SIZE needs to be large enough to match the * largest node/leaf/sector size that shall be supported. * Values larger than BTRFS_STRIPE_LEN are not supported. */ #define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */ struct scrub_recover { refcount_t refs; struct btrfs_bio *bbio; u64 map_length; }; struct scrub_page { struct scrub_block *sblock; struct page *page; struct btrfs_device *dev; struct list_head list; u64 flags; /* extent flags */ u64 generation; u64 logical; u64 physical; u64 physical_for_dev_replace; atomic_t refs; struct { unsigned int mirror_num:8; unsigned int have_csum:1; unsigned int io_error:1; }; u8 csum[BTRFS_CSUM_SIZE]; struct scrub_recover *recover; }; struct scrub_bio { int index; struct scrub_ctx *sctx; struct btrfs_device *dev; struct bio *bio; blk_status_t status; u64 logical; u64 physical; #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO]; #else struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO]; #endif int page_count; int next_free; struct btrfs_work work; }; struct scrub_block { struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK]; int page_count; atomic_t outstanding_pages; refcount_t refs; /* free mem on transition to zero */ struct scrub_ctx *sctx; struct scrub_parity *sparity; struct { unsigned int header_error:1; unsigned int checksum_error:1; unsigned int no_io_error_seen:1; unsigned int generation_error:1; /* also sets header_error */ /* The following is for the data used to check parity */ /* It is for the data with checksum */ unsigned int data_corrected:1; }; struct btrfs_work work; }; /* Used for the chunks with parity stripe such RAID5/6 */ struct scrub_parity { struct scrub_ctx *sctx; struct btrfs_device *scrub_dev; u64 logic_start; u64 logic_end; int nsectors; u64 stripe_len; refcount_t refs; struct list_head spages; /* Work of parity check and repair */ struct btrfs_work work; /* Mark the parity blocks which have data */ unsigned long *dbitmap; /* * Mark the parity blocks which have data, but errors happen when * read data or check data */ unsigned long *ebitmap; unsigned long bitmap[0]; 
}; struct scrub_ctx { struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX]; struct btrfs_fs_info *fs_info; int first_free; int curr; atomic_t bios_in_flight; atomic_t workers_pending; spinlock_t list_lock; wait_queue_head_t list_wait; u16 csum_size; struct list_head csum_list; atomic_t cancel_req; int readonly; int pages_per_rd_bio; int is_dev_replace; struct scrub_bio *wr_curr_bio; struct mutex wr_lock; int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */ struct btrfs_device *wr_tgtdev; bool flush_all_writes; /* * statistics */ struct btrfs_scrub_progress stat; spinlock_t stat_lock; /* * Use a ref counter to avoid use-after-free issues. Scrub workers * decrement bios_in_flight and workers_pending and then do a wakeup * on the list_wait wait queue. We must ensure the main scrub task * doesn't free the scrub context before or while the workers are * doing the wakeup() call. */ refcount_t refs; }; struct scrub_warning { struct btrfs_path *path; u64 extent_item_size; const char *errstr; u64 physical; u64 logical; struct btrfs_device *dev; }; struct full_stripe_lock { struct rb_node node; u64 logical; u64 refs; struct mutex mutex; }; static void scrub_pending_bio_inc(struct scrub_ctx *sctx); static void scrub_pending_bio_dec(struct scrub_ctx *sctx); static int scrub_handle_errored_block(struct scrub_block *sblock_to_check); static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck); static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int retry_failed_mirror); static void scrub_recheck_block_checksum(struct scrub_block *sblock); static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good); static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write); static void scrub_write_block_to_dev_replace(struct scrub_block *sblock); static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, int page_num); static int scrub_checksum_data(struct scrub_block *sblock); static int scrub_checksum_tree_block(struct scrub_block *sblock); static int scrub_checksum_super(struct scrub_block *sblock); static void scrub_block_get(struct scrub_block *sblock); static void scrub_block_put(struct scrub_block *sblock); static void scrub_page_get(struct scrub_page *spage); static void scrub_page_put(struct scrub_page *spage); static void scrub_parity_get(struct scrub_parity *sparity); static void scrub_parity_put(struct scrub_parity *sparity); static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace); static void scrub_bio_end_io(struct bio *bio); static void scrub_bio_end_io_worker(struct btrfs_work *work); static void scrub_block_complete(struct scrub_block *sblock); static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u64 extent_len, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num); static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static void scrub_wr_submit(struct scrub_ctx *sctx); static void scrub_wr_bio_end_io(struct bio *bio); static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void 
scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_put_ctx(struct scrub_ctx *sctx); static inline int scrub_is_page_on_raid56(struct scrub_page *page) { return page->recover && (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); } static void scrub_pending_bio_inc(struct scrub_ctx *sctx) { refcount_inc(&sctx->refs); atomic_inc(&sctx->bios_in_flight); } static void scrub_pending_bio_dec(struct scrub_ctx *sctx) { atomic_dec(&sctx->bios_in_flight); wake_up(&sctx->list_wait); scrub_put_ctx(sctx); } static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { while (atomic_read(&fs_info->scrub_pause_req)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrub_pause_req) == 0); mutex_lock(&fs_info->scrub_lock); } } static void scrub_pause_on(struct btrfs_fs_info *fs_info) { atomic_inc(&fs_info->scrubs_paused); wake_up(&fs_info->scrub_pause_wait); } static void scrub_pause_off(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); __scrub_blocked_if_needed(fs_info); atomic_dec(&fs_info->scrubs_paused); mutex_unlock(&fs_info->scrub_lock); wake_up(&fs_info->scrub_pause_wait); } static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { scrub_pause_on(fs_info); scrub_pause_off(fs_info); } /* * Insert new full stripe lock into full stripe locks tree * * Return pointer to existing or newly inserted full_stripe_lock structure if * everything works well. * Return ERR_PTR(-ENOMEM) if we failed to allocate memory * * NOTE: caller must hold full_stripe_locks_root->lock before calling this * function */ static struct full_stripe_lock *insert_full_stripe_lock( struct btrfs_full_stripe_locks_tree *locks_root, u64 fstripe_logical) { struct rb_node **p; struct rb_node *parent = NULL; struct full_stripe_lock *entry; struct full_stripe_lock *ret; lockdep_assert_held(&locks_root->lock); p = &locks_root->root.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct full_stripe_lock, node); if (fstripe_logical < entry->logical) { p = &(*p)->rb_left; } else if (fstripe_logical > entry->logical) { p = &(*p)->rb_right; } else { entry->refs++; return entry; } } /* * Insert new lock. */ ret = kmalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return ERR_PTR(-ENOMEM); ret->logical = fstripe_logical; ret->refs = 1; mutex_init(&ret->mutex); rb_link_node(&ret->node, parent, p); rb_insert_color(&ret->node, &locks_root->root); return ret; } /* * Search for a full stripe lock of a block group * * Return pointer to existing full stripe lock if found * Return NULL if not found */ static struct full_stripe_lock *search_full_stripe_lock( struct btrfs_full_stripe_locks_tree *locks_root, u64 fstripe_logical) { struct rb_node *node; struct full_stripe_lock *entry; lockdep_assert_held(&locks_root->lock); node = locks_root->root.rb_node; while (node) { entry = rb_entry(node, struct full_stripe_lock, node); if (fstripe_logical < entry->logical) node = node->rb_left; else if (fstripe_logical > entry->logical) node = node->rb_right; else return entry; } return NULL; } /* * Helper to get full stripe logical from a normal bytenr. * * Caller must ensure @cache is a RAID56 block group. */ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, u64 bytenr) { u64 ret; /* * Due to chunk item size limit, full stripe length should not be * larger than U32_MAX. Just a sanity check here. 
*/ WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX); /* * round_down() can only handle power of 2, while RAID56 full * stripe length can be 64KiB * n, so we need to manually round down. */ ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) * cache->full_stripe_len + cache->key.objectid; return ret; } /* * Lock a full stripe to avoid concurrency of recovery and read * * It's only used for profiles with parities (RAID5/6), for other profiles it * does nothing. * * Return 0 if we locked full stripe covering @bytenr, with a mutex held. * So caller must call unlock_full_stripe() at the same context. * * Return <0 if encounters error. */ static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool *locked_ret) { struct btrfs_block_group_cache *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *existing; u64 fstripe_start; int ret = 0; *locked_ret = false; bg_cache = btrfs_lookup_block_group(fs_info, bytenr); if (!bg_cache) { ASSERT(0); return -ENOENT; } /* Profiles not based on parity don't need full stripe lock */ if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) goto out; locks_root = &bg_cache->full_stripe_locks_root; fstripe_start = get_full_stripe_logical(bg_cache, bytenr); /* Now insert the full stripe lock */ mutex_lock(&locks_root->lock); existing = insert_full_stripe_lock(locks_root, fstripe_start); mutex_unlock(&locks_root->lock); if (IS_ERR(existing)) { ret = PTR_ERR(existing); goto out; } mutex_lock(&existing->mutex); *locked_ret = true; out: btrfs_put_block_group(bg_cache); return ret; } /* * Unlock a full stripe. * * NOTE: Caller must ensure it's the same context calling corresponding * lock_full_stripe(). * * Return 0 if we unlock full stripe without problem. * Return <0 for error */ static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool locked) { struct btrfs_block_group_cache *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *fstripe_lock; u64 fstripe_start; bool freeit = false; int ret = 0; /* If we didn't acquire full stripe lock, no need to continue */ if (!locked) return 0; bg_cache = btrfs_lookup_block_group(fs_info, bytenr); if (!bg_cache) { ASSERT(0); return -ENOENT; } if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) goto out; locks_root = &bg_cache->full_stripe_locks_root; fstripe_start = get_full_stripe_logical(bg_cache, bytenr); mutex_lock(&locks_root->lock); fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start); /* Unpaired unlock_full_stripe() detected */ if (!fstripe_lock) { WARN_ON(1); ret = -ENOENT; mutex_unlock(&locks_root->lock); goto out; } if (fstripe_lock->refs == 0) { WARN_ON(1); btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow", fstripe_lock->logical); } else { fstripe_lock->refs--; } if (fstripe_lock->refs == 0) { rb_erase(&fstripe_lock->node, &locks_root->root); freeit = true; } mutex_unlock(&locks_root->lock); mutex_unlock(&fstripe_lock->mutex); if (freeit) kfree(fstripe_lock); out: btrfs_put_block_group(bg_cache); return ret; } static void scrub_free_csums(struct scrub_ctx *sctx) { while (!list_empty(&sctx->csum_list)) { struct btrfs_ordered_sum *sum; sum = list_first_entry(&sctx->csum_list, struct btrfs_ordered_sum, list); list_del(&sum->list); kfree(sum); } } static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) { int i; if (!sctx) return; /* this can happen when scrub is cancelled */ if (sctx->curr != -1) { struct scrub_bio *sbio = sctx->bios[sctx->curr]; for (i = 0; i < 
sbio->page_count; i++) { WARN_ON(!sbio->pagev[i]->page); scrub_block_put(sbio->pagev[i]->sblock); } bio_put(sbio->bio); } for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { struct scrub_bio *sbio = sctx->bios[i]; if (!sbio) break; kfree(sbio); } kfree(sctx->wr_curr_bio); scrub_free_csums(sctx); kfree(sctx); } static void scrub_put_ctx(struct scrub_ctx *sctx) { if (refcount_dec_and_test(&sctx->refs)) scrub_free_ctx(sctx); } static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( struct btrfs_fs_info *fs_info, int is_dev_replace) { struct scrub_ctx *sctx; int i; sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); if (!sctx) goto nomem; refcount_set(&sctx->refs, 1); sctx->is_dev_replace = is_dev_replace; sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; sctx->curr = -1; sctx->fs_info = fs_info; for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { struct scrub_bio *sbio; sbio = kzalloc(sizeof(*sbio), GFP_KERNEL); if (!sbio) goto nomem; sctx->bios[i] = sbio; sbio->index = i; sbio->sctx = sctx; sbio->page_count = 0; btrfs_init_work(&sbio->work, btrfs_scrub_helper, scrub_bio_end_io_worker, NULL, NULL); if (i != SCRUB_BIOS_PER_SCTX - 1) sctx->bios[i]->next_free = i + 1; else sctx->bios[i]->next_free = -1; } sctx->first_free = 0; atomic_set(&sctx->bios_in_flight, 0); atomic_set(&sctx->workers_pending, 0); atomic_set(&sctx->cancel_req, 0); sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); INIT_LIST_HEAD(&sctx->csum_list); spin_lock_init(&sctx->list_lock); spin_lock_init(&sctx->stat_lock); init_waitqueue_head(&sctx->list_wait); WARN_ON(sctx->wr_curr_bio != NULL); mutex_init(&sctx->wr_lock); sctx->wr_curr_bio = NULL; if (is_dev_replace) { WARN_ON(!fs_info->dev_replace.tgtdev); sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; sctx->flush_all_writes = false; } return sctx; nomem: scrub_free_ctx(sctx); return ERR_PTR(-ENOMEM); } static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *warn_ctx) { u64 isize; u32 nlink; int ret; int i; unsigned nofs_flag; struct extent_buffer *eb; struct btrfs_inode_item *inode_item; struct scrub_warning *swarn = warn_ctx; struct btrfs_fs_info *fs_info = swarn->dev->fs_info; struct inode_fs_paths *ipath = NULL; struct btrfs_root *local_root; struct btrfs_key root_key; struct btrfs_key key; root_key.objectid = root; root_key.type = BTRFS_ROOT_ITEM_KEY; root_key.offset = (u64)-1; local_root = btrfs_read_fs_root_no_name(fs_info, &root_key); if (IS_ERR(local_root)) { ret = PTR_ERR(local_root); goto err; } /* * this makes the path point to (inum INODE_ITEM ioff) */ key.objectid = inum; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); if (ret) { btrfs_release_path(swarn->path); goto err; } eb = swarn->path->nodes[0]; inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], struct btrfs_inode_item); isize = btrfs_inode_size(eb, inode_item); nlink = btrfs_inode_nlink(eb, inode_item); btrfs_release_path(swarn->path); /* * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub * uses GFP_NOFS in this context, so we keep it consistent but it does * not seem to be strictly necessary. 
*/ nofs_flag = memalloc_nofs_save(); ipath = init_ipath(4096, local_root, swarn->path); memalloc_nofs_restore(nofs_flag); if (IS_ERR(ipath)) { ret = PTR_ERR(ipath); ipath = NULL; goto err; } ret = paths_from_inode(inum, ipath); if (ret < 0) goto err; /* * we deliberately ignore the bit ipath might have been too small to * hold all of the paths here */ for (i = 0; i < ipath->fspath->elem_cnt; ++i) btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)", swarn->errstr, swarn->logical, rcu_str_deref(swarn->dev->name), swarn->physical, root, inum, offset, min(isize - offset, (u64)PAGE_SIZE), nlink, (char *)(unsigned long)ipath->fspath->val[i]); free_ipath(ipath); return 0; err: btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d", swarn->errstr, swarn->logical, rcu_str_deref(swarn->dev->name), swarn->physical, root, inum, offset, ret); free_ipath(ipath); return 0; } static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) { struct btrfs_device *dev; struct btrfs_fs_info *fs_info; struct btrfs_path *path; struct btrfs_key found_key; struct extent_buffer *eb; struct btrfs_extent_item *ei; struct scrub_warning swarn; unsigned long ptr = 0; u64 extent_item_pos; u64 flags = 0; u64 ref_root; u32 item_size; u8 ref_level = 0; int ret; WARN_ON(sblock->page_count < 1); dev = sblock->pagev[0]->dev; fs_info = sblock->sctx->fs_info; path = btrfs_alloc_path(); if (!path) return; swarn.physical = sblock->pagev[0]->physical; swarn.logical = sblock->pagev[0]->logical; swarn.errstr = errstr; swarn.dev = NULL; ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, &flags); if (ret < 0) goto out; extent_item_pos = swarn.logical - found_key.objectid; swarn.extent_item_size = found_key.offset; eb = path->nodes[0]; ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); item_size = btrfs_item_size_nr(eb, path->slots[0]); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { do { ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, item_size, &ref_root, &ref_level); btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu", errstr, swarn.logical, rcu_str_deref(dev->name), swarn.physical, ref_level ? "node" : "leaf", ret < 0 ? -1 : ref_level, ret < 0 ? -1 : ref_root); } while (ret != 1); btrfs_release_path(path); } else { btrfs_release_path(path); swarn.path = path; swarn.dev = dev; iterate_extent_inodes(fs_info, found_key.objectid, extent_item_pos, 1, scrub_print_warning_inode, &swarn, false); } out: btrfs_free_path(path); } static inline void scrub_get_recover(struct scrub_recover *recover) { refcount_inc(&recover->refs); } static inline void scrub_put_recover(struct btrfs_fs_info *fs_info, struct scrub_recover *recover) { if (refcount_dec_and_test(&recover->refs)) { btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(recover->bbio); kfree(recover); } } /* * scrub_handle_errored_block gets called when either verification of the * pages failed or the bio failed to read, e.g. with EIO. In the latter * case, this function handles all pages in the bio, even though only one * may be bad. * The goal of this function is to repair the errored block by using the * contents of one of the mirrors. 
*/ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) { struct scrub_ctx *sctx = sblock_to_check->sctx; struct btrfs_device *dev; struct btrfs_fs_info *fs_info; u64 logical; unsigned int failed_mirror_index; unsigned int is_metadata; unsigned int have_csum; struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */ struct scrub_block *sblock_bad; int ret; int mirror_index; int page_num; int success; bool full_stripe_locked; unsigned int nofs_flag; static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); BUG_ON(sblock_to_check->page_count < 1); fs_info = sctx->fs_info; if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { /* * if we find an error in a super block, we just report it. * They will get written with the next transaction commit * anyway */ spin_lock(&sctx->stat_lock); ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); return 0; } logical = sblock_to_check->pagev[0]->logical; BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1); failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1; is_metadata = !(sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA); have_csum = sblock_to_check->pagev[0]->have_csum; dev = sblock_to_check->pagev[0]->dev; /* * We must use GFP_NOFS because the scrub task might be waiting for a * worker task executing this function and in turn a transaction commit * might be waiting the scrub task to pause (which needs to wait for all * the worker tasks to complete before pausing). * We do allocations in the workers through insert_full_stripe_lock() * and scrub_add_page_to_wr_bio(), which happens down the call chain of * this function. */ nofs_flag = memalloc_nofs_save(); /* * For RAID5/6, race can happen for a different device scrub thread. * For data corruption, Parity and Data threads will both try * to recovery the data. * Race can lead to doubly added csum error, or even unrecoverable * error. */ ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); if (ret < 0) { memalloc_nofs_restore(nofs_flag); spin_lock(&sctx->stat_lock); if (ret == -ENOMEM) sctx->stat.malloc_errors++; sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); return ret; } /* * read all mirrors one after the other. This includes to * re-read the extent or metadata block that failed (that was * the cause that this fixup code is called) another time, * page by page this time in order to know which pages * caused I/O errors and which ones are good (for all mirrors). * It is the goal to handle the situation when more than one * mirror contains I/O errors, but the errors do not * overlap, i.e. the data can be repaired by selecting the * pages from those mirrors without I/O error on the * particular pages. One example (with blocks >= 2 * PAGE_SIZE) * would be that mirror #1 has an I/O error on the first page, * the second page is good, and mirror #2 has an I/O error on * the second page, but the first page is good. * Then the first page of the first mirror can be repaired by * taking the first page of the second mirror, and the * second page of the second mirror can be repaired by * copying the contents of the 2nd page of the 1st mirror. * One more note: if the pages of one mirror contain I/O * errors, the checksum cannot be verified. In order to get * the best data for repairing, the first attempt is to find * a mirror without I/O errors and with a validated checksum. 
* Only if this is not possible, the pages are picked from * mirrors with I/O errors without considering the checksum. * If the latter is the case, at the end, the checksum of the * repaired area is verified in order to correctly maintain * the statistics. */ sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS, sizeof(*sblocks_for_recheck), GFP_KERNEL); if (!sblocks_for_recheck) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); goto out; } /* setup the context, map the logical blocks and alloc the pages */ ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); if (ret) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); goto out; } BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); sblock_bad = sblocks_for_recheck + failed_mirror_index; /* build and submit the bios for the failed mirror, check checksums */ scrub_recheck_block(fs_info, sblock_bad, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) { /* * the error disappeared after reading page by page, or * the area was part of a huge bio and other parts of the * bio caused I/O errors, or the block layer merged several * read requests into one and the error is caused by a * different bio (usually one of the two latter cases is * the cause) */ spin_lock(&sctx->stat_lock); sctx->stat.unverified_errors++; sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); if (sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock_bad); goto out; } if (!sblock_bad->no_io_error_seen) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("i/o error", sblock_to_check); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); } else if (sblock_bad->checksum_error) { spin_lock(&sctx->stat_lock); sctx->stat.csum_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("checksum error", sblock_to_check); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); } else if (sblock_bad->header_error) { spin_lock(&sctx->stat_lock); sctx->stat.verify_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("checksum/header error", sblock_to_check); if (sblock_bad->generation_error) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_GENERATION_ERRS); else btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); } if (sctx->readonly) { ASSERT(!sctx->is_dev_replace); goto out; } /* * now build and submit the bios for the other mirrors, check * checksums. * First try to pick the mirror which is completely without I/O * errors and also does not have a checksum error. * If one is found, and if a checksum is present, the full block * that is known to contain an error is rewritten. Afterwards * the block is known to be corrected. * If a mirror is found which is completely correct, and no * checksum is present, only those pages are rewritten that had * an I/O error in the block to be repaired, since it cannot be * determined, which copy of the other pages is better (and it * could happen otherwise that a correct page would be * overwritten by a bad one). 
*/ for (mirror_index = 0; ;mirror_index++) { struct scrub_block *sblock_other; if (mirror_index == failed_mirror_index) continue; /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */ if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) { if (mirror_index >= BTRFS_MAX_MIRRORS) break; if (!sblocks_for_recheck[mirror_index].page_count) break; sblock_other = sblocks_for_recheck + mirror_index; } else { struct scrub_recover *r = sblock_bad->pagev[0]->recover; int max_allowed = r->bbio->num_stripes - r->bbio->num_tgtdevs; if (mirror_index >= max_allowed) break; if (!sblocks_for_recheck[1].page_count) break; ASSERT(failed_mirror_index == 0); sblock_other = sblocks_for_recheck + 1; sblock_other->pagev[0]->mirror_num = 1 + mirror_index; } /* build and submit the bios, check checksums */ scrub_recheck_block(fs_info, sblock_other, 0); if (!sblock_other->header_error && !sblock_other->checksum_error && sblock_other->no_io_error_seen) { if (sctx->is_dev_replace) { scrub_write_block_to_dev_replace(sblock_other); goto corrected_error; } else { ret = scrub_repair_block_from_good_copy( sblock_bad, sblock_other); if (!ret) goto corrected_error; } } } if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) goto did_not_correct_error; /* * In case of I/O errors in the area that is supposed to be * repaired, continue by picking good copies of those pages. * Select the good pages from mirrors to rewrite bad pages from * the area to fix. Afterwards verify the checksum of the block * that is supposed to be repaired. This verification step is * only done for the purpose of statistic counting and for the * final scrub report, whether errors remain. * A perfect algorithm could make use of the checksum and try * all possible combinations of pages from the different mirrors * until the checksum verification succeeds. For example, when * the 2nd page of mirror #1 faces I/O errors, and the 2nd page * of mirror #2 is readable but the final checksum test fails, * then the 2nd page of mirror #3 could be tried, whether now * the final checksum succeeds. But this would be a rare * exception and is therefore not implemented. At least it is * avoided that the good copy is overwritten. * A more useful improvement would be to pick the sectors * without I/O error based on sector sizes (512 bytes on legacy * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one * mirror could be repaired by taking 512 byte of a different * mirror, even if other 512 byte sectors in the same PAGE_SIZE * area are unreadable. */ success = 1; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_block *sblock_other = NULL; /* skip no-io-error page in scrub */ if (!page_bad->io_error && !sctx->is_dev_replace) continue; if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) { /* * In case of dev replace, if raid56 rebuild process * didn't work out correct data, then copy the content * in sblock_bad to make sure target device is identical * to source device, instead of writing garbage data in * sblock_for_recheck array to target device. */ sblock_other = NULL; } else if (page_bad->io_error) { /* try to find no-io-error page in mirrors */ for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS && sblocks_for_recheck[mirror_index].page_count > 0; mirror_index++) { if (!sblocks_for_recheck[mirror_index]. 
pagev[page_num]->io_error) { sblock_other = sblocks_for_recheck + mirror_index; break; } } if (!sblock_other) success = 0; } if (sctx->is_dev_replace) { /* * did not find a mirror to fetch the page * from. scrub_write_page_to_dev_replace() * handles this case (page->io_error), by * filling the block with zeros before * submitting the write request */ if (!sblock_other) sblock_other = sblock_bad; if (scrub_write_page_to_dev_replace(sblock_other, page_num) != 0) { atomic64_inc( &fs_info->dev_replace.num_write_errors); success = 0; } } else if (sblock_other) { ret = scrub_repair_page_from_good_copy(sblock_bad, sblock_other, page_num, 0); if (0 == ret) page_bad->io_error = 0; else success = 0; } } if (success && !sctx->is_dev_replace) { if (is_metadata || have_csum) { /* * need to verify the checksum now that all * sectors on disk are repaired (the write * request for data to be repaired is on its way). * Just be lazy and use scrub_recheck_block() * which re-reads the data before the checksum * is verified, but most likely the data comes out * of the page cache. */ scrub_recheck_block(fs_info, sblock_bad, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) goto corrected_error; else goto did_not_correct_error; } else { corrected_error: spin_lock(&sctx->stat_lock); sctx->stat.corrected_errors++; sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "fixed up error at logical %llu on dev %s", logical, rcu_str_deref(dev->name)); } } else { did_not_correct_error: spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "unable to fixup (regular) error at logical %llu on dev %s", logical, rcu_str_deref(dev->name)); } out: if (sblocks_for_recheck) { for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) { struct scrub_block *sblock = sblocks_for_recheck + mirror_index; struct scrub_recover *recover; int page_index; for (page_index = 0; page_index < sblock->page_count; page_index++) { sblock->pagev[page_index]->sblock = NULL; recover = sblock->pagev[page_index]->recover; if (recover) { scrub_put_recover(fs_info, recover); sblock->pagev[page_index]->recover = NULL; } scrub_page_put(sblock->pagev[page_index]); } } kfree(sblocks_for_recheck); } ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); memalloc_nofs_restore(nofs_flag); if (ret < 0) return ret; return 0; } static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) { if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) return 2; else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) return 3; else return (int)bbio->num_stripes; } static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, u64 *raid_map, u64 mapped_length, int nstripes, int mirror, int *stripe_index, u64 *stripe_offset) { int i; if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* RAID5/6 */ for (i = 0; i < nstripes; i++) { if (raid_map[i] == RAID6_Q_STRIPE || raid_map[i] == RAID5_P_STRIPE) continue; if (logical >= raid_map[i] && logical < raid_map[i] + mapped_length) break; } *stripe_index = i; *stripe_offset = logical - raid_map[i]; } else { /* The other RAID type */ *stripe_index = mirror; *stripe_offset = 0; } } static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck) { struct scrub_ctx *sctx = original_sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 length = original_sblock->page_count * PAGE_SIZE; u64 logical 
= original_sblock->pagev[0]->logical; u64 generation = original_sblock->pagev[0]->generation; u64 flags = original_sblock->pagev[0]->flags; u64 have_csum = original_sblock->pagev[0]->have_csum; struct scrub_recover *recover; struct btrfs_bio *bbio; u64 sublen; u64 mapped_length; u64 stripe_offset; int stripe_index; int page_index = 0; int mirror_index; int nmirrors; int ret; /* * note: the two members refs and outstanding_pages * are not used (and not set) in the blocks that are used for * the recheck procedure */ while (length > 0) { sublen = min_t(u64, length, PAGE_SIZE); mapped_length = sublen; bbio = NULL; /* * with a length of PAGE_SIZE, each returned stripe * represents one mirror */ btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &mapped_length, &bbio); if (ret || !bbio || mapped_length < sublen) { btrfs_put_bbio(bbio); btrfs_bio_counter_dec(fs_info); return -EIO; } recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); if (!recover) { btrfs_put_bbio(bbio); btrfs_bio_counter_dec(fs_info); return -ENOMEM; } refcount_set(&recover->refs, 1); recover->bbio = bbio; recover->map_length = mapped_length; BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK); nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; struct scrub_page *page; sblock = sblocks_for_recheck + mirror_index; sblock->sctx = sctx; page = kzalloc(sizeof(*page), GFP_NOFS); if (!page) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_put_recover(fs_info, recover); return -ENOMEM; } scrub_page_get(page); sblock->pagev[page_index] = page; page->sblock = sblock; page->flags = flags; page->generation = generation; page->logical = logical; page->have_csum = have_csum; if (have_csum) memcpy(page->csum, original_sblock->pagev[0]->csum, sctx->csum_size); scrub_stripe_index_and_offset(logical, bbio->map_type, bbio->raid_map, mapped_length, bbio->num_stripes - bbio->num_tgtdevs, mirror_index, &stripe_index, &stripe_offset); page->physical = bbio->stripes[stripe_index].physical + stripe_offset; page->dev = bbio->stripes[stripe_index].dev; BUG_ON(page_index >= original_sblock->page_count); page->physical_for_dev_replace = original_sblock->pagev[page_index]-> physical_for_dev_replace; /* for missing devices, dev->bdev is NULL */ page->mirror_num = mirror_index + 1; sblock->page_count++; page->page = alloc_page(GFP_NOFS); if (!page->page) goto leave_nomem; scrub_get_recover(recover); page->recover = recover; } scrub_put_recover(fs_info, recover); length -= sublen; logical += sublen; page_index++; } return 0; } static void scrub_bio_wait_endio(struct bio *bio) { complete(bio->bi_private); } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, struct bio *bio, struct scrub_page *page) { DECLARE_COMPLETION_ONSTACK(done); int ret; int mirror_num; bio->bi_iter.bi_sector = page->logical >> 9; bio->bi_private = &done; bio->bi_end_io = scrub_bio_wait_endio; mirror_num = page->sblock->pagev[0]->mirror_num; ret = raid56_parity_recover(fs_info, bio, page->recover->bbio, page->recover->map_length, mirror_num, 0); if (ret) return ret; wait_for_completion_io(&done); return blk_status_to_errno(bio->bi_status); } static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info, struct scrub_block *sblock) { struct scrub_page *first_page = sblock->pagev[0]; struct bio *bio; int page_num; /* All pages in sblock 
belong to the same stripe on the same device. */ ASSERT(first_page->dev); if (!first_page->dev->bdev) goto out; bio = btrfs_io_bio_alloc(BIO_MAX_PAGES); bio_set_dev(bio, first_page->dev->bdev); for (page_num = 0; page_num < sblock->page_count; page_num++) { struct scrub_page *page = sblock->pagev[page_num]; WARN_ON(!page->page); bio_add_page(bio, page->page, PAGE_SIZE, 0); } if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) { bio_put(bio); goto out; } bio_put(bio); scrub_recheck_block_checksum(sblock); return; out: for (page_num = 0; page_num < sblock->page_count; page_num++) sblock->pagev[page_num]->io_error = 1; sblock->no_io_error_seen = 0; } /* * this function will check the on disk data for checksum errors, header * errors and read I/O errors. If any I/O errors happen, the exact pages * which are errored are marked as being bad. The goal is to enable scrub * to take those pages that are not errored from all the mirrors so that * the pages that are errored in the just handled mirror can be repaired. */ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int retry_failed_mirror) { int page_num; sblock->no_io_error_seen = 1; /* short cut for raid56 */ if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0])) return scrub_recheck_block_on_raid56(fs_info, sblock); for (page_num = 0; page_num < sblock->page_count; page_num++) { struct bio *bio; struct scrub_page *page = sblock->pagev[page_num]; if (page->dev->bdev == NULL) { page->io_error = 1; sblock->no_io_error_seen = 0; continue; } WARN_ON(!page->page); bio = btrfs_io_bio_alloc(1); bio_set_dev(bio, page->dev->bdev); bio_add_page(bio, page->page, PAGE_SIZE, 0); bio->bi_iter.bi_sector = page->physical >> 9; bio->bi_opf = REQ_OP_READ; if (btrfsic_submit_bio_wait(bio)) { page->io_error = 1; sblock->no_io_error_seen = 0; } bio_put(bio); } if (sblock->no_io_error_seen) scrub_recheck_block_checksum(sblock); } static inline int scrub_check_fsid(u8 fsid[], struct scrub_page *spage) { struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices; int ret; ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE); return !ret; } static void scrub_recheck_block_checksum(struct scrub_block *sblock) { sblock->header_error = 0; sblock->checksum_error = 0; sblock->generation_error = 0; if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA) scrub_checksum_data(sblock); else scrub_checksum_tree_block(sblock); } static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good) { int page_num; int ret = 0; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { int ret_sub; ret_sub = scrub_repair_page_from_good_copy(sblock_bad, sblock_good, page_num, 1); if (ret_sub) ret = ret_sub; } return ret; } static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_page *page_good = sblock_good->pagev[page_num]; struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; BUG_ON(page_bad->page == NULL); BUG_ON(page_good->page == NULL); if (force_write || sblock_bad->header_error || sblock_bad->checksum_error || page_bad->io_error) { struct bio *bio; int ret; if (!page_bad->dev->bdev) { btrfs_warn_rl(fs_info, "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected"); return -EIO; } bio = btrfs_io_bio_alloc(1); bio_set_dev(bio, page_bad->dev->bdev); bio->bi_iter.bi_sector = page_bad->physical >> 9; 
bio->bi_opf = REQ_OP_WRITE; ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); if (PAGE_SIZE != ret) { bio_put(bio); return -EIO; } if (btrfsic_submit_bio_wait(bio)) { btrfs_dev_stat_inc_and_print(page_bad->dev, BTRFS_DEV_STAT_WRITE_ERRS); atomic64_inc(&fs_info->dev_replace.num_write_errors); bio_put(bio); return -EIO; } bio_put(bio); } return 0; } static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) { struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; int page_num; /* * This block is used for the check of the parity on the source device, * so the data needn't be written into the destination device. */ if (sblock->sparity) return; for (page_num = 0; page_num < sblock->page_count; page_num++) { int ret; ret = scrub_write_page_to_dev_replace(sblock, page_num); if (ret) atomic64_inc(&fs_info->dev_replace.num_write_errors); } } static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, int page_num) { struct scrub_page *spage = sblock->pagev[page_num]; BUG_ON(spage->page == NULL); if (spage->io_error) { void *mapped_buffer = kmap_atomic(spage->page); clear_page(mapped_buffer); flush_dcache_page(spage->page); kunmap_atomic(mapped_buffer); } return scrub_add_page_to_wr_bio(sblock->sctx, spage); } static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { struct scrub_bio *sbio; int ret; mutex_lock(&sctx->wr_lock); again: if (!sctx->wr_curr_bio) { sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), GFP_KERNEL); if (!sctx->wr_curr_bio) { mutex_unlock(&sctx->wr_lock); return -ENOMEM; } sctx->wr_curr_bio->sctx = sctx; sctx->wr_curr_bio->page_count = 0; } sbio = sctx->wr_curr_bio; if (sbio->page_count == 0) { struct bio *bio; sbio->physical = spage->physical_for_dev_replace; sbio->logical = spage->logical; sbio->dev = sctx->wr_tgtdev; bio = sbio->bio; if (!bio) { bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio); sbio->bio = bio; } bio->bi_private = sbio; bio->bi_end_io = scrub_wr_bio_end_io; bio_set_dev(bio, sbio->dev->bdev); bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_opf = REQ_OP_WRITE; sbio->status = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical_for_dev_replace || sbio->logical + sbio->page_count * PAGE_SIZE != spage->logical) { scrub_wr_submit(sctx); goto again; } ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { if (sbio->page_count < 1) { bio_put(sbio->bio); sbio->bio = NULL; mutex_unlock(&sctx->wr_lock); return -EIO; } scrub_wr_submit(sctx); goto again; } sbio->pagev[sbio->page_count] = spage; scrub_page_get(spage); sbio->page_count++; if (sbio->page_count == sctx->pages_per_wr_bio) scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); return 0; } static void scrub_wr_submit(struct scrub_ctx *sctx) { struct scrub_bio *sbio; if (!sctx->wr_curr_bio) return; sbio = sctx->wr_curr_bio; sctx->wr_curr_bio = NULL; WARN_ON(!sbio->bio->bi_disk); scrub_pending_bio_inc(sctx); /* process all writes in a single worker thread. 
Then the block layer * orders the requests before sending them to the driver which * doubled the write performance on spinning disks when measured * with Linux 3.5 */ btrfsic_submit_bio(sbio->bio); } static void scrub_wr_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->fs_info; sbio->status = bio->bi_status; sbio->bio = bio; btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, scrub_wr_bio_end_io_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); } static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; int i; WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); if (sbio->status) { struct btrfs_dev_replace *dev_replace = &sbio->sctx->fs_info->dev_replace; for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; spage->io_error = 1; atomic64_inc(&dev_replace->num_write_errors); } } for (i = 0; i < sbio->page_count; i++) scrub_page_put(sbio->pagev[i]); bio_put(sbio->bio); kfree(sbio); scrub_pending_bio_dec(sctx); } static int scrub_checksum(struct scrub_block *sblock) { u64 flags; int ret; /* * No need to initialize these stats currently, * because this function only use return value * instead of these stats value. * * Todo: * always use stats */ sblock->header_error = 0; sblock->generation_error = 0; sblock->checksum_error = 0; WARN_ON(sblock->page_count < 1); flags = sblock->pagev[0]->flags; ret = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) ret = scrub_checksum_data(sblock); else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) ret = scrub_checksum_tree_block(sblock); else if (flags & BTRFS_EXTENT_FLAG_SUPER) (void)scrub_checksum_super(sblock); else WARN_ON(1); if (ret) scrub_handle_errored_block(sblock); return ret; } static int scrub_checksum_data(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; u8 csum[BTRFS_CSUM_SIZE]; u8 *on_disk_csum; struct page *page; void *buffer; u32 crc = ~(u32)0; u64 len; int index; BUG_ON(sblock->page_count < 1); if (!sblock->pagev[0]->have_csum) return 0; on_disk_csum = sblock->pagev[0]->csum; page = sblock->pagev[0]->page; buffer = kmap_atomic(page); len = sctx->fs_info->sectorsize; index = 0; for (;;) { u64 l = min_t(u64, len, PAGE_SIZE); crc = btrfs_csum_data(buffer, crc, l); kunmap_atomic(buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; buffer = kmap_atomic(page); } btrfs_csum_final(crc, csum); if (memcmp(csum, on_disk_csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->checksum_error; } static int scrub_checksum_tree_block(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; struct btrfs_header *h; struct btrfs_fs_info *fs_info = sctx->fs_info; u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; struct page *page; void *mapped_buffer; u64 mapped_size; void *p; u32 crc = ~(u32)0; u64 len; int index; BUG_ON(sblock->page_count < 1); page = sblock->pagev[0]->page; mapped_buffer = kmap_atomic(page); h = (struct btrfs_header *)mapped_buffer; memcpy(on_disk_csum, h->csum, sctx->csum_size); /* * we don't use the getter functions here, as we * a) don't have an extent buffer and * b) the page is already kmapped */ if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h)) sblock->header_error = 1; if (sblock->pagev[0]->generation != 
btrfs_stack_header_generation(h)) { sblock->header_error = 1; sblock->generation_error = 1; } if (!scrub_check_fsid(h->fsid, sblock->pagev[0])) sblock->header_error = 1; if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) sblock->header_error = 1; len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE; mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; index = 0; for (;;) { u64 l = min_t(u64, len, mapped_size); crc = btrfs_csum_data(p, crc, l); kunmap_atomic(mapped_buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; mapped_buffer = kmap_atomic(page); mapped_size = PAGE_SIZE; p = mapped_buffer; } btrfs_csum_final(crc, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->header_error || sblock->checksum_error; } static int scrub_checksum_super(struct scrub_block *sblock) { struct btrfs_super_block *s; struct scrub_ctx *sctx = sblock->sctx; u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; struct page *page; void *mapped_buffer; u64 mapped_size; void *p; u32 crc = ~(u32)0; int fail_gen = 0; int fail_cor = 0; u64 len; int index; BUG_ON(sblock->page_count < 1); page = sblock->pagev[0]->page; mapped_buffer = kmap_atomic(page); s = (struct btrfs_super_block *)mapped_buffer; memcpy(on_disk_csum, s->csum, sctx->csum_size); if (sblock->pagev[0]->logical != btrfs_super_bytenr(s)) ++fail_cor; if (sblock->pagev[0]->generation != btrfs_super_generation(s)) ++fail_gen; if (!scrub_check_fsid(s->fsid, sblock->pagev[0])) ++fail_cor; len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE; mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; index = 0; for (;;) { u64 l = min_t(u64, len, mapped_size); crc = btrfs_csum_data(p, crc, l); kunmap_atomic(mapped_buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; mapped_buffer = kmap_atomic(page); mapped_size = PAGE_SIZE; p = mapped_buffer; } btrfs_csum_final(crc, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) ++fail_cor; if (fail_cor + fail_gen) { /* * if we find an error in a super block, we just report it. 
* They will get written with the next transaction commit * anyway */ spin_lock(&sctx->stat_lock); ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); if (fail_cor) btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); else btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, BTRFS_DEV_STAT_GENERATION_ERRS); } return fail_cor + fail_gen; } static void scrub_block_get(struct scrub_block *sblock) { refcount_inc(&sblock->refs); } static void scrub_block_put(struct scrub_block *sblock) { if (refcount_dec_and_test(&sblock->refs)) { int i; if (sblock->sparity) scrub_parity_put(sblock->sparity); for (i = 0; i < sblock->page_count; i++) scrub_page_put(sblock->pagev[i]); kfree(sblock); } } static void scrub_page_get(struct scrub_page *spage) { atomic_inc(&spage->refs); } static void scrub_page_put(struct scrub_page *spage) { if (atomic_dec_and_test(&spage->refs)) { if (spage->page) __free_page(spage->page); kfree(spage); } } static void scrub_submit(struct scrub_ctx *sctx) { struct scrub_bio *sbio; if (sctx->curr == -1) return; sbio = sctx->bios[sctx->curr]; sctx->curr = -1; scrub_pending_bio_inc(sctx); btrfsic_submit_bio(sbio->bio); } static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { struct scrub_block *sblock = spage->sblock; struct scrub_bio *sbio; int ret; again: /* * grab a fresh bio or wait for one to become available */ while (sctx->curr == -1) { spin_lock(&sctx->list_lock); sctx->curr = sctx->first_free; if (sctx->curr != -1) { sctx->first_free = sctx->bios[sctx->curr]->next_free; sctx->bios[sctx->curr]->next_free = -1; sctx->bios[sctx->curr]->page_count = 0; spin_unlock(&sctx->list_lock); } else { spin_unlock(&sctx->list_lock); wait_event(sctx->list_wait, sctx->first_free != -1); } } sbio = sctx->bios[sctx->curr]; if (sbio->page_count == 0) { struct bio *bio; sbio->physical = spage->physical; sbio->logical = spage->logical; sbio->dev = spage->dev; bio = sbio->bio; if (!bio) { bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio); sbio->bio = bio; } bio->bi_private = sbio; bio->bi_end_io = scrub_bio_end_io; bio_set_dev(bio, sbio->dev->bdev); bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_opf = REQ_OP_READ; sbio->status = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical || sbio->logical + sbio->page_count * PAGE_SIZE != spage->logical || sbio->dev != spage->dev) { scrub_submit(sctx); goto again; } sbio->pagev[sbio->page_count] = spage; ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { if (sbio->page_count < 1) { bio_put(sbio->bio); sbio->bio = NULL; return -EIO; } scrub_submit(sctx); goto again; } scrub_block_get(sblock); /* one for the page added to the bio */ atomic_inc(&sblock->outstanding_pages); sbio->page_count++; if (sbio->page_count == sctx->pages_per_rd_bio) scrub_submit(sctx); return 0; } static void scrub_missing_raid56_end_io(struct bio *bio) { struct scrub_block *sblock = bio->bi_private; struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; if (bio->bi_status) sblock->no_io_error_seen = 0; bio_put(bio); btrfs_queue_work(fs_info->scrub_workers, &sblock->work); } static void scrub_missing_raid56_worker(struct btrfs_work *work) { struct scrub_block *sblock = container_of(work, struct scrub_block, work); struct scrub_ctx *sctx = sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 logical; struct btrfs_device *dev; logical = sblock->pagev[0]->logical; dev = sblock->pagev[0]->dev; if (sblock->no_io_error_seen) 
scrub_recheck_block_checksum(sblock); if (!sblock->no_io_error_seen) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "IO error rebuilding logical %llu for dev %s", logical, rcu_str_deref(dev->name)); } else if (sblock->header_error || sblock->checksum_error) { spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "failed to rebuild valid logical %llu for dev %s", logical, rcu_str_deref(dev->name)); } else { scrub_write_block_to_dev_replace(sblock); } scrub_block_put(sblock); if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } scrub_pending_bio_dec(sctx); } static void scrub_missing_raid56_pages(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 length = sblock->page_count * PAGE_SIZE; u64 logical = sblock->pagev[0]->logical; struct btrfs_bio *bbio = NULL; struct bio *bio; struct btrfs_raid_bio *rbio; int ret; int i; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &length, &bbio); if (ret || !bbio || !bbio->raid_map) goto bbio_out; if (WARN_ON(!sctx->is_dev_replace || !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { /* * We shouldn't be scrubbing a missing device. Even for dev * replace, we should only get here for RAID 5/6. We either * managed to mount something with no mirrors remaining or * there's a bug in scrub_remap_extent()/btrfs_map_block(). */ goto bbio_out; } bio = btrfs_io_bio_alloc(0); bio->bi_iter.bi_sector = logical >> 9; bio->bi_private = sblock; bio->bi_end_io = scrub_missing_raid56_end_io; rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length); if (!rbio) goto rbio_out; for (i = 0; i < sblock->page_count; i++) { struct scrub_page *spage = sblock->pagev[i]; raid56_add_scrub_pages(rbio, spage->page, spage->logical); } btrfs_init_work(&sblock->work, btrfs_scrub_helper, scrub_missing_raid56_worker, NULL, NULL); scrub_block_get(sblock); scrub_pending_bio_inc(sctx); raid56_submit_missing_rbio(rbio); return; rbio_out: bio_put(bio); bbio_out: btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(bbio); spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); } static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace) { struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); scrub_page_get(spage); sblock->pagev[index] = spage; spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->physical_for_dev_replace = 
physical_for_dev_replace; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } WARN_ON(sblock->page_count == 0); if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { /* * This case should only be hit for RAID 5/6 device replace. See * the comment in scrub_missing_raid56_pages() for details. */ scrub_missing_raid56_pages(sblock); } else { for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } if (force) scrub_submit(sctx); } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; } static void scrub_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->fs_info; sbio->status = bio->bi_status; sbio->bio = bio; btrfs_queue_work(fs_info->scrub_workers, &sbio->work); } static void scrub_bio_end_io_worker(struct btrfs_work *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; int i; BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); if (sbio->status) { for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; spage->io_error = 1; spage->sblock->no_io_error_seen = 0; } } /* now complete the scrub_block items that have all pages completed */ for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; struct scrub_block *sblock = spage->sblock; if (atomic_dec_and_test(&sblock->outstanding_pages)) scrub_block_complete(sblock); scrub_block_put(sblock); } bio_put(sbio->bio); sbio->bio = NULL; spin_lock(&sctx->list_lock); sbio->next_free = sctx->first_free; sctx->first_free = sbio->index; spin_unlock(&sctx->list_lock); if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } scrub_pending_bio_dec(sctx); } static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, unsigned long *bitmap, u64 start, u64 len) { u64 offset; u64 nsectors64; u32 nsectors; int sectorsize = sparity->sctx->fs_info->sectorsize; if (len >= sparity->stripe_len) { bitmap_set(bitmap, 0, sparity->nsectors); return; } start -= sparity->logic_start; start = div64_u64_rem(start, sparity->stripe_len, &offset); offset = div_u64(offset, sectorsize); nsectors64 = div_u64(len, sectorsize); ASSERT(nsectors64 < UINT_MAX); nsectors = (u32)nsectors64; if (offset + nsectors <= sparity->nsectors) { bitmap_set(bitmap, offset, nsectors); return; } bitmap_set(bitmap, offset, sparity->nsectors - offset); bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); } static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, u64 start, u64 len) { __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len); } static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, u64 start, u64 len) { __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len); } static void scrub_block_complete(struct scrub_block *sblock) { int corrupted = 0; if (!sblock->no_io_error_seen) { corrupted = 1; scrub_handle_errored_block(sblock); } else { /* * if has checksum error, write via repair mechanism in * dev replace case, 
otherwise write here in dev replace * case. */ corrupted = scrub_checksum(sblock); if (!corrupted && sblock->sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock); } if (sblock->sparity && corrupted && !sblock->data_corrected) { u64 start = sblock->pagev[0]->logical; u64 end = sblock->pagev[sblock->page_count - 1]->logical + PAGE_SIZE; scrub_parity_mark_sectors_error(sblock->sparity, start, end - start); } } static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) { struct btrfs_ordered_sum *sum = NULL; unsigned long index; unsigned long num_sectors; while (!list_empty(&sctx->csum_list)) { sum = list_first_entry(&sctx->csum_list, struct btrfs_ordered_sum, list); if (sum->bytenr > logical) return 0; if (sum->bytenr + sum->len > logical) break; ++sctx->stat.csum_discards; list_del(&sum->list); kfree(sum); sum = NULL; } if (!sum) return 0; index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize); ASSERT(index < UINT_MAX); num_sectors = sum->len / sctx->fs_info->sectorsize; memcpy(csum, sum->sums + index, sctx->csum_size); if (index == num_sectors - 1) { list_del(&sum->list); kfree(sum); } return 1; } /* scrub extent tries to collect up to 64 kB for each bio */ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u64 physical_for_dev_replace) { int ret; u8 csum[BTRFS_CSUM_SIZE]; u32 blocksize; if (flags & BTRFS_EXTENT_FLAG_DATA) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) blocksize = map->stripe_len; else blocksize = sctx->fs_info->sectorsize; spin_lock(&sctx->stat_lock); sctx->stat.data_extents_scrubbed++; sctx->stat.data_bytes_scrubbed += len; spin_unlock(&sctx->stat_lock); } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) blocksize = map->stripe_len; else blocksize = sctx->fs_info->nodesize; spin_lock(&sctx->stat_lock); sctx->stat.tree_extents_scrubbed++; sctx->stat.tree_bytes_scrubbed += len; spin_unlock(&sctx->stat_lock); } else { blocksize = sctx->fs_info->sectorsize; WARN_ON(1); } while (len) { u64 l = min_t(u64, len, blocksize); int have_csum = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) { /* push csums to sbio */ have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) ++sctx->stat.no_csum; } ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? 
csum : NULL, 0, physical_for_dev_replace); if (ret) return ret; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } return 0; } static int scrub_pages_for_parity(struct scrub_parity *sparity, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum) { struct scrub_ctx *sctx = sparity->sctx; struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; sblock->sparity = sparity; scrub_parity_get(sparity); for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); /* For scrub block */ scrub_page_get(spage); sblock->pagev[index] = spage; /* For scrub parity */ scrub_page_get(spage); list_add_tail(&spage->list, &sparity->spages); spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; } WARN_ON(sblock->page_count == 0); for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; } static int scrub_extent_for_parity(struct scrub_parity *sparity, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num) { struct scrub_ctx *sctx = sparity->sctx; int ret; u8 csum[BTRFS_CSUM_SIZE]; u32 blocksize; if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { scrub_parity_mark_sectors_error(sparity, logical, len); return 0; } if (flags & BTRFS_EXTENT_FLAG_DATA) { blocksize = sparity->stripe_len; } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { blocksize = sparity->stripe_len; } else { blocksize = sctx->fs_info->sectorsize; WARN_ON(1); } while (len) { u64 l = min_t(u64, len, blocksize); int have_csum = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) { /* push csums to sbio */ have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) goto skip; } ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? csum : NULL); if (ret) return ret; skip: len -= l; logical += l; physical += l; } return 0; } /* * Given a physical address, this will calculate it's * logical offset. if this is a parity stripe, it will return * the most left data stripe's logical offset. * * return 0 if it is a data stripe, 1 means parity stripe. 
*/ static int get_raid56_logic_offset(u64 physical, int num, struct map_lookup *map, u64 *offset, u64 *stripe_start) { int i; int j = 0; u64 stripe_nr; u64 last_offset; u32 stripe_index; u32 rot; last_offset = (physical - map->stripes[num].physical) * nr_data_stripes(map); if (stripe_start) *stripe_start = last_offset; *offset = last_offset; for (i = 0; i < nr_data_stripes(map); i++) { *offset = last_offset + i * map->stripe_len; stripe_nr = div64_u64(*offset, map->stripe_len); stripe_nr = div_u64(stripe_nr, nr_data_stripes(map)); /* Work out the disk rotation on this stripe-set */ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); /* calculate which stripe this data locates */ rot += i; stripe_index = rot % map->num_stripes; if (stripe_index == num) return 0; if (stripe_index < num) j++; } *offset = last_offset + j * map->stripe_len; return 1; } static void scrub_free_parity(struct scrub_parity *sparity) { struct scrub_ctx *sctx = sparity->sctx; struct scrub_page *curr, *next; int nbits; nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors); if (nbits) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors += nbits; sctx->stat.uncorrectable_errors += nbits; spin_unlock(&sctx->stat_lock); } list_for_each_entry_safe(curr, next, &sparity->spages, list) { list_del_init(&curr->list); scrub_page_put(curr); } kfree(sparity); } static void scrub_parity_bio_endio_worker(struct btrfs_work *work) { struct scrub_parity *sparity = container_of(work, struct scrub_parity, work); struct scrub_ctx *sctx = sparity->sctx; scrub_free_parity(sparity); scrub_pending_bio_dec(sctx); } static void scrub_parity_bio_endio(struct bio *bio) { struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; if (bio->bi_status) bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); bio_put(bio); btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, scrub_parity_bio_endio_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); } static void scrub_parity_check_and_repair(struct scrub_parity *sparity) { struct scrub_ctx *sctx = sparity->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; struct bio *bio; struct btrfs_raid_bio *rbio; struct btrfs_bio *bbio = NULL; u64 length; int ret; if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap, sparity->nsectors)) goto out; length = sparity->logic_end - sparity->logic_start; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, &length, &bbio); if (ret || !bbio || !bbio->raid_map) goto bbio_out; bio = btrfs_io_bio_alloc(0); bio->bi_iter.bi_sector = sparity->logic_start >> 9; bio->bi_private = sparity; bio->bi_end_io = scrub_parity_bio_endio; rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, length, sparity->scrub_dev, sparity->dbitmap, sparity->nsectors); if (!rbio) goto rbio_out; scrub_pending_bio_inc(sctx); raid56_parity_submit_scrub_rbio(rbio); return; rbio_out: bio_put(bio); bbio_out: btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(bbio); bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); out: scrub_free_parity(sparity); } static inline int scrub_calc_parity_bitmap_len(int nsectors) { return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long); } static void scrub_parity_get(struct scrub_parity *sparity) { refcount_inc(&sparity->refs); } 
static void scrub_parity_put(struct scrub_parity *sparity) { if (!refcount_dec_and_test(&sparity->refs)) return; scrub_parity_check_and_repair(sparity); } static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *sdev, struct btrfs_path *path, u64 logic_start, u64 logic_end) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; struct btrfs_bio *bbio = NULL; u64 flags; int ret; int slot; struct extent_buffer *l; struct btrfs_key key; u64 generation; u64 extent_logical; u64 extent_physical; u64 extent_len; u64 mapped_length; struct btrfs_device *extent_dev; struct scrub_parity *sparity; int nsectors; int bitmap_len; int extent_mirror_num; int stop_loop = 0; nsectors = div_u64(map->stripe_len, fs_info->sectorsize); bitmap_len = scrub_calc_parity_bitmap_len(nsectors); sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, GFP_NOFS); if (!sparity) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } sparity->stripe_len = map->stripe_len; sparity->nsectors = nsectors; sparity->sctx = sctx; sparity->scrub_dev = sdev; sparity->logic_start = logic_start; sparity->logic_end = logic_end; refcount_set(&sparity->refs, 1); INIT_LIST_HEAD(&sparity->spages); sparity->dbitmap = sparity->bitmap; sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; ret = 0; while (logic_start < logic_end) { if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logic_start; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_extent_item(root, path, 0); if (ret < 0) goto out; if (ret > 0) { btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; } } stop_loop = 0; while (1) { u64 bytes; l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; stop_loop = 1; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) goto next; if (key.type == BTRFS_METADATA_ITEM_KEY) bytes = fs_info->nodesize; else bytes = key.offset; if (key.objectid + bytes <= logic_start) goto next; if (key.objectid >= logic_end) { stop_loop = 1; break; } while (key.objectid >= logic_start + map->stripe_len) logic_start += map->stripe_len; extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); generation = btrfs_extent_generation(l, extent); if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && (key.objectid < logic_start || key.objectid + bytes > logic_start + map->stripe_len)) { btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", key.objectid, logic_start); spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); goto next; } again: extent_logical = key.objectid; extent_len = bytes; if (extent_logical < logic_start) { extent_len -= logic_start - extent_logical; extent_logical = logic_start; } if (extent_logical + extent_len > logic_start + map->stripe_len) extent_len = logic_start + map->stripe_len - extent_logical; scrub_parity_mark_sectors_data(sparity, extent_logical, extent_len); mapped_length = extent_len; bbio = NULL; ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, &mapped_length, &bbio, 0); if (!ret) { if (!bbio || mapped_length < extent_len) ret = -EIO; } if (ret) { btrfs_put_bbio(bbio); goto out; } extent_physical = bbio->stripes[0].physical; extent_mirror_num = bbio->mirror_num; extent_dev = bbio->stripes[0].dev; btrfs_put_bbio(bbio); ret = btrfs_lookup_csums_range(csum_root, extent_logical, extent_logical + extent_len - 1, &sctx->csum_list, 1); if (ret) goto out; ret = scrub_extent_for_parity(sparity, extent_logical, extent_len, extent_physical, extent_dev, flags, generation, extent_mirror_num); scrub_free_csums(sctx); if (ret) goto out; if (extent_logical + extent_len < key.objectid + bytes) { logic_start += map->stripe_len; if (logic_start >= logic_end) { stop_loop = 1; break; } if (logic_start < key.objectid + bytes) { cond_resched(); goto again; } } next: path->slots[0]++; } btrfs_release_path(path); if (stop_loop) break; logic_start += map->stripe_len; } out: if (ret < 0) scrub_parity_mark_sectors_error(sparity, logic_start, logic_end - logic_start); scrub_parity_put(sparity); scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); btrfs_release_path(path); return ret < 0 ? 
ret : 0; } static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *scrub_dev, int num, u64 base, u64 length) { struct btrfs_path *path, *ppath; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; struct blk_plug plug; u64 flags; int ret; int slot; u64 nstripes; struct extent_buffer *l; u64 physical; u64 logical; u64 logic_end; u64 physical_end; u64 generation; int mirror_num; struct reada_control *reada1; struct reada_control *reada2; struct btrfs_key key; struct btrfs_key key_end; u64 increment = map->stripe_len; u64 offset; u64 extent_logical; u64 extent_physical; u64 extent_len; u64 stripe_logical; u64 stripe_end; struct btrfs_device *extent_dev; int extent_mirror_num; int stop_loop = 0; physical = map->stripes[num].physical; offset = 0; nstripes = div64_u64(length, map->stripe_len); if (map->type & BTRFS_BLOCK_GROUP_RAID0) { offset = map->stripe_len * num; increment = map->stripe_len * map->num_stripes; mirror_num = 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { int factor = map->num_stripes / map->sub_stripes; offset = map->stripe_len * (num / map->sub_stripes); increment = map->stripe_len * factor; mirror_num = num % map->sub_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical, num, map, &offset, NULL); increment = map->stripe_len * nr_data_stripes(map); mirror_num = 1; } else { increment = map->stripe_len; mirror_num = 1; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; ppath = btrfs_alloc_path(); if (!ppath) { btrfs_free_path(path); return -ENOMEM; } /* * work on commit root. The related disk blocks are static as * long as COW is applied. This means, it is save to rewrite * them to repair disk errors without any race conditions */ path->search_commit_root = 1; path->skip_locking = 1; ppath->search_commit_root = 1; ppath->skip_locking = 1; /* * trigger the readahead for extent tree csum tree and wait for * completion. During readahead, the scrub is officially paused * to not hold off transaction commits */ logical = base + offset; physical_end = physical + nstripes * map->stripe_len; if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical_end, num, map, &logic_end, NULL); logic_end += base; } else { logic_end = logical + increment * nstripes; } wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_blocked_if_needed(fs_info); /* FIXME it might be better to start readahead at commit root */ key.objectid = logical; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = (u64)0; key_end.objectid = logic_end; key_end.type = BTRFS_METADATA_ITEM_KEY; key_end.offset = (u64)-1; reada1 = btrfs_reada_add(root, &key, &key_end); key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.type = BTRFS_EXTENT_CSUM_KEY; key.offset = logical; key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key_end.type = BTRFS_EXTENT_CSUM_KEY; key_end.offset = logic_end; reada2 = btrfs_reada_add(csum_root, &key, &key_end); if (!IS_ERR(reada1)) btrfs_reada_wait(reada1); if (!IS_ERR(reada2)) btrfs_reada_wait(reada2); /* * collect all data csums for the stripe to avoid seeking during * the scrub. 
This might currently (crc32) end up to be about 1MB */ blk_start_plug(&plug); /* * now find all extents for each stripe and scrub them */ ret = 0; while (physical < physical_end) { /* * canceled? */ if (atomic_read(&fs_info->scrub_cancel_req) || atomic_read(&sctx->cancel_req)) { ret = -ECANCELED; goto out; } /* * check to see if we have to pause */ if (atomic_read(&fs_info->scrub_pause_req)) { /* push queued extents */ sctx->flush_all_writes = true; scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); sctx->flush_all_writes = false; scrub_blocked_if_needed(fs_info); } if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; if (ret) { /* it is parity strip */ stripe_logical += base; stripe_end = stripe_logical + increment; ret = scrub_raid56_parity(sctx, map, scrub_dev, ppath, stripe_logical, stripe_end); if (ret) goto out; goto skip; } } if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logical; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_extent_item(root, path, 0); if (ret < 0) goto out; if (ret > 0) { /* there's no smaller item, so stick with the * larger one */ btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; } } stop_loop = 0; while (1) { u64 bytes; l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; stop_loop = 1; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) goto next; if (key.type == BTRFS_METADATA_ITEM_KEY) bytes = fs_info->nodesize; else bytes = key.offset; if (key.objectid + bytes <= logical) goto next; if (key.objectid >= logical + map->stripe_len) { /* out of this device extent */ if (key.objectid >= logic_end) stop_loop = 1; break; } extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); generation = btrfs_extent_generation(l, extent); if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && (key.objectid < logical || key.objectid + bytes > logical + map->stripe_len)) { btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", key.objectid, logical); spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); goto next; } again: extent_logical = key.objectid; extent_len = bytes; /* * trim extent to this stripe */ if (extent_logical < logical) { extent_len -= logical - extent_logical; extent_logical = logical; } if (extent_logical + extent_len > logical + map->stripe_len) { extent_len = logical + map->stripe_len - extent_logical; } extent_physical = extent_logical - logical + physical; extent_dev = scrub_dev; extent_mirror_num = mirror_num; if (sctx->is_dev_replace) scrub_remap_extent(fs_info, extent_logical, extent_len, &extent_physical, &extent_dev, &extent_mirror_num); ret = btrfs_lookup_csums_range(csum_root, extent_logical, extent_logical + extent_len - 1, &sctx->csum_list, 1); if (ret) goto out; ret = scrub_extent(sctx, map, extent_logical, extent_len, extent_physical, extent_dev, flags, generation, extent_mirror_num, extent_logical - logical + physical); scrub_free_csums(sctx); if (ret) goto out; if (extent_logical + extent_len < key.objectid + bytes) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* * loop until we find next data stripe * or we have finished all stripes. */ loop: physical += map->stripe_len; ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; if (ret && physical < physical_end) { stripe_logical += base; stripe_end = stripe_logical + increment; ret = scrub_raid56_parity(sctx, map, scrub_dev, ppath, stripe_logical, stripe_end); if (ret) goto out; goto loop; } } else { physical += map->stripe_len; logical += increment; } if (logical < key.objectid + bytes) { cond_resched(); goto again; } if (physical >= physical_end) { stop_loop = 1; break; } } next: path->slots[0]++; } btrfs_release_path(path); skip: logical += increment; physical += map->stripe_len; spin_lock(&sctx->stat_lock); if (stop_loop) sctx->stat.last_physical = map->stripes[num].physical + length; else sctx->stat.last_physical = physical; spin_unlock(&sctx->stat_lock); if (stop_loop) break; } out: /* push queued extents */ scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); blk_finish_plug(&plug); btrfs_free_path(path); btrfs_free_path(ppath); return ret < 0 ? ret : 0; } static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 chunk_offset, u64 length, u64 dev_offset, struct btrfs_block_group_cache *cache) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; int i; int ret = 0; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); read_unlock(&map_tree->map_tree.lock); if (!em) { /* * Might have been an unused block group deleted by the cleaner * kthread or relocation. 
*/ spin_lock(&cache->lock); if (!cache->removed) ret = -EINVAL; spin_unlock(&cache->lock); return ret; } map = em->map_lookup; if (em->start != chunk_offset) goto out; if (em->len < length) goto out; for (i = 0; i < map->num_stripes; ++i) { if (map->stripes[i].dev->bdev == scrub_dev->bdev && map->stripes[i].physical == dev_offset) { ret = scrub_stripe(sctx, map, scrub_dev, i, chunk_offset, length); if (ret) goto out; } } out: free_extent_map(em); return ret; } static noinline_for_stack int scrub_enumerate_chunks(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 start, u64 end) { struct btrfs_dev_extent *dev_extent = NULL; struct btrfs_path *path; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->dev_root; u64 length; u64 chunk_offset; int ret = 0; int ro_set; int slot; struct extent_buffer *l; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_block_group_cache *cache; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = READA_FORWARD; path->search_commit_root = 1; path->skip_locking = 1; key.objectid = scrub_dev->devid; key.offset = 0ull; key.type = BTRFS_DEV_EXTENT_KEY; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) break; if (ret > 0) { if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(root, path); if (ret < 0) break; if (ret > 0) { ret = 0; break; } } else { ret = 0; } } l = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(l, &found_key, slot); if (found_key.objectid != scrub_dev->devid) break; if (found_key.type != BTRFS_DEV_EXTENT_KEY) break; if (found_key.offset >= end) break; if (found_key.offset < key.offset) break; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); length = btrfs_dev_extent_length(l, dev_extent); if (found_key.offset + length <= start) goto skip; chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); /* * get a reference on the corresponding block group to prevent * the chunk from going away while we scrub it */ cache = btrfs_lookup_block_group(fs_info, chunk_offset); /* some chunks are removed but not committed to disk yet, * continue scrubbing */ if (!cache) goto skip; /* * we need call btrfs_inc_block_group_ro() with scrubs_paused, * to avoid deadlock caused by: * btrfs_inc_block_group_ro() * -> btrfs_wait_for_commit() * -> btrfs_commit_transaction() * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); ret = btrfs_inc_block_group_ro(cache); if (!ret && sctx->is_dev_replace) { /* * If we are doing a device replace wait for any tasks * that started delalloc right before we set the block * group to RO mode, as they might have just allocated * an extent from it or decided they could do a nocow * write. And if any such tasks did that, wait for their * ordered extents to complete and then commit the * current transaction, so that we can later see the new * extent items in the extent tree - the ordered extents * create delayed data references (for cow writes) when * they complete, which will be run and insert the * corresponding extent items into the extent tree when * we commit the transaction they used when running * inode.c:btrfs_finish_ordered_io(). We later use * the commit root of the extent tree to find extents * to copy from the srcdev into the tgtdev, and we don't * want to miss any new extents. 
*/ btrfs_wait_block_group_reservations(cache); btrfs_wait_nocow_writers(cache); ret = btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->key.objectid, cache->key.offset); if (ret > 0) { struct btrfs_trans_handle *trans; trans = btrfs_join_transaction(root); if (IS_ERR(trans)) ret = PTR_ERR(trans); else ret = btrfs_commit_transaction(trans); if (ret) { scrub_pause_off(fs_info); btrfs_put_block_group(cache); break; } } } scrub_pause_off(fs_info); if (ret == 0) { ro_set = 1; } else if (ret == -ENOSPC) { /* * btrfs_inc_block_group_ro return -ENOSPC when it * failed in creating new chunk for metadata. * It is not a problem for scrub/replace, because * metadata are always cowed, and our scrub paused * commit_transactions. */ ro_set = 0; } else { btrfs_warn(fs_info, "failed setting block group ro: %d", ret); btrfs_put_block_group(cache); break; } down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_right = found_key.offset + length; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; up_write(&dev_replace->rwsem); ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, found_key.offset, cache); /* * flush, submit all pending read and write bios, afterwards * wait for them. * Note that in the dev replace case, a read request causes * write requests that are submitted in the read completion * worker. Therefore in the current situation, it is required * that all write requests are flushed, so that all read and * write requests are really completed when bios_in_flight * changes to 0. */ sctx->flush_all_writes = true; scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_pause_on(fs_info); /* * must be called before we decrease @scrub_paused. * make sure we don't block transaction commit while * we are waiting pending workers finished. */ wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); sctx->flush_all_writes = false; scrub_pause_off(fs_info); down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_left = dev_replace->cursor_right; dev_replace->item_needs_writeback = 1; up_write(&fs_info->dev_replace.rwsem); if (ro_set) btrfs_dec_block_group_ro(cache); /* * We might have prevented the cleaner kthread from deleting * this block group if it was already unused because we raced * and set it to RO mode first. So add it back to the unused * list, otherwise it might not ever be deleted unless a manual * balance is triggered or it becomes used and unused again. */ spin_lock(&cache->lock); if (!cache->removed && !cache->ro && cache->reserved == 0 && btrfs_block_group_used(&cache->item) == 0) { spin_unlock(&cache->lock); btrfs_mark_bg_unused(cache); } else { spin_unlock(&cache->lock); } btrfs_put_block_group(cache); if (ret) break; if (sctx->is_dev_replace && atomic64_read(&dev_replace->num_write_errors) > 0) { ret = -EIO; break; } if (sctx->stat.malloc_errors > 0) { ret = -ENOMEM; break; } skip: key.offset = found_key.offset + length; btrfs_release_path(path); } btrfs_free_path(path); return ret; } static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev) { int i; u64 bytenr; u64 gen; int ret; struct btrfs_fs_info *fs_info = sctx->fs_info; if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) return -EIO; /* Seed devices of a new filesystem has their own generation. 
*/ if (scrub_dev->fs_devices != fs_info->fs_devices) gen = scrub_dev->generation; else gen = fs_info->last_trans_committed; for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->commit_total_bytes) break; ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1, bytenr); if (ret) return ret; } wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); return 0; } /* * get a reference count on fs_info->scrub_workers. start worker if necessary */ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; if (fs_info->scrub_workers_refcnt == 0) { fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags, is_dev_replace ? 1 : max_active, 4); if (!fs_info->scrub_workers) goto fail_scrub_workers; fs_info->scrub_wr_completion_workers = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, max_active, 2); if (!fs_info->scrub_wr_completion_workers) goto fail_scrub_wr_completion_workers; fs_info->scrub_parity_workers = btrfs_alloc_workqueue(fs_info, "scrubparity", flags, max_active, 2); if (!fs_info->scrub_parity_workers) goto fail_scrub_parity_workers; } ++fs_info->scrub_workers_refcnt; return 0; fail_scrub_parity_workers: btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); fail_scrub_wr_completion_workers: btrfs_destroy_workqueue(fs_info->scrub_workers); fail_scrub_workers: return -ENOMEM; } static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) { if (--fs_info->scrub_workers_refcnt == 0) { btrfs_destroy_workqueue(fs_info->scrub_workers); btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); btrfs_destroy_workqueue(fs_info->scrub_parity_workers); } WARN_ON(fs_info->scrub_workers_refcnt < 0); } int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, u64 end, struct btrfs_scrub_progress *progress, int readonly, int is_dev_replace) { struct scrub_ctx *sctx; int ret; struct btrfs_device *dev; unsigned int nofs_flag; if (btrfs_fs_closing(fs_info)) return -EINVAL; if (fs_info->nodesize > BTRFS_STRIPE_LEN) { /* * in this case scrub is unable to calculate the checksum * the way scrub is implemented. Do not handle this * situation at all because it won't ever happen. 
*/ btrfs_err(fs_info, "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails", fs_info->nodesize, BTRFS_STRIPE_LEN); return -EINVAL; } if (fs_info->sectorsize != PAGE_SIZE) { /* not supported for data w/o checksums */ btrfs_err_rl(fs_info, "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails", fs_info->sectorsize, PAGE_SIZE); return -EINVAL; } if (fs_info->nodesize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK || fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { /* * would exhaust the array bounds of pagev member in * struct scrub_block */ btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails", fs_info->nodesize, SCRUB_MAX_PAGES_PER_BLOCK, fs_info->sectorsize, SCRUB_MAX_PAGES_PER_BLOCK); return -EINVAL; } /* Allocate outside of device_list_mutex */ sctx = scrub_setup_ctx(fs_info, is_dev_replace); if (IS_ERR(sctx)) return PTR_ERR(sctx); mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; goto out_free_ctx; } if (!is_dev_replace && !readonly && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; goto out_free_ctx; } mutex_lock(&fs_info->scrub_lock); if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; goto out_free_ctx; } down_read(&fs_info->dev_replace.rwsem); if (dev->scrub_ctx || (!is_dev_replace && btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { up_read(&fs_info->dev_replace.rwsem); mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; goto out_free_ctx; } up_read(&fs_info->dev_replace.rwsem); ret = scrub_workers_get(fs_info, is_dev_replace); if (ret) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); goto out_free_ctx; } sctx->readonly = readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); /* * checking @scrub_pause_req here, we can avoid * race between committing transaction and scrubbing. */ __scrub_blocked_if_needed(fs_info); atomic_inc(&fs_info->scrubs_running); mutex_unlock(&fs_info->scrub_lock); /* * In order to avoid deadlock with reclaim when there is a transaction * trying to pause scrub, make sure we use GFP_NOFS for all the * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity() * invoked by our callees. The pausing request is done when the * transaction commit starts, and it blocks the transaction until scrub * is paused (done at specific points at scrub_stripe() or right above * before incrementing fs_info->scrubs_running). */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { /* * by holding device list mutex, we can * kick off writing super in log tree sync. 
*/ mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); } if (!ret) ret = scrub_enumerate_chunks(sctx, dev, start, end); memalloc_nofs_restore(nofs_flag); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); atomic_dec(&fs_info->scrubs_running); wake_up(&fs_info->scrub_pause_wait); wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); if (progress) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; scrub_workers_put(fs_info); mutex_unlock(&fs_info->scrub_lock); scrub_put_ctx(sctx); return ret; out_free_ctx: scrub_free_ctx(sctx); return ret; } void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); atomic_inc(&fs_info->scrub_pause_req); while (atomic_read(&fs_info->scrubs_paused) != atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_paused) == atomic_read(&fs_info->scrubs_running)); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); } void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) { atomic_dec(&fs_info->scrub_pause_req); wake_up(&fs_info->scrub_pause_wait); } int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); if (!atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&fs_info->scrub_cancel_req); while (atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_running) == 0); mutex_lock(&fs_info->scrub_lock); } atomic_dec(&fs_info->scrub_cancel_req); mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info, struct btrfs_device *dev) { struct scrub_ctx *sctx; mutex_lock(&fs_info->scrub_lock); sctx = dev->scrub_ctx; if (!sctx) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&sctx->cancel_req); while (dev->scrub_ctx) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, dev->scrub_ctx == NULL); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, struct btrfs_scrub_progress *progress) { struct btrfs_device *dev; struct scrub_ctx *sctx = NULL; mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (dev) sctx = dev->scrub_ctx; if (sctx) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_unlock(&fs_info->fs_devices->device_list_mutex); return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; } static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u64 extent_len, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num) { u64 mapped_length; struct btrfs_bio *bbio = NULL; int ret; mapped_length = extent_len; ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, &mapped_length, &bbio, 0); if (ret || !bbio || mapped_length < extent_len || !bbio->stripes[0].dev->bdev) { btrfs_put_bbio(bbio); return; } *extent_physical = bbio->stripes[0].physical; *extent_mirror_num = bbio->mirror_num; *extent_dev = bbio->stripes[0].dev; btrfs_put_bbio(bbio); }
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, u64 end, struct btrfs_scrub_progress *progress, int readonly, int is_dev_replace) { struct scrub_ctx *sctx; int ret; struct btrfs_device *dev; unsigned int nofs_flag; if (btrfs_fs_closing(fs_info)) return -EINVAL; if (fs_info->nodesize > BTRFS_STRIPE_LEN) { /* * in this case scrub is unable to calculate the checksum * the way scrub is implemented. Do not handle this * situation at all because it won't ever happen. */ btrfs_err(fs_info, "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails", fs_info->nodesize, BTRFS_STRIPE_LEN); return -EINVAL; } if (fs_info->sectorsize != PAGE_SIZE) { /* not supported for data w/o checksums */ btrfs_err_rl(fs_info, "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails", fs_info->sectorsize, PAGE_SIZE); return -EINVAL; } if (fs_info->nodesize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK || fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { /* * would exhaust the array bounds of pagev member in * struct scrub_block */ btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails", fs_info->nodesize, SCRUB_MAX_PAGES_PER_BLOCK, fs_info->sectorsize, SCRUB_MAX_PAGES_PER_BLOCK); return -EINVAL; } /* Allocate outside of device_list_mutex */ sctx = scrub_setup_ctx(fs_info, is_dev_replace); if (IS_ERR(sctx)) return PTR_ERR(sctx); mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; goto out_free_ctx; } if (!is_dev_replace && !readonly && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; goto out_free_ctx; } mutex_lock(&fs_info->scrub_lock); if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; goto out_free_ctx; } down_read(&fs_info->dev_replace.rwsem); if (dev->scrub_ctx || (!is_dev_replace && btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { up_read(&fs_info->dev_replace.rwsem); mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; goto out_free_ctx; } up_read(&fs_info->dev_replace.rwsem); ret = scrub_workers_get(fs_info, is_dev_replace); if (ret) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); goto out_free_ctx; } sctx->readonly = readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); /* * checking @scrub_pause_req here, we can avoid * race between committing transaction and scrubbing. */ __scrub_blocked_if_needed(fs_info); atomic_inc(&fs_info->scrubs_running); mutex_unlock(&fs_info->scrub_lock); /* * In order to avoid deadlock with reclaim when there is a transaction * trying to pause scrub, make sure we use GFP_NOFS for all the * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity() * invoked by our callees. 
The pausing request is done when the * transaction commit starts, and it blocks the transaction until scrub * is paused (done at specific points at scrub_stripe() or right above * before incrementing fs_info->scrubs_running). */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { /* * by holding device list mutex, we can * kick off writing super in log tree sync. */ mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); } if (!ret) ret = scrub_enumerate_chunks(sctx, dev, start, end); memalloc_nofs_restore(nofs_flag); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); atomic_dec(&fs_info->scrubs_running); wake_up(&fs_info->scrub_pause_wait); wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); if (progress) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; scrub_workers_put(fs_info); mutex_unlock(&fs_info->scrub_lock); scrub_put_ctx(sctx); return ret; out_free_ctx: scrub_free_ctx(sctx); return ret; }
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, u64 end, struct btrfs_scrub_progress *progress, int readonly, int is_dev_replace) { struct scrub_ctx *sctx; int ret; struct btrfs_device *dev; unsigned int nofs_flag; if (btrfs_fs_closing(fs_info)) return -EINVAL; if (fs_info->nodesize > BTRFS_STRIPE_LEN) { /* * in this case scrub is unable to calculate the checksum * the way scrub is implemented. Do not handle this * situation at all because it won't ever happen. */ btrfs_err(fs_info, "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails", fs_info->nodesize, BTRFS_STRIPE_LEN); return -EINVAL; } if (fs_info->sectorsize != PAGE_SIZE) { /* not supported for data w/o checksums */ btrfs_err_rl(fs_info, "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails", fs_info->sectorsize, PAGE_SIZE); return -EINVAL; } if (fs_info->nodesize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK || fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { /* * would exhaust the array bounds of pagev member in * struct scrub_block */ btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails", fs_info->nodesize, SCRUB_MAX_PAGES_PER_BLOCK, fs_info->sectorsize, SCRUB_MAX_PAGES_PER_BLOCK); return -EINVAL; } /* Allocate outside of device_list_mutex */ sctx = scrub_setup_ctx(fs_info, is_dev_replace); if (IS_ERR(sctx)) return PTR_ERR(sctx); mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; goto out_free_ctx; } if (!is_dev_replace && !readonly && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; goto out_free_ctx; } mutex_lock(&fs_info->scrub_lock); if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; goto out_free_ctx; } down_read(&fs_info->dev_replace.rwsem); if (dev->scrub_ctx || (!is_dev_replace && btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { up_read(&fs_info->dev_replace.rwsem); mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; goto out_free_ctx; } up_read(&fs_info->dev_replace.rwsem); ret = scrub_workers_get(fs_info, is_dev_replace); if (ret) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); goto out_free_ctx; } sctx->readonly = readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); /* * checking @scrub_pause_req here, we can avoid * race between committing transaction and scrubbing. */ __scrub_blocked_if_needed(fs_info); atomic_inc(&fs_info->scrubs_running); mutex_unlock(&fs_info->scrub_lock); /* * In order to avoid deadlock with reclaim when there is a transaction * trying to pause scrub, make sure we use GFP_NOFS for all the * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity() * invoked by our callees. 
The pausing request is done when the * transaction commit starts, and it blocks the transaction until scrub * is paused (done at specific points at scrub_stripe() or right above * before incrementing fs_info->scrubs_running). */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { /* * by holding device list mutex, we can * kick off writing super in log tree sync. */ mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); } if (!ret) ret = scrub_enumerate_chunks(sctx, dev, start, end); memalloc_nofs_restore(nofs_flag); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); atomic_dec(&fs_info->scrubs_running); wake_up(&fs_info->scrub_pause_wait); wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); if (progress) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; scrub_workers_put(fs_info); mutex_unlock(&fs_info->scrub_lock); scrub_put_ctx(sctx); return ret; out_free_ctx: scrub_free_ctx(sctx); return ret; }
{'added': [(3838, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);'), (4015, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);')], 'deleted': [(3838, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);'), (4015, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);')]}
2
2
3051
18241
https://github.com/torvalds/linux
CVE-2019-18885
['CWE-476']
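As an illustrative aside on the row above: the recorded diff shows both btrfs_find_device() call sites in scrub.c gaining a fifth argument, true, and the surrounding code guards the returned pointer with "if (!dev || ...)" before using it, which matches the CWE-476 tag on this row. The self-contained C sketch below mirrors that shape under stated assumptions: reading the extra flag as "also search a secondary (seed) device list" is an assumption about the kernel API, and every name in the sketch (struct dev_entry, lookup_device, and the sample tables) is invented for illustration rather than taken from the btrfs sources.

/*
 * Minimal, hypothetical sketch: a lookup that may return NULL plus a flag
 * that widens the search to a secondary list, with the caller checking the
 * result before dereferencing it (the CWE-476 pattern noted above).
 */
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct dev_entry {
	unsigned long long devid;
	const char *name;
};

static struct dev_entry main_devs[] = { { 1, "devA" }, { 2, "devB" } };
static struct dev_entry seed_devs[] = { { 7, "seedC" } };

/* Search the main list, and the seed list too when search_seed is true. */
static struct dev_entry *lookup_device(unsigned long long devid, bool search_seed)
{
	size_t i;

	for (i = 0; i < sizeof(main_devs) / sizeof(main_devs[0]); i++)
		if (main_devs[i].devid == devid)
			return &main_devs[i];
	if (search_seed)
		for (i = 0; i < sizeof(seed_devs) / sizeof(seed_devs[0]); i++)
			if (seed_devs[i].devid == devid)
				return &seed_devs[i];
	return NULL;	/* caller must handle a miss */
}

int main(void)
{
	struct dev_entry *dev = lookup_device(7, true);

	/* Check for NULL before dereferencing, as the guarded call in the row's code does. */
	if (!dev) {
		fprintf(stderr, "device not found\n");
		return 1;
	}
	printf("found %s\n", dev->name);
	return 0;
}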
scrub.c
btrfs_scrub_progress
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2011, 2012 STRATO. All rights reserved. */ #include <linux/blkdev.h> #include <linux/ratelimit.h> #include <linux/sched/mm.h> #include "ctree.h" #include "volumes.h" #include "disk-io.h" #include "ordered-data.h" #include "transaction.h" #include "backref.h" #include "extent_io.h" #include "dev-replace.h" #include "check-integrity.h" #include "rcu-string.h" #include "raid56.h" /* * This is only the first step towards a full-features scrub. It reads all * extent and super block and verifies the checksums. In case a bad checksum * is found or the extent cannot be read, good data will be written back if * any can be found. * * Future enhancements: * - In case an unrepairable extent is encountered, track which files are * affected and report them * - track and record media errors, throw out bad devices * - add a mode to also read unallocated space */ struct scrub_block; struct scrub_ctx; /* * the following three values only influence the performance. * The last one configures the number of parallel and outstanding I/O * operations. The first two values configure an upper limit for the number * of (dynamically allocated) pages that are added to a bio. */ #define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */ #define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */ #define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */ /* * the following value times PAGE_SIZE needs to be large enough to match the * largest node/leaf/sector size that shall be supported. * Values larger than BTRFS_STRIPE_LEN are not supported. */ #define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */ struct scrub_recover { refcount_t refs; struct btrfs_bio *bbio; u64 map_length; }; struct scrub_page { struct scrub_block *sblock; struct page *page; struct btrfs_device *dev; struct list_head list; u64 flags; /* extent flags */ u64 generation; u64 logical; u64 physical; u64 physical_for_dev_replace; atomic_t refs; struct { unsigned int mirror_num:8; unsigned int have_csum:1; unsigned int io_error:1; }; u8 csum[BTRFS_CSUM_SIZE]; struct scrub_recover *recover; }; struct scrub_bio { int index; struct scrub_ctx *sctx; struct btrfs_device *dev; struct bio *bio; blk_status_t status; u64 logical; u64 physical; #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO]; #else struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO]; #endif int page_count; int next_free; struct btrfs_work work; }; struct scrub_block { struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK]; int page_count; atomic_t outstanding_pages; refcount_t refs; /* free mem on transition to zero */ struct scrub_ctx *sctx; struct scrub_parity *sparity; struct { unsigned int header_error:1; unsigned int checksum_error:1; unsigned int no_io_error_seen:1; unsigned int generation_error:1; /* also sets header_error */ /* The following is for the data used to check parity */ /* It is for the data with checksum */ unsigned int data_corrected:1; }; struct btrfs_work work; }; /* Used for the chunks with parity stripe such RAID5/6 */ struct scrub_parity { struct scrub_ctx *sctx; struct btrfs_device *scrub_dev; u64 logic_start; u64 logic_end; int nsectors; u64 stripe_len; refcount_t refs; struct list_head spages; /* Work of parity check and repair */ struct btrfs_work work; /* Mark the parity blocks which have data */ unsigned long *dbitmap; /* * Mark the parity blocks which have data, but errors happen when * read data or check data */ unsigned long *ebitmap; unsigned long bitmap[0]; 
}; struct scrub_ctx { struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX]; struct btrfs_fs_info *fs_info; int first_free; int curr; atomic_t bios_in_flight; atomic_t workers_pending; spinlock_t list_lock; wait_queue_head_t list_wait; u16 csum_size; struct list_head csum_list; atomic_t cancel_req; int readonly; int pages_per_rd_bio; int is_dev_replace; struct scrub_bio *wr_curr_bio; struct mutex wr_lock; int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */ struct btrfs_device *wr_tgtdev; bool flush_all_writes; /* * statistics */ struct btrfs_scrub_progress stat; spinlock_t stat_lock; /* * Use a ref counter to avoid use-after-free issues. Scrub workers * decrement bios_in_flight and workers_pending and then do a wakeup * on the list_wait wait queue. We must ensure the main scrub task * doesn't free the scrub context before or while the workers are * doing the wakeup() call. */ refcount_t refs; }; struct scrub_warning { struct btrfs_path *path; u64 extent_item_size; const char *errstr; u64 physical; u64 logical; struct btrfs_device *dev; }; struct full_stripe_lock { struct rb_node node; u64 logical; u64 refs; struct mutex mutex; }; static void scrub_pending_bio_inc(struct scrub_ctx *sctx); static void scrub_pending_bio_dec(struct scrub_ctx *sctx); static int scrub_handle_errored_block(struct scrub_block *sblock_to_check); static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck); static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int retry_failed_mirror); static void scrub_recheck_block_checksum(struct scrub_block *sblock); static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good); static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write); static void scrub_write_block_to_dev_replace(struct scrub_block *sblock); static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, int page_num); static int scrub_checksum_data(struct scrub_block *sblock); static int scrub_checksum_tree_block(struct scrub_block *sblock); static int scrub_checksum_super(struct scrub_block *sblock); static void scrub_block_get(struct scrub_block *sblock); static void scrub_block_put(struct scrub_block *sblock); static void scrub_page_get(struct scrub_page *spage); static void scrub_page_put(struct scrub_page *spage); static void scrub_parity_get(struct scrub_parity *sparity); static void scrub_parity_put(struct scrub_parity *sparity); static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace); static void scrub_bio_end_io(struct bio *bio); static void scrub_bio_end_io_worker(struct btrfs_work *work); static void scrub_block_complete(struct scrub_block *sblock); static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u64 extent_len, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num); static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static void scrub_wr_submit(struct scrub_ctx *sctx); static void scrub_wr_bio_end_io(struct bio *bio); static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void 
scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_put_ctx(struct scrub_ctx *sctx); static inline int scrub_is_page_on_raid56(struct scrub_page *page) { return page->recover && (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); } static void scrub_pending_bio_inc(struct scrub_ctx *sctx) { refcount_inc(&sctx->refs); atomic_inc(&sctx->bios_in_flight); } static void scrub_pending_bio_dec(struct scrub_ctx *sctx) { atomic_dec(&sctx->bios_in_flight); wake_up(&sctx->list_wait); scrub_put_ctx(sctx); } static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { while (atomic_read(&fs_info->scrub_pause_req)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrub_pause_req) == 0); mutex_lock(&fs_info->scrub_lock); } } static void scrub_pause_on(struct btrfs_fs_info *fs_info) { atomic_inc(&fs_info->scrubs_paused); wake_up(&fs_info->scrub_pause_wait); } static void scrub_pause_off(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); __scrub_blocked_if_needed(fs_info); atomic_dec(&fs_info->scrubs_paused); mutex_unlock(&fs_info->scrub_lock); wake_up(&fs_info->scrub_pause_wait); } static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { scrub_pause_on(fs_info); scrub_pause_off(fs_info); } /* * Insert new full stripe lock into full stripe locks tree * * Return pointer to existing or newly inserted full_stripe_lock structure if * everything works well. * Return ERR_PTR(-ENOMEM) if we failed to allocate memory * * NOTE: caller must hold full_stripe_locks_root->lock before calling this * function */ static struct full_stripe_lock *insert_full_stripe_lock( struct btrfs_full_stripe_locks_tree *locks_root, u64 fstripe_logical) { struct rb_node **p; struct rb_node *parent = NULL; struct full_stripe_lock *entry; struct full_stripe_lock *ret; lockdep_assert_held(&locks_root->lock); p = &locks_root->root.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct full_stripe_lock, node); if (fstripe_logical < entry->logical) { p = &(*p)->rb_left; } else if (fstripe_logical > entry->logical) { p = &(*p)->rb_right; } else { entry->refs++; return entry; } } /* * Insert new lock. */ ret = kmalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return ERR_PTR(-ENOMEM); ret->logical = fstripe_logical; ret->refs = 1; mutex_init(&ret->mutex); rb_link_node(&ret->node, parent, p); rb_insert_color(&ret->node, &locks_root->root); return ret; } /* * Search for a full stripe lock of a block group * * Return pointer to existing full stripe lock if found * Return NULL if not found */ static struct full_stripe_lock *search_full_stripe_lock( struct btrfs_full_stripe_locks_tree *locks_root, u64 fstripe_logical) { struct rb_node *node; struct full_stripe_lock *entry; lockdep_assert_held(&locks_root->lock); node = locks_root->root.rb_node; while (node) { entry = rb_entry(node, struct full_stripe_lock, node); if (fstripe_logical < entry->logical) node = node->rb_left; else if (fstripe_logical > entry->logical) node = node->rb_right; else return entry; } return NULL; } /* * Helper to get full stripe logical from a normal bytenr. * * Caller must ensure @cache is a RAID56 block group. */ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, u64 bytenr) { u64 ret; /* * Due to chunk item size limit, full stripe length should not be * larger than U32_MAX. Just a sanity check here. 
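* (A full stripe spans the number of data stripes times the 64KiB BTRFS_STRIPE_LEN, and the stripe count is bounded by the chunk item size, which keeps the product far below U32_MAX.)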
*/ WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX); /* * round_down() can only handle power of 2, while RAID56 full * stripe length can be 64KiB * n, so we need to manually round down. */ ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) * cache->full_stripe_len + cache->key.objectid; return ret; } /* * Lock a full stripe to avoid concurrency of recovery and read * * It's only used for profiles with parities (RAID5/6), for other profiles it * does nothing. * * Return 0 if we locked full stripe covering @bytenr, with a mutex held. * So caller must call unlock_full_stripe() at the same context. * * Return <0 if encounters error. */ static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool *locked_ret) { struct btrfs_block_group_cache *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *existing; u64 fstripe_start; int ret = 0; *locked_ret = false; bg_cache = btrfs_lookup_block_group(fs_info, bytenr); if (!bg_cache) { ASSERT(0); return -ENOENT; } /* Profiles not based on parity don't need full stripe lock */ if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) goto out; locks_root = &bg_cache->full_stripe_locks_root; fstripe_start = get_full_stripe_logical(bg_cache, bytenr); /* Now insert the full stripe lock */ mutex_lock(&locks_root->lock); existing = insert_full_stripe_lock(locks_root, fstripe_start); mutex_unlock(&locks_root->lock); if (IS_ERR(existing)) { ret = PTR_ERR(existing); goto out; } mutex_lock(&existing->mutex); *locked_ret = true; out: btrfs_put_block_group(bg_cache); return ret; } /* * Unlock a full stripe. * * NOTE: Caller must ensure it's the same context calling corresponding * lock_full_stripe(). * * Return 0 if we unlock full stripe without problem. * Return <0 for error */ static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool locked) { struct btrfs_block_group_cache *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *fstripe_lock; u64 fstripe_start; bool freeit = false; int ret = 0; /* If we didn't acquire full stripe lock, no need to continue */ if (!locked) return 0; bg_cache = btrfs_lookup_block_group(fs_info, bytenr); if (!bg_cache) { ASSERT(0); return -ENOENT; } if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) goto out; locks_root = &bg_cache->full_stripe_locks_root; fstripe_start = get_full_stripe_logical(bg_cache, bytenr); mutex_lock(&locks_root->lock); fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start); /* Unpaired unlock_full_stripe() detected */ if (!fstripe_lock) { WARN_ON(1); ret = -ENOENT; mutex_unlock(&locks_root->lock); goto out; } if (fstripe_lock->refs == 0) { WARN_ON(1); btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow", fstripe_lock->logical); } else { fstripe_lock->refs--; } if (fstripe_lock->refs == 0) { rb_erase(&fstripe_lock->node, &locks_root->root); freeit = true; } mutex_unlock(&locks_root->lock); mutex_unlock(&fstripe_lock->mutex); if (freeit) kfree(fstripe_lock); out: btrfs_put_block_group(bg_cache); return ret; } static void scrub_free_csums(struct scrub_ctx *sctx) { while (!list_empty(&sctx->csum_list)) { struct btrfs_ordered_sum *sum; sum = list_first_entry(&sctx->csum_list, struct btrfs_ordered_sum, list); list_del(&sum->list); kfree(sum); } } static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) { int i; if (!sctx) return; /* this can happen when scrub is cancelled */ if (sctx->curr != -1) { struct scrub_bio *sbio = sctx->bios[sctx->curr]; for (i = 0; i < 
sbio->page_count; i++) { WARN_ON(!sbio->pagev[i]->page); scrub_block_put(sbio->pagev[i]->sblock); } bio_put(sbio->bio); } for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { struct scrub_bio *sbio = sctx->bios[i]; if (!sbio) break; kfree(sbio); } kfree(sctx->wr_curr_bio); scrub_free_csums(sctx); kfree(sctx); } static void scrub_put_ctx(struct scrub_ctx *sctx) { if (refcount_dec_and_test(&sctx->refs)) scrub_free_ctx(sctx); } static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( struct btrfs_fs_info *fs_info, int is_dev_replace) { struct scrub_ctx *sctx; int i; sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); if (!sctx) goto nomem; refcount_set(&sctx->refs, 1); sctx->is_dev_replace = is_dev_replace; sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; sctx->curr = -1; sctx->fs_info = fs_info; for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { struct scrub_bio *sbio; sbio = kzalloc(sizeof(*sbio), GFP_KERNEL); if (!sbio) goto nomem; sctx->bios[i] = sbio; sbio->index = i; sbio->sctx = sctx; sbio->page_count = 0; btrfs_init_work(&sbio->work, btrfs_scrub_helper, scrub_bio_end_io_worker, NULL, NULL); if (i != SCRUB_BIOS_PER_SCTX - 1) sctx->bios[i]->next_free = i + 1; else sctx->bios[i]->next_free = -1; } sctx->first_free = 0; atomic_set(&sctx->bios_in_flight, 0); atomic_set(&sctx->workers_pending, 0); atomic_set(&sctx->cancel_req, 0); sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); INIT_LIST_HEAD(&sctx->csum_list); spin_lock_init(&sctx->list_lock); spin_lock_init(&sctx->stat_lock); init_waitqueue_head(&sctx->list_wait); WARN_ON(sctx->wr_curr_bio != NULL); mutex_init(&sctx->wr_lock); sctx->wr_curr_bio = NULL; if (is_dev_replace) { WARN_ON(!fs_info->dev_replace.tgtdev); sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; sctx->flush_all_writes = false; } return sctx; nomem: scrub_free_ctx(sctx); return ERR_PTR(-ENOMEM); } static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *warn_ctx) { u64 isize; u32 nlink; int ret; int i; unsigned nofs_flag; struct extent_buffer *eb; struct btrfs_inode_item *inode_item; struct scrub_warning *swarn = warn_ctx; struct btrfs_fs_info *fs_info = swarn->dev->fs_info; struct inode_fs_paths *ipath = NULL; struct btrfs_root *local_root; struct btrfs_key root_key; struct btrfs_key key; root_key.objectid = root; root_key.type = BTRFS_ROOT_ITEM_KEY; root_key.offset = (u64)-1; local_root = btrfs_read_fs_root_no_name(fs_info, &root_key); if (IS_ERR(local_root)) { ret = PTR_ERR(local_root); goto err; } /* * this makes the path point to (inum INODE_ITEM ioff) */ key.objectid = inum; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); if (ret) { btrfs_release_path(swarn->path); goto err; } eb = swarn->path->nodes[0]; inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], struct btrfs_inode_item); isize = btrfs_inode_size(eb, inode_item); nlink = btrfs_inode_nlink(eb, inode_item); btrfs_release_path(swarn->path); /* * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub * uses GFP_NOFS in this context, so we keep it consistent but it does * not seem to be strictly necessary. 
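* (memalloc_nofs_save() makes any GFP_KERNEL allocation in the current task behave as GFP_NOFS until the matching restore.)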
*/ nofs_flag = memalloc_nofs_save(); ipath = init_ipath(4096, local_root, swarn->path); memalloc_nofs_restore(nofs_flag); if (IS_ERR(ipath)) { ret = PTR_ERR(ipath); ipath = NULL; goto err; } ret = paths_from_inode(inum, ipath); if (ret < 0) goto err; /* * we deliberately ignore the bit ipath might have been too small to * hold all of the paths here */ for (i = 0; i < ipath->fspath->elem_cnt; ++i) btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)", swarn->errstr, swarn->logical, rcu_str_deref(swarn->dev->name), swarn->physical, root, inum, offset, min(isize - offset, (u64)PAGE_SIZE), nlink, (char *)(unsigned long)ipath->fspath->val[i]); free_ipath(ipath); return 0; err: btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d", swarn->errstr, swarn->logical, rcu_str_deref(swarn->dev->name), swarn->physical, root, inum, offset, ret); free_ipath(ipath); return 0; } static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) { struct btrfs_device *dev; struct btrfs_fs_info *fs_info; struct btrfs_path *path; struct btrfs_key found_key; struct extent_buffer *eb; struct btrfs_extent_item *ei; struct scrub_warning swarn; unsigned long ptr = 0; u64 extent_item_pos; u64 flags = 0; u64 ref_root; u32 item_size; u8 ref_level = 0; int ret; WARN_ON(sblock->page_count < 1); dev = sblock->pagev[0]->dev; fs_info = sblock->sctx->fs_info; path = btrfs_alloc_path(); if (!path) return; swarn.physical = sblock->pagev[0]->physical; swarn.logical = sblock->pagev[0]->logical; swarn.errstr = errstr; swarn.dev = NULL; ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, &flags); if (ret < 0) goto out; extent_item_pos = swarn.logical - found_key.objectid; swarn.extent_item_size = found_key.offset; eb = path->nodes[0]; ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); item_size = btrfs_item_size_nr(eb, path->slots[0]); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { do { ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, item_size, &ref_root, &ref_level); btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu", errstr, swarn.logical, rcu_str_deref(dev->name), swarn.physical, ref_level ? "node" : "leaf", ret < 0 ? -1 : ref_level, ret < 0 ? -1 : ref_root); } while (ret != 1); btrfs_release_path(path); } else { btrfs_release_path(path); swarn.path = path; swarn.dev = dev; iterate_extent_inodes(fs_info, found_key.objectid, extent_item_pos, 1, scrub_print_warning_inode, &swarn, false); } out: btrfs_free_path(path); } static inline void scrub_get_recover(struct scrub_recover *recover) { refcount_inc(&recover->refs); } static inline void scrub_put_recover(struct btrfs_fs_info *fs_info, struct scrub_recover *recover) { if (refcount_dec_and_test(&recover->refs)) { btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(recover->bbio); kfree(recover); } } /* * scrub_handle_errored_block gets called when either verification of the * pages failed or the bio failed to read, e.g. with EIO. In the latter * case, this function handles all pages in the bio, even though only one * may be bad. * The goal of this function is to repair the errored block by using the * contents of one of the mirrors. 
*/ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) { struct scrub_ctx *sctx = sblock_to_check->sctx; struct btrfs_device *dev; struct btrfs_fs_info *fs_info; u64 logical; unsigned int failed_mirror_index; unsigned int is_metadata; unsigned int have_csum; struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */ struct scrub_block *sblock_bad; int ret; int mirror_index; int page_num; int success; bool full_stripe_locked; unsigned int nofs_flag; static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); BUG_ON(sblock_to_check->page_count < 1); fs_info = sctx->fs_info; if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { /* * if we find an error in a super block, we just report it. * They will get written with the next transaction commit * anyway */ spin_lock(&sctx->stat_lock); ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); return 0; } logical = sblock_to_check->pagev[0]->logical; BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1); failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1; is_metadata = !(sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA); have_csum = sblock_to_check->pagev[0]->have_csum; dev = sblock_to_check->pagev[0]->dev; /* * We must use GFP_NOFS because the scrub task might be waiting for a * worker task executing this function and in turn a transaction commit * might be waiting for the scrub task to pause (which needs to wait for all * the worker tasks to complete before pausing). * We do allocations in the workers through insert_full_stripe_lock() * and scrub_add_page_to_wr_bio(), which happens down the call chain of * this function. */ nofs_flag = memalloc_nofs_save(); /* * For RAID5/6, a race can happen with the scrub thread of a different * device. On data corruption, the Parity and Data threads will both try * to recover the data. * Such a race can lead to a doubly counted csum error, or even to an * unrecoverable error. */ ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); if (ret < 0) { memalloc_nofs_restore(nofs_flag); spin_lock(&sctx->stat_lock); if (ret == -ENOMEM) sctx->stat.malloc_errors++; sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); return ret; } /* * Read all mirrors one after the other. This includes * re-reading the extent or metadata block that failed (which was * the reason this fixup code was called), this time * page by page, in order to know which pages * caused I/O errors and which ones are good (for all mirrors). * The goal is to handle the situation when more than one * mirror contains I/O errors, but the errors do not * overlap, i.e. the data can be repaired by selecting the * pages from those mirrors without I/O error on the * particular pages. One example (with blocks >= 2 * PAGE_SIZE) * would be that mirror #1 has an I/O error on the first page, * the second page is good, and mirror #2 has an I/O error on * the second page, but the first page is good. * Then the first page of the first mirror can be repaired by * taking the first page of the second mirror, and the * second page of the second mirror can be repaired by * copying the contents of the 2nd page of the 1st mirror. * One more note: if the pages of one mirror contain I/O * errors, the checksum cannot be verified. In order to get * the best data for repairing, the first attempt is to find * a mirror without I/O errors and with a validated checksum.
* Only if this is not possible, the pages are picked from * mirrors with I/O errors without considering the checksum. * If the latter is the case, at the end, the checksum of the * repaired area is verified in order to correctly maintain * the statistics. */ sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS, sizeof(*sblocks_for_recheck), GFP_KERNEL); if (!sblocks_for_recheck) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); goto out; } /* setup the context, map the logical blocks and alloc the pages */ ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); if (ret) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); goto out; } BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); sblock_bad = sblocks_for_recheck + failed_mirror_index; /* build and submit the bios for the failed mirror, check checksums */ scrub_recheck_block(fs_info, sblock_bad, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) { /* * the error disappeared after reading page by page, or * the area was part of a huge bio and other parts of the * bio caused I/O errors, or the block layer merged several * read requests into one and the error is caused by a * different bio (usually one of the two latter cases is * the cause) */ spin_lock(&sctx->stat_lock); sctx->stat.unverified_errors++; sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); if (sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock_bad); goto out; } if (!sblock_bad->no_io_error_seen) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("i/o error", sblock_to_check); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); } else if (sblock_bad->checksum_error) { spin_lock(&sctx->stat_lock); sctx->stat.csum_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("checksum error", sblock_to_check); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); } else if (sblock_bad->header_error) { spin_lock(&sctx->stat_lock); sctx->stat.verify_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("checksum/header error", sblock_to_check); if (sblock_bad->generation_error) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_GENERATION_ERRS); else btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); } if (sctx->readonly) { ASSERT(!sctx->is_dev_replace); goto out; } /* * now build and submit the bios for the other mirrors, check * checksums. * First try to pick the mirror which is completely without I/O * errors and also does not have a checksum error. * If one is found, and if a checksum is present, the full block * that is known to contain an error is rewritten. Afterwards * the block is known to be corrected. * If a mirror is found which is completely correct, and no * checksum is present, only those pages are rewritten that had * an I/O error in the block to be repaired, since it cannot be * determined, which copy of the other pages is better (and it * could happen otherwise that a correct page would be * overwritten by a bad one). 
*/ for (mirror_index = 0; ;mirror_index++) { struct scrub_block *sblock_other; if (mirror_index == failed_mirror_index) continue; /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */ if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) { if (mirror_index >= BTRFS_MAX_MIRRORS) break; if (!sblocks_for_recheck[mirror_index].page_count) break; sblock_other = sblocks_for_recheck + mirror_index; } else { struct scrub_recover *r = sblock_bad->pagev[0]->recover; int max_allowed = r->bbio->num_stripes - r->bbio->num_tgtdevs; if (mirror_index >= max_allowed) break; if (!sblocks_for_recheck[1].page_count) break; ASSERT(failed_mirror_index == 0); sblock_other = sblocks_for_recheck + 1; sblock_other->pagev[0]->mirror_num = 1 + mirror_index; } /* build and submit the bios, check checksums */ scrub_recheck_block(fs_info, sblock_other, 0); if (!sblock_other->header_error && !sblock_other->checksum_error && sblock_other->no_io_error_seen) { if (sctx->is_dev_replace) { scrub_write_block_to_dev_replace(sblock_other); goto corrected_error; } else { ret = scrub_repair_block_from_good_copy( sblock_bad, sblock_other); if (!ret) goto corrected_error; } } } if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) goto did_not_correct_error; /* * In case of I/O errors in the area that is supposed to be * repaired, continue by picking good copies of those pages. * Select the good pages from mirrors to rewrite bad pages from * the area to fix. Afterwards verify the checksum of the block * that is supposed to be repaired. This verification step is * only done for the purpose of statistic counting and for the * final scrub report, whether errors remain. * A perfect algorithm could make use of the checksum and try * all possible combinations of pages from the different mirrors * until the checksum verification succeeds. For example, when * the 2nd page of mirror #1 faces I/O errors, and the 2nd page * of mirror #2 is readable but the final checksum test fails, * then the 2nd page of mirror #3 could be tried, whether now * the final checksum succeeds. But this would be a rare * exception and is therefore not implemented. At least it is * avoided that the good copy is overwritten. * A more useful improvement would be to pick the sectors * without I/O error based on sector sizes (512 bytes on legacy * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one * mirror could be repaired by taking 512 byte of a different * mirror, even if other 512 byte sectors in the same PAGE_SIZE * area are unreadable. */ success = 1; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_block *sblock_other = NULL; /* skip no-io-error page in scrub */ if (!page_bad->io_error && !sctx->is_dev_replace) continue; if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) { /* * In case of dev replace, if raid56 rebuild process * didn't work out correct data, then copy the content * in sblock_bad to make sure target device is identical * to source device, instead of writing garbage data in * sblock_for_recheck array to target device. */ sblock_other = NULL; } else if (page_bad->io_error) { /* try to find no-io-error page in mirrors */ for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS && sblocks_for_recheck[mirror_index].page_count > 0; mirror_index++) { if (!sblocks_for_recheck[mirror_index]. 
pagev[page_num]->io_error) { sblock_other = sblocks_for_recheck + mirror_index; break; } } if (!sblock_other) success = 0; } if (sctx->is_dev_replace) { /* * did not find a mirror to fetch the page * from. scrub_write_page_to_dev_replace() * handles this case (page->io_error), by * filling the block with zeros before * submitting the write request */ if (!sblock_other) sblock_other = sblock_bad; if (scrub_write_page_to_dev_replace(sblock_other, page_num) != 0) { atomic64_inc( &fs_info->dev_replace.num_write_errors); success = 0; } } else if (sblock_other) { ret = scrub_repair_page_from_good_copy(sblock_bad, sblock_other, page_num, 0); if (0 == ret) page_bad->io_error = 0; else success = 0; } } if (success && !sctx->is_dev_replace) { if (is_metadata || have_csum) { /* * need to verify the checksum now that all * sectors on disk are repaired (the write * request for data to be repaired is on its way). * Just be lazy and use scrub_recheck_block() * which re-reads the data before the checksum * is verified, but most likely the data comes out * of the page cache. */ scrub_recheck_block(fs_info, sblock_bad, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) goto corrected_error; else goto did_not_correct_error; } else { corrected_error: spin_lock(&sctx->stat_lock); sctx->stat.corrected_errors++; sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "fixed up error at logical %llu on dev %s", logical, rcu_str_deref(dev->name)); } } else { did_not_correct_error: spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "unable to fixup (regular) error at logical %llu on dev %s", logical, rcu_str_deref(dev->name)); } out: if (sblocks_for_recheck) { for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) { struct scrub_block *sblock = sblocks_for_recheck + mirror_index; struct scrub_recover *recover; int page_index; for (page_index = 0; page_index < sblock->page_count; page_index++) { sblock->pagev[page_index]->sblock = NULL; recover = sblock->pagev[page_index]->recover; if (recover) { scrub_put_recover(fs_info, recover); sblock->pagev[page_index]->recover = NULL; } scrub_page_put(sblock->pagev[page_index]); } } kfree(sblocks_for_recheck); } ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); memalloc_nofs_restore(nofs_flag); if (ret < 0) return ret; return 0; } static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) { if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) return 2; else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) return 3; else return (int)bbio->num_stripes; } static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, u64 *raid_map, u64 mapped_length, int nstripes, int mirror, int *stripe_index, u64 *stripe_offset) { int i; if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* RAID5/6 */ for (i = 0; i < nstripes; i++) { if (raid_map[i] == RAID6_Q_STRIPE || raid_map[i] == RAID5_P_STRIPE) continue; if (logical >= raid_map[i] && logical < raid_map[i] + mapped_length) break; } *stripe_index = i; *stripe_offset = logical - raid_map[i]; } else { /* The other RAID type */ *stripe_index = mirror; *stripe_offset = 0; } } static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck) { struct scrub_ctx *sctx = original_sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 length = original_sblock->page_count * PAGE_SIZE; u64 logical 
= original_sblock->pagev[0]->logical; u64 generation = original_sblock->pagev[0]->generation; u64 flags = original_sblock->pagev[0]->flags; u64 have_csum = original_sblock->pagev[0]->have_csum; struct scrub_recover *recover; struct btrfs_bio *bbio; u64 sublen; u64 mapped_length; u64 stripe_offset; int stripe_index; int page_index = 0; int mirror_index; int nmirrors; int ret; /* * note: the two members refs and outstanding_pages * are not used (and not set) in the blocks that are used for * the recheck procedure */ while (length > 0) { sublen = min_t(u64, length, PAGE_SIZE); mapped_length = sublen; bbio = NULL; /* * with a length of PAGE_SIZE, each returned stripe * represents one mirror */ btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &mapped_length, &bbio); if (ret || !bbio || mapped_length < sublen) { btrfs_put_bbio(bbio); btrfs_bio_counter_dec(fs_info); return -EIO; } recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); if (!recover) { btrfs_put_bbio(bbio); btrfs_bio_counter_dec(fs_info); return -ENOMEM; } refcount_set(&recover->refs, 1); recover->bbio = bbio; recover->map_length = mapped_length; BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK); nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; struct scrub_page *page; sblock = sblocks_for_recheck + mirror_index; sblock->sctx = sctx; page = kzalloc(sizeof(*page), GFP_NOFS); if (!page) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_put_recover(fs_info, recover); return -ENOMEM; } scrub_page_get(page); sblock->pagev[page_index] = page; page->sblock = sblock; page->flags = flags; page->generation = generation; page->logical = logical; page->have_csum = have_csum; if (have_csum) memcpy(page->csum, original_sblock->pagev[0]->csum, sctx->csum_size); scrub_stripe_index_and_offset(logical, bbio->map_type, bbio->raid_map, mapped_length, bbio->num_stripes - bbio->num_tgtdevs, mirror_index, &stripe_index, &stripe_offset); page->physical = bbio->stripes[stripe_index].physical + stripe_offset; page->dev = bbio->stripes[stripe_index].dev; BUG_ON(page_index >= original_sblock->page_count); page->physical_for_dev_replace = original_sblock->pagev[page_index]-> physical_for_dev_replace; /* for missing devices, dev->bdev is NULL */ page->mirror_num = mirror_index + 1; sblock->page_count++; page->page = alloc_page(GFP_NOFS); if (!page->page) goto leave_nomem; scrub_get_recover(recover); page->recover = recover; } scrub_put_recover(fs_info, recover); length -= sublen; logical += sublen; page_index++; } return 0; } static void scrub_bio_wait_endio(struct bio *bio) { complete(bio->bi_private); } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, struct bio *bio, struct scrub_page *page) { DECLARE_COMPLETION_ONSTACK(done); int ret; int mirror_num; bio->bi_iter.bi_sector = page->logical >> 9; bio->bi_private = &done; bio->bi_end_io = scrub_bio_wait_endio; mirror_num = page->sblock->pagev[0]->mirror_num; ret = raid56_parity_recover(fs_info, bio, page->recover->bbio, page->recover->map_length, mirror_num, 0); if (ret) return ret; wait_for_completion_io(&done); return blk_status_to_errno(bio->bi_status); } static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info, struct scrub_block *sblock) { struct scrub_page *first_page = sblock->pagev[0]; struct bio *bio; int page_num; /* All pages in sblock 
belong to the same stripe on the same device. */ ASSERT(first_page->dev); if (!first_page->dev->bdev) goto out; bio = btrfs_io_bio_alloc(BIO_MAX_PAGES); bio_set_dev(bio, first_page->dev->bdev); for (page_num = 0; page_num < sblock->page_count; page_num++) { struct scrub_page *page = sblock->pagev[page_num]; WARN_ON(!page->page); bio_add_page(bio, page->page, PAGE_SIZE, 0); } if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) { bio_put(bio); goto out; } bio_put(bio); scrub_recheck_block_checksum(sblock); return; out: for (page_num = 0; page_num < sblock->page_count; page_num++) sblock->pagev[page_num]->io_error = 1; sblock->no_io_error_seen = 0; } /* * this function will check the on disk data for checksum errors, header * errors and read I/O errors. If any I/O errors happen, the exact pages * which are errored are marked as being bad. The goal is to enable scrub * to take those pages that are not errored from all the mirrors so that * the pages that are errored in the just handled mirror can be repaired. */ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int retry_failed_mirror) { int page_num; sblock->no_io_error_seen = 1; /* short cut for raid56 */ if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0])) return scrub_recheck_block_on_raid56(fs_info, sblock); for (page_num = 0; page_num < sblock->page_count; page_num++) { struct bio *bio; struct scrub_page *page = sblock->pagev[page_num]; if (page->dev->bdev == NULL) { page->io_error = 1; sblock->no_io_error_seen = 0; continue; } WARN_ON(!page->page); bio = btrfs_io_bio_alloc(1); bio_set_dev(bio, page->dev->bdev); bio_add_page(bio, page->page, PAGE_SIZE, 0); bio->bi_iter.bi_sector = page->physical >> 9; bio->bi_opf = REQ_OP_READ; if (btrfsic_submit_bio_wait(bio)) { page->io_error = 1; sblock->no_io_error_seen = 0; } bio_put(bio); } if (sblock->no_io_error_seen) scrub_recheck_block_checksum(sblock); } static inline int scrub_check_fsid(u8 fsid[], struct scrub_page *spage) { struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices; int ret; ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE); return !ret; } static void scrub_recheck_block_checksum(struct scrub_block *sblock) { sblock->header_error = 0; sblock->checksum_error = 0; sblock->generation_error = 0; if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA) scrub_checksum_data(sblock); else scrub_checksum_tree_block(sblock); } static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good) { int page_num; int ret = 0; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { int ret_sub; ret_sub = scrub_repair_page_from_good_copy(sblock_bad, sblock_good, page_num, 1); if (ret_sub) ret = ret_sub; } return ret; } static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_page *page_good = sblock_good->pagev[page_num]; struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; BUG_ON(page_bad->page == NULL); BUG_ON(page_good->page == NULL); if (force_write || sblock_bad->header_error || sblock_bad->checksum_error || page_bad->io_error) { struct bio *bio; int ret; if (!page_bad->dev->bdev) { btrfs_warn_rl(fs_info, "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected"); return -EIO; } bio = btrfs_io_bio_alloc(1); bio_set_dev(bio, page_bad->dev->bdev); bio->bi_iter.bi_sector = page_bad->physical >> 9; 
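/* bi_sector is expressed in 512-byte units, hence the shift by 9 from the byte-granular physical address */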
bio->bi_opf = REQ_OP_WRITE; ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); if (PAGE_SIZE != ret) { bio_put(bio); return -EIO; } if (btrfsic_submit_bio_wait(bio)) { btrfs_dev_stat_inc_and_print(page_bad->dev, BTRFS_DEV_STAT_WRITE_ERRS); atomic64_inc(&fs_info->dev_replace.num_write_errors); bio_put(bio); return -EIO; } bio_put(bio); } return 0; } static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) { struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; int page_num; /* * This block is used for the check of the parity on the source device, * so the data needn't be written into the destination device. */ if (sblock->sparity) return; for (page_num = 0; page_num < sblock->page_count; page_num++) { int ret; ret = scrub_write_page_to_dev_replace(sblock, page_num); if (ret) atomic64_inc(&fs_info->dev_replace.num_write_errors); } } static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, int page_num) { struct scrub_page *spage = sblock->pagev[page_num]; BUG_ON(spage->page == NULL); if (spage->io_error) { void *mapped_buffer = kmap_atomic(spage->page); clear_page(mapped_buffer); flush_dcache_page(spage->page); kunmap_atomic(mapped_buffer); } return scrub_add_page_to_wr_bio(sblock->sctx, spage); } static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { struct scrub_bio *sbio; int ret; mutex_lock(&sctx->wr_lock); again: if (!sctx->wr_curr_bio) { sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), GFP_KERNEL); if (!sctx->wr_curr_bio) { mutex_unlock(&sctx->wr_lock); return -ENOMEM; } sctx->wr_curr_bio->sctx = sctx; sctx->wr_curr_bio->page_count = 0; } sbio = sctx->wr_curr_bio; if (sbio->page_count == 0) { struct bio *bio; sbio->physical = spage->physical_for_dev_replace; sbio->logical = spage->logical; sbio->dev = sctx->wr_tgtdev; bio = sbio->bio; if (!bio) { bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio); sbio->bio = bio; } bio->bi_private = sbio; bio->bi_end_io = scrub_wr_bio_end_io; bio_set_dev(bio, sbio->dev->bdev); bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_opf = REQ_OP_WRITE; sbio->status = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical_for_dev_replace || sbio->logical + sbio->page_count * PAGE_SIZE != spage->logical) { scrub_wr_submit(sctx); goto again; } ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { if (sbio->page_count < 1) { bio_put(sbio->bio); sbio->bio = NULL; mutex_unlock(&sctx->wr_lock); return -EIO; } scrub_wr_submit(sctx); goto again; } sbio->pagev[sbio->page_count] = spage; scrub_page_get(spage); sbio->page_count++; if (sbio->page_count == sctx->pages_per_wr_bio) scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); return 0; } static void scrub_wr_submit(struct scrub_ctx *sctx) { struct scrub_bio *sbio; if (!sctx->wr_curr_bio) return; sbio = sctx->wr_curr_bio; sctx->wr_curr_bio = NULL; WARN_ON(!sbio->bio->bi_disk); scrub_pending_bio_inc(sctx); /* process all writes in a single worker thread. 
Then the block layer * orders the requests before sending them to the driver which * doubled the write performance on spinning disks when measured * with Linux 3.5 */ btrfsic_submit_bio(sbio->bio); } static void scrub_wr_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->fs_info; sbio->status = bio->bi_status; sbio->bio = bio; btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, scrub_wr_bio_end_io_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); } static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; int i; WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); if (sbio->status) { struct btrfs_dev_replace *dev_replace = &sbio->sctx->fs_info->dev_replace; for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; spage->io_error = 1; atomic64_inc(&dev_replace->num_write_errors); } } for (i = 0; i < sbio->page_count; i++) scrub_page_put(sbio->pagev[i]); bio_put(sbio->bio); kfree(sbio); scrub_pending_bio_dec(sctx); } static int scrub_checksum(struct scrub_block *sblock) { u64 flags; int ret; /* * No need to initialize these stats currently, * because this function only use return value * instead of these stats value. * * Todo: * always use stats */ sblock->header_error = 0; sblock->generation_error = 0; sblock->checksum_error = 0; WARN_ON(sblock->page_count < 1); flags = sblock->pagev[0]->flags; ret = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) ret = scrub_checksum_data(sblock); else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) ret = scrub_checksum_tree_block(sblock); else if (flags & BTRFS_EXTENT_FLAG_SUPER) (void)scrub_checksum_super(sblock); else WARN_ON(1); if (ret) scrub_handle_errored_block(sblock); return ret; } static int scrub_checksum_data(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; u8 csum[BTRFS_CSUM_SIZE]; u8 *on_disk_csum; struct page *page; void *buffer; u32 crc = ~(u32)0; u64 len; int index; BUG_ON(sblock->page_count < 1); if (!sblock->pagev[0]->have_csum) return 0; on_disk_csum = sblock->pagev[0]->csum; page = sblock->pagev[0]->page; buffer = kmap_atomic(page); len = sctx->fs_info->sectorsize; index = 0; for (;;) { u64 l = min_t(u64, len, PAGE_SIZE); crc = btrfs_csum_data(buffer, crc, l); kunmap_atomic(buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; buffer = kmap_atomic(page); } btrfs_csum_final(crc, csum); if (memcmp(csum, on_disk_csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->checksum_error; } static int scrub_checksum_tree_block(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; struct btrfs_header *h; struct btrfs_fs_info *fs_info = sctx->fs_info; u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; struct page *page; void *mapped_buffer; u64 mapped_size; void *p; u32 crc = ~(u32)0; u64 len; int index; BUG_ON(sblock->page_count < 1); page = sblock->pagev[0]->page; mapped_buffer = kmap_atomic(page); h = (struct btrfs_header *)mapped_buffer; memcpy(on_disk_csum, h->csum, sctx->csum_size); /* * we don't use the getter functions here, as we * a) don't have an extent buffer and * b) the page is already kmapped */ if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h)) sblock->header_error = 1; if (sblock->pagev[0]->generation != 
btrfs_stack_header_generation(h)) { sblock->header_error = 1; sblock->generation_error = 1; } if (!scrub_check_fsid(h->fsid, sblock->pagev[0])) sblock->header_error = 1; if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) sblock->header_error = 1; len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE; mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; index = 0; for (;;) { u64 l = min_t(u64, len, mapped_size); crc = btrfs_csum_data(p, crc, l); kunmap_atomic(mapped_buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; mapped_buffer = kmap_atomic(page); mapped_size = PAGE_SIZE; p = mapped_buffer; } btrfs_csum_final(crc, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->header_error || sblock->checksum_error; } static int scrub_checksum_super(struct scrub_block *sblock) { struct btrfs_super_block *s; struct scrub_ctx *sctx = sblock->sctx; u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; struct page *page; void *mapped_buffer; u64 mapped_size; void *p; u32 crc = ~(u32)0; int fail_gen = 0; int fail_cor = 0; u64 len; int index; BUG_ON(sblock->page_count < 1); page = sblock->pagev[0]->page; mapped_buffer = kmap_atomic(page); s = (struct btrfs_super_block *)mapped_buffer; memcpy(on_disk_csum, s->csum, sctx->csum_size); if (sblock->pagev[0]->logical != btrfs_super_bytenr(s)) ++fail_cor; if (sblock->pagev[0]->generation != btrfs_super_generation(s)) ++fail_gen; if (!scrub_check_fsid(s->fsid, sblock->pagev[0])) ++fail_cor; len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE; mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; index = 0; for (;;) { u64 l = min_t(u64, len, mapped_size); crc = btrfs_csum_data(p, crc, l); kunmap_atomic(mapped_buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; mapped_buffer = kmap_atomic(page); mapped_size = PAGE_SIZE; p = mapped_buffer; } btrfs_csum_final(crc, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) ++fail_cor; if (fail_cor + fail_gen) { /* * if we find an error in a super block, we just report it. 
* They will get written with the next transaction commit * anyway */ spin_lock(&sctx->stat_lock); ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); if (fail_cor) btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); else btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, BTRFS_DEV_STAT_GENERATION_ERRS); } return fail_cor + fail_gen; } static void scrub_block_get(struct scrub_block *sblock) { refcount_inc(&sblock->refs); } static void scrub_block_put(struct scrub_block *sblock) { if (refcount_dec_and_test(&sblock->refs)) { int i; if (sblock->sparity) scrub_parity_put(sblock->sparity); for (i = 0; i < sblock->page_count; i++) scrub_page_put(sblock->pagev[i]); kfree(sblock); } } static void scrub_page_get(struct scrub_page *spage) { atomic_inc(&spage->refs); } static void scrub_page_put(struct scrub_page *spage) { if (atomic_dec_and_test(&spage->refs)) { if (spage->page) __free_page(spage->page); kfree(spage); } } static void scrub_submit(struct scrub_ctx *sctx) { struct scrub_bio *sbio; if (sctx->curr == -1) return; sbio = sctx->bios[sctx->curr]; sctx->curr = -1; scrub_pending_bio_inc(sctx); btrfsic_submit_bio(sbio->bio); } static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { struct scrub_block *sblock = spage->sblock; struct scrub_bio *sbio; int ret; again: /* * grab a fresh bio or wait for one to become available */ while (sctx->curr == -1) { spin_lock(&sctx->list_lock); sctx->curr = sctx->first_free; if (sctx->curr != -1) { sctx->first_free = sctx->bios[sctx->curr]->next_free; sctx->bios[sctx->curr]->next_free = -1; sctx->bios[sctx->curr]->page_count = 0; spin_unlock(&sctx->list_lock); } else { spin_unlock(&sctx->list_lock); wait_event(sctx->list_wait, sctx->first_free != -1); } } sbio = sctx->bios[sctx->curr]; if (sbio->page_count == 0) { struct bio *bio; sbio->physical = spage->physical; sbio->logical = spage->logical; sbio->dev = spage->dev; bio = sbio->bio; if (!bio) { bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio); sbio->bio = bio; } bio->bi_private = sbio; bio->bi_end_io = scrub_bio_end_io; bio_set_dev(bio, sbio->dev->bdev); bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_opf = REQ_OP_READ; sbio->status = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical || sbio->logical + sbio->page_count * PAGE_SIZE != spage->logical || sbio->dev != spage->dev) { scrub_submit(sctx); goto again; } sbio->pagev[sbio->page_count] = spage; ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { if (sbio->page_count < 1) { bio_put(sbio->bio); sbio->bio = NULL; return -EIO; } scrub_submit(sctx); goto again; } scrub_block_get(sblock); /* one for the page added to the bio */ atomic_inc(&sblock->outstanding_pages); sbio->page_count++; if (sbio->page_count == sctx->pages_per_rd_bio) scrub_submit(sctx); return 0; } static void scrub_missing_raid56_end_io(struct bio *bio) { struct scrub_block *sblock = bio->bi_private; struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; if (bio->bi_status) sblock->no_io_error_seen = 0; bio_put(bio); btrfs_queue_work(fs_info->scrub_workers, &sblock->work); } static void scrub_missing_raid56_worker(struct btrfs_work *work) { struct scrub_block *sblock = container_of(work, struct scrub_block, work); struct scrub_ctx *sctx = sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 logical; struct btrfs_device *dev; logical = sblock->pagev[0]->logical; dev = sblock->pagev[0]->dev; if (sblock->no_io_error_seen) 
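/* checksum/header verification below is only meaningful if the rebuilt data was read back without I/O errors */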
scrub_recheck_block_checksum(sblock); if (!sblock->no_io_error_seen) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "IO error rebuilding logical %llu for dev %s", logical, rcu_str_deref(dev->name)); } else if (sblock->header_error || sblock->checksum_error) { spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "failed to rebuild valid logical %llu for dev %s", logical, rcu_str_deref(dev->name)); } else { scrub_write_block_to_dev_replace(sblock); } scrub_block_put(sblock); if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } scrub_pending_bio_dec(sctx); } static void scrub_missing_raid56_pages(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 length = sblock->page_count * PAGE_SIZE; u64 logical = sblock->pagev[0]->logical; struct btrfs_bio *bbio = NULL; struct bio *bio; struct btrfs_raid_bio *rbio; int ret; int i; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &length, &bbio); if (ret || !bbio || !bbio->raid_map) goto bbio_out; if (WARN_ON(!sctx->is_dev_replace || !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { /* * We shouldn't be scrubbing a missing device. Even for dev * replace, we should only get here for RAID 5/6. We either * managed to mount something with no mirrors remaining or * there's a bug in scrub_remap_extent()/btrfs_map_block(). */ goto bbio_out; } bio = btrfs_io_bio_alloc(0); bio->bi_iter.bi_sector = logical >> 9; bio->bi_private = sblock; bio->bi_end_io = scrub_missing_raid56_end_io; rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length); if (!rbio) goto rbio_out; for (i = 0; i < sblock->page_count; i++) { struct scrub_page *spage = sblock->pagev[i]; raid56_add_scrub_pages(rbio, spage->page, spage->logical); } btrfs_init_work(&sblock->work, btrfs_scrub_helper, scrub_missing_raid56_worker, NULL, NULL); scrub_block_get(sblock); scrub_pending_bio_inc(sctx); raid56_submit_missing_rbio(rbio); return; rbio_out: bio_put(bio); bbio_out: btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(bbio); spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); } static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace) { struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); scrub_page_get(spage); sblock->pagev[index] = spage; spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->physical_for_dev_replace = 
physical_for_dev_replace; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } WARN_ON(sblock->page_count == 0); if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { /* * This case should only be hit for RAID 5/6 device replace. See * the comment in scrub_missing_raid56_pages() for details. */ scrub_missing_raid56_pages(sblock); } else { for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } if (force) scrub_submit(sctx); } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; } static void scrub_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->fs_info; sbio->status = bio->bi_status; sbio->bio = bio; btrfs_queue_work(fs_info->scrub_workers, &sbio->work); } static void scrub_bio_end_io_worker(struct btrfs_work *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; int i; BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); if (sbio->status) { for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; spage->io_error = 1; spage->sblock->no_io_error_seen = 0; } } /* now complete the scrub_block items that have all pages completed */ for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; struct scrub_block *sblock = spage->sblock; if (atomic_dec_and_test(&sblock->outstanding_pages)) scrub_block_complete(sblock); scrub_block_put(sblock); } bio_put(sbio->bio); sbio->bio = NULL; spin_lock(&sctx->list_lock); sbio->next_free = sctx->first_free; sctx->first_free = sbio->index; spin_unlock(&sctx->list_lock); if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } scrub_pending_bio_dec(sctx); } static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, unsigned long *bitmap, u64 start, u64 len) { u64 offset; u64 nsectors64; u32 nsectors; int sectorsize = sparity->sctx->fs_info->sectorsize; if (len >= sparity->stripe_len) { bitmap_set(bitmap, 0, sparity->nsectors); return; } start -= sparity->logic_start; start = div64_u64_rem(start, sparity->stripe_len, &offset); offset = div_u64(offset, sectorsize); nsectors64 = div_u64(len, sectorsize); ASSERT(nsectors64 < UINT_MAX); nsectors = (u32)nsectors64; if (offset + nsectors <= sparity->nsectors) { bitmap_set(bitmap, offset, nsectors); return; } bitmap_set(bitmap, offset, sparity->nsectors - offset); bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); } static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, u64 start, u64 len) { __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len); } static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, u64 start, u64 len) { __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len); } static void scrub_block_complete(struct scrub_block *sblock) { int corrupted = 0; if (!sblock->no_io_error_seen) { corrupted = 1; scrub_handle_errored_block(sblock); } else { /* * if has checksum error, write via repair mechanism in * dev replace case, 
otherwise write here in dev replace * case. */ corrupted = scrub_checksum(sblock); if (!corrupted && sblock->sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock); } if (sblock->sparity && corrupted && !sblock->data_corrected) { u64 start = sblock->pagev[0]->logical; u64 end = sblock->pagev[sblock->page_count - 1]->logical + PAGE_SIZE; scrub_parity_mark_sectors_error(sblock->sparity, start, end - start); } } static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) { struct btrfs_ordered_sum *sum = NULL; unsigned long index; unsigned long num_sectors; while (!list_empty(&sctx->csum_list)) { sum = list_first_entry(&sctx->csum_list, struct btrfs_ordered_sum, list); if (sum->bytenr > logical) return 0; if (sum->bytenr + sum->len > logical) break; ++sctx->stat.csum_discards; list_del(&sum->list); kfree(sum); sum = NULL; } if (!sum) return 0; index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize); ASSERT(index < UINT_MAX); num_sectors = sum->len / sctx->fs_info->sectorsize; memcpy(csum, sum->sums + index, sctx->csum_size); if (index == num_sectors - 1) { list_del(&sum->list); kfree(sum); } return 1; } /* scrub extent tries to collect up to 64 kB for each bio */ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u64 physical_for_dev_replace) { int ret; u8 csum[BTRFS_CSUM_SIZE]; u32 blocksize; if (flags & BTRFS_EXTENT_FLAG_DATA) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) blocksize = map->stripe_len; else blocksize = sctx->fs_info->sectorsize; spin_lock(&sctx->stat_lock); sctx->stat.data_extents_scrubbed++; sctx->stat.data_bytes_scrubbed += len; spin_unlock(&sctx->stat_lock); } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) blocksize = map->stripe_len; else blocksize = sctx->fs_info->nodesize; spin_lock(&sctx->stat_lock); sctx->stat.tree_extents_scrubbed++; sctx->stat.tree_bytes_scrubbed += len; spin_unlock(&sctx->stat_lock); } else { blocksize = sctx->fs_info->sectorsize; WARN_ON(1); } while (len) { u64 l = min_t(u64, len, blocksize); int have_csum = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) { /* push csums to sbio */ have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) ++sctx->stat.no_csum; } ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? 
csum : NULL, 0, physical_for_dev_replace); if (ret) return ret; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } return 0; } static int scrub_pages_for_parity(struct scrub_parity *sparity, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum) { struct scrub_ctx *sctx = sparity->sctx; struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; sblock->sparity = sparity; scrub_parity_get(sparity); for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); /* For scrub block */ scrub_page_get(spage); sblock->pagev[index] = spage; /* For scrub parity */ scrub_page_get(spage); list_add_tail(&spage->list, &sparity->spages); spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; } WARN_ON(sblock->page_count == 0); for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; } static int scrub_extent_for_parity(struct scrub_parity *sparity, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num) { struct scrub_ctx *sctx = sparity->sctx; int ret; u8 csum[BTRFS_CSUM_SIZE]; u32 blocksize; if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { scrub_parity_mark_sectors_error(sparity, logical, len); return 0; } if (flags & BTRFS_EXTENT_FLAG_DATA) { blocksize = sparity->stripe_len; } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { blocksize = sparity->stripe_len; } else { blocksize = sctx->fs_info->sectorsize; WARN_ON(1); } while (len) { u64 l = min_t(u64, len, blocksize); int have_csum = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) { /* push csums to sbio */ have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) goto skip; } ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? csum : NULL); if (ret) return ret; skip: len -= l; logical += l; physical += l; } return 0; } /* * Given a physical address, this will calculate it's * logical offset. if this is a parity stripe, it will return * the most left data stripe's logical offset. * * return 0 if it is a data stripe, 1 means parity stripe. 
*/ static int get_raid56_logic_offset(u64 physical, int num, struct map_lookup *map, u64 *offset, u64 *stripe_start) { int i; int j = 0; u64 stripe_nr; u64 last_offset; u32 stripe_index; u32 rot; last_offset = (physical - map->stripes[num].physical) * nr_data_stripes(map); if (stripe_start) *stripe_start = last_offset; *offset = last_offset; for (i = 0; i < nr_data_stripes(map); i++) { *offset = last_offset + i * map->stripe_len; stripe_nr = div64_u64(*offset, map->stripe_len); stripe_nr = div_u64(stripe_nr, nr_data_stripes(map)); /* Work out the disk rotation on this stripe-set */ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); /* calculate which stripe this data locates */ rot += i; stripe_index = rot % map->num_stripes; if (stripe_index == num) return 0; if (stripe_index < num) j++; } *offset = last_offset + j * map->stripe_len; return 1; } static void scrub_free_parity(struct scrub_parity *sparity) { struct scrub_ctx *sctx = sparity->sctx; struct scrub_page *curr, *next; int nbits; nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors); if (nbits) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors += nbits; sctx->stat.uncorrectable_errors += nbits; spin_unlock(&sctx->stat_lock); } list_for_each_entry_safe(curr, next, &sparity->spages, list) { list_del_init(&curr->list); scrub_page_put(curr); } kfree(sparity); } static void scrub_parity_bio_endio_worker(struct btrfs_work *work) { struct scrub_parity *sparity = container_of(work, struct scrub_parity, work); struct scrub_ctx *sctx = sparity->sctx; scrub_free_parity(sparity); scrub_pending_bio_dec(sctx); } static void scrub_parity_bio_endio(struct bio *bio) { struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; if (bio->bi_status) bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); bio_put(bio); btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, scrub_parity_bio_endio_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); } static void scrub_parity_check_and_repair(struct scrub_parity *sparity) { struct scrub_ctx *sctx = sparity->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; struct bio *bio; struct btrfs_raid_bio *rbio; struct btrfs_bio *bbio = NULL; u64 length; int ret; if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap, sparity->nsectors)) goto out; length = sparity->logic_end - sparity->logic_start; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, &length, &bbio); if (ret || !bbio || !bbio->raid_map) goto bbio_out; bio = btrfs_io_bio_alloc(0); bio->bi_iter.bi_sector = sparity->logic_start >> 9; bio->bi_private = sparity; bio->bi_end_io = scrub_parity_bio_endio; rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, length, sparity->scrub_dev, sparity->dbitmap, sparity->nsectors); if (!rbio) goto rbio_out; scrub_pending_bio_inc(sctx); raid56_parity_submit_scrub_rbio(rbio); return; rbio_out: bio_put(bio); bbio_out: btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(bbio); bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); out: scrub_free_parity(sparity); } static inline int scrub_calc_parity_bitmap_len(int nsectors) { return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long); } static void scrub_parity_get(struct scrub_parity *sparity) { refcount_inc(&sparity->refs); } 
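/*
 * Illustrative sketch: a minimal userspace model of how
 * scrub_calc_parity_bitmap_len() sizes the two per-stripe bitmaps
 * (dbitmap/ebitmap) that scrub_raid56_parity() appends to struct
 * scrub_parity. The constants below (64KiB stripe, 4KiB sector) are
 * assumptions chosen for the example, not values taken from this file.
 */
#include <stdio.h>
#include <limits.h>

#define EX_BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors scrub_calc_parity_bitmap_len(): round nsectors up to whole longs. */
static int ex_parity_bitmap_len(int nsectors)
{
	return EX_DIV_ROUND_UP(nsectors, EX_BITS_PER_LONG) * sizeof(long);
}

int main(void)
{
	int stripe_len = 64 * 1024;		/* assumed map->stripe_len */
	int sectorsize = 4 * 1024;		/* assumed fs_info->sectorsize */
	int nsectors = stripe_len / sectorsize;	/* 16 sectors per stripe */
	int bitmap_len = ex_parity_bitmap_len(nsectors);

	/*
	 * scrub_raid56_parity() allocates sizeof(struct scrub_parity) plus
	 * 2 * bitmap_len: dbitmap starts at ->bitmap and ebitmap directly
	 * after it, at ->bitmap + bitmap_len.
	 */
	printf("nsectors=%d, one bitmap=%d bytes, both bitmaps=%d bytes\n",
	       nsectors, bitmap_len, 2 * bitmap_len);
	return 0;
}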
static void scrub_parity_put(struct scrub_parity *sparity) { if (!refcount_dec_and_test(&sparity->refs)) return; scrub_parity_check_and_repair(sparity); } static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *sdev, struct btrfs_path *path, u64 logic_start, u64 logic_end) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; struct btrfs_bio *bbio = NULL; u64 flags; int ret; int slot; struct extent_buffer *l; struct btrfs_key key; u64 generation; u64 extent_logical; u64 extent_physical; u64 extent_len; u64 mapped_length; struct btrfs_device *extent_dev; struct scrub_parity *sparity; int nsectors; int bitmap_len; int extent_mirror_num; int stop_loop = 0; nsectors = div_u64(map->stripe_len, fs_info->sectorsize); bitmap_len = scrub_calc_parity_bitmap_len(nsectors); sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, GFP_NOFS); if (!sparity) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } sparity->stripe_len = map->stripe_len; sparity->nsectors = nsectors; sparity->sctx = sctx; sparity->scrub_dev = sdev; sparity->logic_start = logic_start; sparity->logic_end = logic_end; refcount_set(&sparity->refs, 1); INIT_LIST_HEAD(&sparity->spages); sparity->dbitmap = sparity->bitmap; sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; ret = 0; while (logic_start < logic_end) { if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logic_start; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_extent_item(root, path, 0); if (ret < 0) goto out; if (ret > 0) { btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; } } stop_loop = 0; while (1) { u64 bytes; l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; stop_loop = 1; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) goto next; if (key.type == BTRFS_METADATA_ITEM_KEY) bytes = fs_info->nodesize; else bytes = key.offset; if (key.objectid + bytes <= logic_start) goto next; if (key.objectid >= logic_end) { stop_loop = 1; break; } while (key.objectid >= logic_start + map->stripe_len) logic_start += map->stripe_len; extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); generation = btrfs_extent_generation(l, extent); if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && (key.objectid < logic_start || key.objectid + bytes > logic_start + map->stripe_len)) { btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", key.objectid, logic_start); spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); goto next; } again: extent_logical = key.objectid; extent_len = bytes; if (extent_logical < logic_start) { extent_len -= logic_start - extent_logical; extent_logical = logic_start; } if (extent_logical + extent_len > logic_start + map->stripe_len) extent_len = logic_start + map->stripe_len - extent_logical; scrub_parity_mark_sectors_data(sparity, extent_logical, extent_len); mapped_length = extent_len; bbio = NULL; ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, &mapped_length, &bbio, 0); if (!ret) { if (!bbio || mapped_length < extent_len) ret = -EIO; } if (ret) { btrfs_put_bbio(bbio); goto out; } extent_physical = bbio->stripes[0].physical; extent_mirror_num = bbio->mirror_num; extent_dev = bbio->stripes[0].dev; btrfs_put_bbio(bbio); ret = btrfs_lookup_csums_range(csum_root, extent_logical, extent_logical + extent_len - 1, &sctx->csum_list, 1); if (ret) goto out; ret = scrub_extent_for_parity(sparity, extent_logical, extent_len, extent_physical, extent_dev, flags, generation, extent_mirror_num); scrub_free_csums(sctx); if (ret) goto out; if (extent_logical + extent_len < key.objectid + bytes) { logic_start += map->stripe_len; if (logic_start >= logic_end) { stop_loop = 1; break; } if (logic_start < key.objectid + bytes) { cond_resched(); goto again; } } next: path->slots[0]++; } btrfs_release_path(path); if (stop_loop) break; logic_start += map->stripe_len; } out: if (ret < 0) scrub_parity_mark_sectors_error(sparity, logic_start, logic_end - logic_start); scrub_parity_put(sparity); scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); btrfs_release_path(path); return ret < 0 ? 
ret : 0; } static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *scrub_dev, int num, u64 base, u64 length) { struct btrfs_path *path, *ppath; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; struct blk_plug plug; u64 flags; int ret; int slot; u64 nstripes; struct extent_buffer *l; u64 physical; u64 logical; u64 logic_end; u64 physical_end; u64 generation; int mirror_num; struct reada_control *reada1; struct reada_control *reada2; struct btrfs_key key; struct btrfs_key key_end; u64 increment = map->stripe_len; u64 offset; u64 extent_logical; u64 extent_physical; u64 extent_len; u64 stripe_logical; u64 stripe_end; struct btrfs_device *extent_dev; int extent_mirror_num; int stop_loop = 0; physical = map->stripes[num].physical; offset = 0; nstripes = div64_u64(length, map->stripe_len); if (map->type & BTRFS_BLOCK_GROUP_RAID0) { offset = map->stripe_len * num; increment = map->stripe_len * map->num_stripes; mirror_num = 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { int factor = map->num_stripes / map->sub_stripes; offset = map->stripe_len * (num / map->sub_stripes); increment = map->stripe_len * factor; mirror_num = num % map->sub_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical, num, map, &offset, NULL); increment = map->stripe_len * nr_data_stripes(map); mirror_num = 1; } else { increment = map->stripe_len; mirror_num = 1; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; ppath = btrfs_alloc_path(); if (!ppath) { btrfs_free_path(path); return -ENOMEM; } /* * work on commit root. The related disk blocks are static as * long as COW is applied. This means, it is save to rewrite * them to repair disk errors without any race conditions */ path->search_commit_root = 1; path->skip_locking = 1; ppath->search_commit_root = 1; ppath->skip_locking = 1; /* * trigger the readahead for extent tree csum tree and wait for * completion. During readahead, the scrub is officially paused * to not hold off transaction commits */ logical = base + offset; physical_end = physical + nstripes * map->stripe_len; if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical_end, num, map, &logic_end, NULL); logic_end += base; } else { logic_end = logical + increment * nstripes; } wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_blocked_if_needed(fs_info); /* FIXME it might be better to start readahead at commit root */ key.objectid = logical; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = (u64)0; key_end.objectid = logic_end; key_end.type = BTRFS_METADATA_ITEM_KEY; key_end.offset = (u64)-1; reada1 = btrfs_reada_add(root, &key, &key_end); key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.type = BTRFS_EXTENT_CSUM_KEY; key.offset = logical; key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key_end.type = BTRFS_EXTENT_CSUM_KEY; key_end.offset = logic_end; reada2 = btrfs_reada_add(csum_root, &key, &key_end); if (!IS_ERR(reada1)) btrfs_reada_wait(reada1); if (!IS_ERR(reada2)) btrfs_reada_wait(reada2); /* * collect all data csums for the stripe to avoid seeking during * the scrub. 
This might currently (crc32) end up to be about 1MB */ blk_start_plug(&plug); /* * now find all extents for each stripe and scrub them */ ret = 0; while (physical < physical_end) { /* * canceled? */ if (atomic_read(&fs_info->scrub_cancel_req) || atomic_read(&sctx->cancel_req)) { ret = -ECANCELED; goto out; } /* * check to see if we have to pause */ if (atomic_read(&fs_info->scrub_pause_req)) { /* push queued extents */ sctx->flush_all_writes = true; scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); sctx->flush_all_writes = false; scrub_blocked_if_needed(fs_info); } if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; if (ret) { /* it is parity strip */ stripe_logical += base; stripe_end = stripe_logical + increment; ret = scrub_raid56_parity(sctx, map, scrub_dev, ppath, stripe_logical, stripe_end); if (ret) goto out; goto skip; } } if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logical; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_extent_item(root, path, 0); if (ret < 0) goto out; if (ret > 0) { /* there's no smaller item, so stick with the * larger one */ btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; } } stop_loop = 0; while (1) { u64 bytes; l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; stop_loop = 1; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) goto next; if (key.type == BTRFS_METADATA_ITEM_KEY) bytes = fs_info->nodesize; else bytes = key.offset; if (key.objectid + bytes <= logical) goto next; if (key.objectid >= logical + map->stripe_len) { /* out of this device extent */ if (key.objectid >= logic_end) stop_loop = 1; break; } extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); generation = btrfs_extent_generation(l, extent); if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && (key.objectid < logical || key.objectid + bytes > logical + map->stripe_len)) { btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", key.objectid, logical); spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); goto next; } again: extent_logical = key.objectid; extent_len = bytes; /* * trim extent to this stripe */ if (extent_logical < logical) { extent_len -= logical - extent_logical; extent_logical = logical; } if (extent_logical + extent_len > logical + map->stripe_len) { extent_len = logical + map->stripe_len - extent_logical; } extent_physical = extent_logical - logical + physical; extent_dev = scrub_dev; extent_mirror_num = mirror_num; if (sctx->is_dev_replace) scrub_remap_extent(fs_info, extent_logical, extent_len, &extent_physical, &extent_dev, &extent_mirror_num); ret = btrfs_lookup_csums_range(csum_root, extent_logical, extent_logical + extent_len - 1, &sctx->csum_list, 1); if (ret) goto out; ret = scrub_extent(sctx, map, extent_logical, extent_len, extent_physical, extent_dev, flags, generation, extent_mirror_num, extent_logical - logical + physical); scrub_free_csums(sctx); if (ret) goto out; if (extent_logical + extent_len < key.objectid + bytes) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* * loop until we find next data stripe * or we have finished all stripes. */ loop: physical += map->stripe_len; ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; if (ret && physical < physical_end) { stripe_logical += base; stripe_end = stripe_logical + increment; ret = scrub_raid56_parity(sctx, map, scrub_dev, ppath, stripe_logical, stripe_end); if (ret) goto out; goto loop; } } else { physical += map->stripe_len; logical += increment; } if (logical < key.objectid + bytes) { cond_resched(); goto again; } if (physical >= physical_end) { stop_loop = 1; break; } } next: path->slots[0]++; } btrfs_release_path(path); skip: logical += increment; physical += map->stripe_len; spin_lock(&sctx->stat_lock); if (stop_loop) sctx->stat.last_physical = map->stripes[num].physical + length; else sctx->stat.last_physical = physical; spin_unlock(&sctx->stat_lock); if (stop_loop) break; } out: /* push queued extents */ scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); blk_finish_plug(&plug); btrfs_free_path(path); btrfs_free_path(ppath); return ret < 0 ? ret : 0; } static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 chunk_offset, u64 length, u64 dev_offset, struct btrfs_block_group_cache *cache) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; int i; int ret = 0; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); read_unlock(&map_tree->map_tree.lock); if (!em) { /* * Might have been an unused block group deleted by the cleaner * kthread or relocation. 
*/ spin_lock(&cache->lock); if (!cache->removed) ret = -EINVAL; spin_unlock(&cache->lock); return ret; } map = em->map_lookup; if (em->start != chunk_offset) goto out; if (em->len < length) goto out; for (i = 0; i < map->num_stripes; ++i) { if (map->stripes[i].dev->bdev == scrub_dev->bdev && map->stripes[i].physical == dev_offset) { ret = scrub_stripe(sctx, map, scrub_dev, i, chunk_offset, length); if (ret) goto out; } } out: free_extent_map(em); return ret; } static noinline_for_stack int scrub_enumerate_chunks(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 start, u64 end) { struct btrfs_dev_extent *dev_extent = NULL; struct btrfs_path *path; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->dev_root; u64 length; u64 chunk_offset; int ret = 0; int ro_set; int slot; struct extent_buffer *l; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_block_group_cache *cache; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = READA_FORWARD; path->search_commit_root = 1; path->skip_locking = 1; key.objectid = scrub_dev->devid; key.offset = 0ull; key.type = BTRFS_DEV_EXTENT_KEY; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) break; if (ret > 0) { if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(root, path); if (ret < 0) break; if (ret > 0) { ret = 0; break; } } else { ret = 0; } } l = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(l, &found_key, slot); if (found_key.objectid != scrub_dev->devid) break; if (found_key.type != BTRFS_DEV_EXTENT_KEY) break; if (found_key.offset >= end) break; if (found_key.offset < key.offset) break; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); length = btrfs_dev_extent_length(l, dev_extent); if (found_key.offset + length <= start) goto skip; chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); /* * get a reference on the corresponding block group to prevent * the chunk from going away while we scrub it */ cache = btrfs_lookup_block_group(fs_info, chunk_offset); /* some chunks are removed but not committed to disk yet, * continue scrubbing */ if (!cache) goto skip; /* * we need call btrfs_inc_block_group_ro() with scrubs_paused, * to avoid deadlock caused by: * btrfs_inc_block_group_ro() * -> btrfs_wait_for_commit() * -> btrfs_commit_transaction() * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); ret = btrfs_inc_block_group_ro(cache); if (!ret && sctx->is_dev_replace) { /* * If we are doing a device replace wait for any tasks * that started delalloc right before we set the block * group to RO mode, as they might have just allocated * an extent from it or decided they could do a nocow * write. And if any such tasks did that, wait for their * ordered extents to complete and then commit the * current transaction, so that we can later see the new * extent items in the extent tree - the ordered extents * create delayed data references (for cow writes) when * they complete, which will be run and insert the * corresponding extent items into the extent tree when * we commit the transaction they used when running * inode.c:btrfs_finish_ordered_io(). We later use * the commit root of the extent tree to find extents * to copy from the srcdev into the tgtdev, and we don't * want to miss any new extents. 
*/ btrfs_wait_block_group_reservations(cache); btrfs_wait_nocow_writers(cache); ret = btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->key.objectid, cache->key.offset); if (ret > 0) { struct btrfs_trans_handle *trans; trans = btrfs_join_transaction(root); if (IS_ERR(trans)) ret = PTR_ERR(trans); else ret = btrfs_commit_transaction(trans); if (ret) { scrub_pause_off(fs_info); btrfs_put_block_group(cache); break; } } } scrub_pause_off(fs_info); if (ret == 0) { ro_set = 1; } else if (ret == -ENOSPC) { /* * btrfs_inc_block_group_ro return -ENOSPC when it * failed in creating new chunk for metadata. * It is not a problem for scrub/replace, because * metadata are always cowed, and our scrub paused * commit_transactions. */ ro_set = 0; } else { btrfs_warn(fs_info, "failed setting block group ro: %d", ret); btrfs_put_block_group(cache); break; } down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_right = found_key.offset + length; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; up_write(&dev_replace->rwsem); ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, found_key.offset, cache); /* * flush, submit all pending read and write bios, afterwards * wait for them. * Note that in the dev replace case, a read request causes * write requests that are submitted in the read completion * worker. Therefore in the current situation, it is required * that all write requests are flushed, so that all read and * write requests are really completed when bios_in_flight * changes to 0. */ sctx->flush_all_writes = true; scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_pause_on(fs_info); /* * must be called before we decrease @scrub_paused. * make sure we don't block transaction commit while * we are waiting pending workers finished. */ wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); sctx->flush_all_writes = false; scrub_pause_off(fs_info); down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_left = dev_replace->cursor_right; dev_replace->item_needs_writeback = 1; up_write(&fs_info->dev_replace.rwsem); if (ro_set) btrfs_dec_block_group_ro(cache); /* * We might have prevented the cleaner kthread from deleting * this block group if it was already unused because we raced * and set it to RO mode first. So add it back to the unused * list, otherwise it might not ever be deleted unless a manual * balance is triggered or it becomes used and unused again. */ spin_lock(&cache->lock); if (!cache->removed && !cache->ro && cache->reserved == 0 && btrfs_block_group_used(&cache->item) == 0) { spin_unlock(&cache->lock); btrfs_mark_bg_unused(cache); } else { spin_unlock(&cache->lock); } btrfs_put_block_group(cache); if (ret) break; if (sctx->is_dev_replace && atomic64_read(&dev_replace->num_write_errors) > 0) { ret = -EIO; break; } if (sctx->stat.malloc_errors > 0) { ret = -ENOMEM; break; } skip: key.offset = found_key.offset + length; btrfs_release_path(path); } btrfs_free_path(path); return ret; } static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev) { int i; u64 bytenr; u64 gen; int ret; struct btrfs_fs_info *fs_info = sctx->fs_info; if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) return -EIO; /* Seed devices of a new filesystem has their own generation. 
*/ if (scrub_dev->fs_devices != fs_info->fs_devices) gen = scrub_dev->generation; else gen = fs_info->last_trans_committed; for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->commit_total_bytes) break; ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1, bytenr); if (ret) return ret; } wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); return 0; } /* * get a reference count on fs_info->scrub_workers. start worker if necessary */ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; if (fs_info->scrub_workers_refcnt == 0) { fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags, is_dev_replace ? 1 : max_active, 4); if (!fs_info->scrub_workers) goto fail_scrub_workers; fs_info->scrub_wr_completion_workers = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, max_active, 2); if (!fs_info->scrub_wr_completion_workers) goto fail_scrub_wr_completion_workers; fs_info->scrub_parity_workers = btrfs_alloc_workqueue(fs_info, "scrubparity", flags, max_active, 2); if (!fs_info->scrub_parity_workers) goto fail_scrub_parity_workers; } ++fs_info->scrub_workers_refcnt; return 0; fail_scrub_parity_workers: btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); fail_scrub_wr_completion_workers: btrfs_destroy_workqueue(fs_info->scrub_workers); fail_scrub_workers: return -ENOMEM; } static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) { if (--fs_info->scrub_workers_refcnt == 0) { btrfs_destroy_workqueue(fs_info->scrub_workers); btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); btrfs_destroy_workqueue(fs_info->scrub_parity_workers); } WARN_ON(fs_info->scrub_workers_refcnt < 0); } int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, u64 end, struct btrfs_scrub_progress *progress, int readonly, int is_dev_replace) { struct scrub_ctx *sctx; int ret; struct btrfs_device *dev; unsigned int nofs_flag; if (btrfs_fs_closing(fs_info)) return -EINVAL; if (fs_info->nodesize > BTRFS_STRIPE_LEN) { /* * in this case scrub is unable to calculate the checksum * the way scrub is implemented. Do not handle this * situation at all because it won't ever happen. 
*/ btrfs_err(fs_info, "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails", fs_info->nodesize, BTRFS_STRIPE_LEN); return -EINVAL; } if (fs_info->sectorsize != PAGE_SIZE) { /* not supported for data w/o checksums */ btrfs_err_rl(fs_info, "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails", fs_info->sectorsize, PAGE_SIZE); return -EINVAL; } if (fs_info->nodesize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK || fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { /* * would exhaust the array bounds of pagev member in * struct scrub_block */ btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails", fs_info->nodesize, SCRUB_MAX_PAGES_PER_BLOCK, fs_info->sectorsize, SCRUB_MAX_PAGES_PER_BLOCK); return -EINVAL; } /* Allocate outside of device_list_mutex */ sctx = scrub_setup_ctx(fs_info, is_dev_replace); if (IS_ERR(sctx)) return PTR_ERR(sctx); mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; goto out_free_ctx; } if (!is_dev_replace && !readonly && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; goto out_free_ctx; } mutex_lock(&fs_info->scrub_lock); if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; goto out_free_ctx; } down_read(&fs_info->dev_replace.rwsem); if (dev->scrub_ctx || (!is_dev_replace && btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { up_read(&fs_info->dev_replace.rwsem); mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; goto out_free_ctx; } up_read(&fs_info->dev_replace.rwsem); ret = scrub_workers_get(fs_info, is_dev_replace); if (ret) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); goto out_free_ctx; } sctx->readonly = readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); /* * checking @scrub_pause_req here, we can avoid * race between committing transaction and scrubbing. */ __scrub_blocked_if_needed(fs_info); atomic_inc(&fs_info->scrubs_running); mutex_unlock(&fs_info->scrub_lock); /* * In order to avoid deadlock with reclaim when there is a transaction * trying to pause scrub, make sure we use GFP_NOFS for all the * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity() * invoked by our callees. The pausing request is done when the * transaction commit starts, and it blocks the transaction until scrub * is paused (done at specific points at scrub_stripe() or right above * before incrementing fs_info->scrubs_running). */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { /* * by holding device list mutex, we can * kick off writing super in log tree sync. 
*/ mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); } if (!ret) ret = scrub_enumerate_chunks(sctx, dev, start, end); memalloc_nofs_restore(nofs_flag); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); atomic_dec(&fs_info->scrubs_running); wake_up(&fs_info->scrub_pause_wait); wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); if (progress) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; scrub_workers_put(fs_info); mutex_unlock(&fs_info->scrub_lock); scrub_put_ctx(sctx); return ret; out_free_ctx: scrub_free_ctx(sctx); return ret; } void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); atomic_inc(&fs_info->scrub_pause_req); while (atomic_read(&fs_info->scrubs_paused) != atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_paused) == atomic_read(&fs_info->scrubs_running)); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); } void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) { atomic_dec(&fs_info->scrub_pause_req); wake_up(&fs_info->scrub_pause_wait); } int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); if (!atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&fs_info->scrub_cancel_req); while (atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_running) == 0); mutex_lock(&fs_info->scrub_lock); } atomic_dec(&fs_info->scrub_cancel_req); mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info, struct btrfs_device *dev) { struct scrub_ctx *sctx; mutex_lock(&fs_info->scrub_lock); sctx = dev->scrub_ctx; if (!sctx) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&sctx->cancel_req); while (dev->scrub_ctx) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, dev->scrub_ctx == NULL); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, struct btrfs_scrub_progress *progress) { struct btrfs_device *dev; struct scrub_ctx *sctx = NULL; mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL); if (dev) sctx = dev->scrub_ctx; if (sctx) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_unlock(&fs_info->fs_devices->device_list_mutex); return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; } static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u64 extent_len, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num) { u64 mapped_length; struct btrfs_bio *bbio = NULL; int ret; mapped_length = extent_len; ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, &mapped_length, &bbio, 0); if (ret || !bbio || mapped_length < extent_len || !bbio->stripes[0].dev->bdev) { btrfs_put_bbio(bbio); return; } *extent_physical = bbio->stripes[0].physical; *extent_mirror_num = bbio->mirror_num; *extent_dev = bbio->stripes[0].dev; btrfs_put_bbio(bbio); }
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2011, 2012 STRATO. All rights reserved. */ #include <linux/blkdev.h> #include <linux/ratelimit.h> #include <linux/sched/mm.h> #include "ctree.h" #include "volumes.h" #include "disk-io.h" #include "ordered-data.h" #include "transaction.h" #include "backref.h" #include "extent_io.h" #include "dev-replace.h" #include "check-integrity.h" #include "rcu-string.h" #include "raid56.h" /* * This is only the first step towards a full-features scrub. It reads all * extent and super block and verifies the checksums. In case a bad checksum * is found or the extent cannot be read, good data will be written back if * any can be found. * * Future enhancements: * - In case an unrepairable extent is encountered, track which files are * affected and report them * - track and record media errors, throw out bad devices * - add a mode to also read unallocated space */ struct scrub_block; struct scrub_ctx; /* * the following three values only influence the performance. * The last one configures the number of parallel and outstanding I/O * operations. The first two values configure an upper limit for the number * of (dynamically allocated) pages that are added to a bio. */ #define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */ #define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */ #define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */ /* * the following value times PAGE_SIZE needs to be large enough to match the * largest node/leaf/sector size that shall be supported. * Values larger than BTRFS_STRIPE_LEN are not supported. */ #define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */ struct scrub_recover { refcount_t refs; struct btrfs_bio *bbio; u64 map_length; }; struct scrub_page { struct scrub_block *sblock; struct page *page; struct btrfs_device *dev; struct list_head list; u64 flags; /* extent flags */ u64 generation; u64 logical; u64 physical; u64 physical_for_dev_replace; atomic_t refs; struct { unsigned int mirror_num:8; unsigned int have_csum:1; unsigned int io_error:1; }; u8 csum[BTRFS_CSUM_SIZE]; struct scrub_recover *recover; }; struct scrub_bio { int index; struct scrub_ctx *sctx; struct btrfs_device *dev; struct bio *bio; blk_status_t status; u64 logical; u64 physical; #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO]; #else struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO]; #endif int page_count; int next_free; struct btrfs_work work; }; struct scrub_block { struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK]; int page_count; atomic_t outstanding_pages; refcount_t refs; /* free mem on transition to zero */ struct scrub_ctx *sctx; struct scrub_parity *sparity; struct { unsigned int header_error:1; unsigned int checksum_error:1; unsigned int no_io_error_seen:1; unsigned int generation_error:1; /* also sets header_error */ /* The following is for the data used to check parity */ /* It is for the data with checksum */ unsigned int data_corrected:1; }; struct btrfs_work work; }; /* Used for the chunks with parity stripe such RAID5/6 */ struct scrub_parity { struct scrub_ctx *sctx; struct btrfs_device *scrub_dev; u64 logic_start; u64 logic_end; int nsectors; u64 stripe_len; refcount_t refs; struct list_head spages; /* Work of parity check and repair */ struct btrfs_work work; /* Mark the parity blocks which have data */ unsigned long *dbitmap; /* * Mark the parity blocks which have data, but errors happen when * read data or check data */ unsigned long *ebitmap; unsigned long bitmap[0]; 
}; struct scrub_ctx { struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX]; struct btrfs_fs_info *fs_info; int first_free; int curr; atomic_t bios_in_flight; atomic_t workers_pending; spinlock_t list_lock; wait_queue_head_t list_wait; u16 csum_size; struct list_head csum_list; atomic_t cancel_req; int readonly; int pages_per_rd_bio; int is_dev_replace; struct scrub_bio *wr_curr_bio; struct mutex wr_lock; int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */ struct btrfs_device *wr_tgtdev; bool flush_all_writes; /* * statistics */ struct btrfs_scrub_progress stat; spinlock_t stat_lock; /* * Use a ref counter to avoid use-after-free issues. Scrub workers * decrement bios_in_flight and workers_pending and then do a wakeup * on the list_wait wait queue. We must ensure the main scrub task * doesn't free the scrub context before or while the workers are * doing the wakeup() call. */ refcount_t refs; }; struct scrub_warning { struct btrfs_path *path; u64 extent_item_size; const char *errstr; u64 physical; u64 logical; struct btrfs_device *dev; }; struct full_stripe_lock { struct rb_node node; u64 logical; u64 refs; struct mutex mutex; }; static void scrub_pending_bio_inc(struct scrub_ctx *sctx); static void scrub_pending_bio_dec(struct scrub_ctx *sctx); static int scrub_handle_errored_block(struct scrub_block *sblock_to_check); static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck); static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int retry_failed_mirror); static void scrub_recheck_block_checksum(struct scrub_block *sblock); static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good); static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write); static void scrub_write_block_to_dev_replace(struct scrub_block *sblock); static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, int page_num); static int scrub_checksum_data(struct scrub_block *sblock); static int scrub_checksum_tree_block(struct scrub_block *sblock); static int scrub_checksum_super(struct scrub_block *sblock); static void scrub_block_get(struct scrub_block *sblock); static void scrub_block_put(struct scrub_block *sblock); static void scrub_page_get(struct scrub_page *spage); static void scrub_page_put(struct scrub_page *spage); static void scrub_parity_get(struct scrub_parity *sparity); static void scrub_parity_put(struct scrub_parity *sparity); static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace); static void scrub_bio_end_io(struct bio *bio); static void scrub_bio_end_io_worker(struct btrfs_work *work); static void scrub_block_complete(struct scrub_block *sblock); static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u64 extent_len, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num); static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static void scrub_wr_submit(struct scrub_ctx *sctx); static void scrub_wr_bio_end_io(struct bio *bio); static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void 
scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_put_ctx(struct scrub_ctx *sctx); static inline int scrub_is_page_on_raid56(struct scrub_page *page) { return page->recover && (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); } static void scrub_pending_bio_inc(struct scrub_ctx *sctx) { refcount_inc(&sctx->refs); atomic_inc(&sctx->bios_in_flight); } static void scrub_pending_bio_dec(struct scrub_ctx *sctx) { atomic_dec(&sctx->bios_in_flight); wake_up(&sctx->list_wait); scrub_put_ctx(sctx); } static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { while (atomic_read(&fs_info->scrub_pause_req)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrub_pause_req) == 0); mutex_lock(&fs_info->scrub_lock); } } static void scrub_pause_on(struct btrfs_fs_info *fs_info) { atomic_inc(&fs_info->scrubs_paused); wake_up(&fs_info->scrub_pause_wait); } static void scrub_pause_off(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); __scrub_blocked_if_needed(fs_info); atomic_dec(&fs_info->scrubs_paused); mutex_unlock(&fs_info->scrub_lock); wake_up(&fs_info->scrub_pause_wait); } static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { scrub_pause_on(fs_info); scrub_pause_off(fs_info); } /* * Insert new full stripe lock into full stripe locks tree * * Return pointer to existing or newly inserted full_stripe_lock structure if * everything works well. * Return ERR_PTR(-ENOMEM) if we failed to allocate memory * * NOTE: caller must hold full_stripe_locks_root->lock before calling this * function */ static struct full_stripe_lock *insert_full_stripe_lock( struct btrfs_full_stripe_locks_tree *locks_root, u64 fstripe_logical) { struct rb_node **p; struct rb_node *parent = NULL; struct full_stripe_lock *entry; struct full_stripe_lock *ret; lockdep_assert_held(&locks_root->lock); p = &locks_root->root.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct full_stripe_lock, node); if (fstripe_logical < entry->logical) { p = &(*p)->rb_left; } else if (fstripe_logical > entry->logical) { p = &(*p)->rb_right; } else { entry->refs++; return entry; } } /* * Insert new lock. */ ret = kmalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return ERR_PTR(-ENOMEM); ret->logical = fstripe_logical; ret->refs = 1; mutex_init(&ret->mutex); rb_link_node(&ret->node, parent, p); rb_insert_color(&ret->node, &locks_root->root); return ret; } /* * Search for a full stripe lock of a block group * * Return pointer to existing full stripe lock if found * Return NULL if not found */ static struct full_stripe_lock *search_full_stripe_lock( struct btrfs_full_stripe_locks_tree *locks_root, u64 fstripe_logical) { struct rb_node *node; struct full_stripe_lock *entry; lockdep_assert_held(&locks_root->lock); node = locks_root->root.rb_node; while (node) { entry = rb_entry(node, struct full_stripe_lock, node); if (fstripe_logical < entry->logical) node = node->rb_left; else if (fstripe_logical > entry->logical) node = node->rb_right; else return entry; } return NULL; } /* * Helper to get full stripe logical from a normal bytenr. * * Caller must ensure @cache is a RAID56 block group. */ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, u64 bytenr) { u64 ret; /* * Due to chunk item size limit, full stripe length should not be * larger than U32_MAX. Just a sanity check here. 
*/ WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX); /* * round_down() can only handle power of 2, while RAID56 full * stripe length can be 64KiB * n, so we need to manually round down. */ ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) * cache->full_stripe_len + cache->key.objectid; return ret; } /* * Lock a full stripe to avoid concurrency of recovery and read * * It's only used for profiles with parities (RAID5/6), for other profiles it * does nothing. * * Return 0 if we locked full stripe covering @bytenr, with a mutex held. * So caller must call unlock_full_stripe() at the same context. * * Return <0 if encounters error. */ static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool *locked_ret) { struct btrfs_block_group_cache *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *existing; u64 fstripe_start; int ret = 0; *locked_ret = false; bg_cache = btrfs_lookup_block_group(fs_info, bytenr); if (!bg_cache) { ASSERT(0); return -ENOENT; } /* Profiles not based on parity don't need full stripe lock */ if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) goto out; locks_root = &bg_cache->full_stripe_locks_root; fstripe_start = get_full_stripe_logical(bg_cache, bytenr); /* Now insert the full stripe lock */ mutex_lock(&locks_root->lock); existing = insert_full_stripe_lock(locks_root, fstripe_start); mutex_unlock(&locks_root->lock); if (IS_ERR(existing)) { ret = PTR_ERR(existing); goto out; } mutex_lock(&existing->mutex); *locked_ret = true; out: btrfs_put_block_group(bg_cache); return ret; } /* * Unlock a full stripe. * * NOTE: Caller must ensure it's the same context calling corresponding * lock_full_stripe(). * * Return 0 if we unlock full stripe without problem. * Return <0 for error */ static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool locked) { struct btrfs_block_group_cache *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *fstripe_lock; u64 fstripe_start; bool freeit = false; int ret = 0; /* If we didn't acquire full stripe lock, no need to continue */ if (!locked) return 0; bg_cache = btrfs_lookup_block_group(fs_info, bytenr); if (!bg_cache) { ASSERT(0); return -ENOENT; } if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) goto out; locks_root = &bg_cache->full_stripe_locks_root; fstripe_start = get_full_stripe_logical(bg_cache, bytenr); mutex_lock(&locks_root->lock); fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start); /* Unpaired unlock_full_stripe() detected */ if (!fstripe_lock) { WARN_ON(1); ret = -ENOENT; mutex_unlock(&locks_root->lock); goto out; } if (fstripe_lock->refs == 0) { WARN_ON(1); btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow", fstripe_lock->logical); } else { fstripe_lock->refs--; } if (fstripe_lock->refs == 0) { rb_erase(&fstripe_lock->node, &locks_root->root); freeit = true; } mutex_unlock(&locks_root->lock); mutex_unlock(&fstripe_lock->mutex); if (freeit) kfree(fstripe_lock); out: btrfs_put_block_group(bg_cache); return ret; } static void scrub_free_csums(struct scrub_ctx *sctx) { while (!list_empty(&sctx->csum_list)) { struct btrfs_ordered_sum *sum; sum = list_first_entry(&sctx->csum_list, struct btrfs_ordered_sum, list); list_del(&sum->list); kfree(sum); } } static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) { int i; if (!sctx) return; /* this can happen when scrub is cancelled */ if (sctx->curr != -1) { struct scrub_bio *sbio = sctx->bios[sctx->curr]; for (i = 0; i < 
sbio->page_count; i++) { WARN_ON(!sbio->pagev[i]->page); scrub_block_put(sbio->pagev[i]->sblock); } bio_put(sbio->bio); } for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { struct scrub_bio *sbio = sctx->bios[i]; if (!sbio) break; kfree(sbio); } kfree(sctx->wr_curr_bio); scrub_free_csums(sctx); kfree(sctx); } static void scrub_put_ctx(struct scrub_ctx *sctx) { if (refcount_dec_and_test(&sctx->refs)) scrub_free_ctx(sctx); } static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( struct btrfs_fs_info *fs_info, int is_dev_replace) { struct scrub_ctx *sctx; int i; sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); if (!sctx) goto nomem; refcount_set(&sctx->refs, 1); sctx->is_dev_replace = is_dev_replace; sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; sctx->curr = -1; sctx->fs_info = fs_info; for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { struct scrub_bio *sbio; sbio = kzalloc(sizeof(*sbio), GFP_KERNEL); if (!sbio) goto nomem; sctx->bios[i] = sbio; sbio->index = i; sbio->sctx = sctx; sbio->page_count = 0; btrfs_init_work(&sbio->work, btrfs_scrub_helper, scrub_bio_end_io_worker, NULL, NULL); if (i != SCRUB_BIOS_PER_SCTX - 1) sctx->bios[i]->next_free = i + 1; else sctx->bios[i]->next_free = -1; } sctx->first_free = 0; atomic_set(&sctx->bios_in_flight, 0); atomic_set(&sctx->workers_pending, 0); atomic_set(&sctx->cancel_req, 0); sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); INIT_LIST_HEAD(&sctx->csum_list); spin_lock_init(&sctx->list_lock); spin_lock_init(&sctx->stat_lock); init_waitqueue_head(&sctx->list_wait); WARN_ON(sctx->wr_curr_bio != NULL); mutex_init(&sctx->wr_lock); sctx->wr_curr_bio = NULL; if (is_dev_replace) { WARN_ON(!fs_info->dev_replace.tgtdev); sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; sctx->flush_all_writes = false; } return sctx; nomem: scrub_free_ctx(sctx); return ERR_PTR(-ENOMEM); } static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *warn_ctx) { u64 isize; u32 nlink; int ret; int i; unsigned nofs_flag; struct extent_buffer *eb; struct btrfs_inode_item *inode_item; struct scrub_warning *swarn = warn_ctx; struct btrfs_fs_info *fs_info = swarn->dev->fs_info; struct inode_fs_paths *ipath = NULL; struct btrfs_root *local_root; struct btrfs_key root_key; struct btrfs_key key; root_key.objectid = root; root_key.type = BTRFS_ROOT_ITEM_KEY; root_key.offset = (u64)-1; local_root = btrfs_read_fs_root_no_name(fs_info, &root_key); if (IS_ERR(local_root)) { ret = PTR_ERR(local_root); goto err; } /* * this makes the path point to (inum INODE_ITEM ioff) */ key.objectid = inum; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); if (ret) { btrfs_release_path(swarn->path); goto err; } eb = swarn->path->nodes[0]; inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], struct btrfs_inode_item); isize = btrfs_inode_size(eb, inode_item); nlink = btrfs_inode_nlink(eb, inode_item); btrfs_release_path(swarn->path); /* * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub * uses GFP_NOFS in this context, so we keep it consistent but it does * not seem to be strictly necessary. 
*/ nofs_flag = memalloc_nofs_save(); ipath = init_ipath(4096, local_root, swarn->path); memalloc_nofs_restore(nofs_flag); if (IS_ERR(ipath)) { ret = PTR_ERR(ipath); ipath = NULL; goto err; } ret = paths_from_inode(inum, ipath); if (ret < 0) goto err; /* * we deliberately ignore the bit ipath might have been too small to * hold all of the paths here */ for (i = 0; i < ipath->fspath->elem_cnt; ++i) btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)", swarn->errstr, swarn->logical, rcu_str_deref(swarn->dev->name), swarn->physical, root, inum, offset, min(isize - offset, (u64)PAGE_SIZE), nlink, (char *)(unsigned long)ipath->fspath->val[i]); free_ipath(ipath); return 0; err: btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d", swarn->errstr, swarn->logical, rcu_str_deref(swarn->dev->name), swarn->physical, root, inum, offset, ret); free_ipath(ipath); return 0; } static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) { struct btrfs_device *dev; struct btrfs_fs_info *fs_info; struct btrfs_path *path; struct btrfs_key found_key; struct extent_buffer *eb; struct btrfs_extent_item *ei; struct scrub_warning swarn; unsigned long ptr = 0; u64 extent_item_pos; u64 flags = 0; u64 ref_root; u32 item_size; u8 ref_level = 0; int ret; WARN_ON(sblock->page_count < 1); dev = sblock->pagev[0]->dev; fs_info = sblock->sctx->fs_info; path = btrfs_alloc_path(); if (!path) return; swarn.physical = sblock->pagev[0]->physical; swarn.logical = sblock->pagev[0]->logical; swarn.errstr = errstr; swarn.dev = NULL; ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, &flags); if (ret < 0) goto out; extent_item_pos = swarn.logical - found_key.objectid; swarn.extent_item_size = found_key.offset; eb = path->nodes[0]; ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); item_size = btrfs_item_size_nr(eb, path->slots[0]); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { do { ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, item_size, &ref_root, &ref_level); btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu", errstr, swarn.logical, rcu_str_deref(dev->name), swarn.physical, ref_level ? "node" : "leaf", ret < 0 ? -1 : ref_level, ret < 0 ? -1 : ref_root); } while (ret != 1); btrfs_release_path(path); } else { btrfs_release_path(path); swarn.path = path; swarn.dev = dev; iterate_extent_inodes(fs_info, found_key.objectid, extent_item_pos, 1, scrub_print_warning_inode, &swarn, false); } out: btrfs_free_path(path); } static inline void scrub_get_recover(struct scrub_recover *recover) { refcount_inc(&recover->refs); } static inline void scrub_put_recover(struct btrfs_fs_info *fs_info, struct scrub_recover *recover) { if (refcount_dec_and_test(&recover->refs)) { btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(recover->bbio); kfree(recover); } } /* * scrub_handle_errored_block gets called when either verification of the * pages failed or the bio failed to read, e.g. with EIO. In the latter * case, this function handles all pages in the bio, even though only one * may be bad. * The goal of this function is to repair the errored block by using the * contents of one of the mirrors. 
*/ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) { struct scrub_ctx *sctx = sblock_to_check->sctx; struct btrfs_device *dev; struct btrfs_fs_info *fs_info; u64 logical; unsigned int failed_mirror_index; unsigned int is_metadata; unsigned int have_csum; struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */ struct scrub_block *sblock_bad; int ret; int mirror_index; int page_num; int success; bool full_stripe_locked; unsigned int nofs_flag; static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); BUG_ON(sblock_to_check->page_count < 1); fs_info = sctx->fs_info; if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { /* * if we find an error in a super block, we just report it. * They will get written with the next transaction commit * anyway */ spin_lock(&sctx->stat_lock); ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); return 0; } logical = sblock_to_check->pagev[0]->logical; BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1); failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1; is_metadata = !(sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA); have_csum = sblock_to_check->pagev[0]->have_csum; dev = sblock_to_check->pagev[0]->dev; /* * We must use GFP_NOFS because the scrub task might be waiting for a * worker task executing this function and in turn a transaction commit * might be waiting the scrub task to pause (which needs to wait for all * the worker tasks to complete before pausing). * We do allocations in the workers through insert_full_stripe_lock() * and scrub_add_page_to_wr_bio(), which happens down the call chain of * this function. */ nofs_flag = memalloc_nofs_save(); /* * For RAID5/6, race can happen for a different device scrub thread. * For data corruption, Parity and Data threads will both try * to recovery the data. * Race can lead to doubly added csum error, or even unrecoverable * error. */ ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); if (ret < 0) { memalloc_nofs_restore(nofs_flag); spin_lock(&sctx->stat_lock); if (ret == -ENOMEM) sctx->stat.malloc_errors++; sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); return ret; } /* * read all mirrors one after the other. This includes to * re-read the extent or metadata block that failed (that was * the cause that this fixup code is called) another time, * page by page this time in order to know which pages * caused I/O errors and which ones are good (for all mirrors). * It is the goal to handle the situation when more than one * mirror contains I/O errors, but the errors do not * overlap, i.e. the data can be repaired by selecting the * pages from those mirrors without I/O error on the * particular pages. One example (with blocks >= 2 * PAGE_SIZE) * would be that mirror #1 has an I/O error on the first page, * the second page is good, and mirror #2 has an I/O error on * the second page, but the first page is good. * Then the first page of the first mirror can be repaired by * taking the first page of the second mirror, and the * second page of the second mirror can be repaired by * copying the contents of the 2nd page of the 1st mirror. * One more note: if the pages of one mirror contain I/O * errors, the checksum cannot be verified. In order to get * the best data for repairing, the first attempt is to find * a mirror without I/O errors and with a validated checksum. 
* Only if this is not possible, the pages are picked from * mirrors with I/O errors without considering the checksum. * If the latter is the case, at the end, the checksum of the * repaired area is verified in order to correctly maintain * the statistics. */ sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS, sizeof(*sblocks_for_recheck), GFP_KERNEL); if (!sblocks_for_recheck) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); goto out; } /* setup the context, map the logical blocks and alloc the pages */ ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); if (ret) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); goto out; } BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); sblock_bad = sblocks_for_recheck + failed_mirror_index; /* build and submit the bios for the failed mirror, check checksums */ scrub_recheck_block(fs_info, sblock_bad, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) { /* * the error disappeared after reading page by page, or * the area was part of a huge bio and other parts of the * bio caused I/O errors, or the block layer merged several * read requests into one and the error is caused by a * different bio (usually one of the two latter cases is * the cause) */ spin_lock(&sctx->stat_lock); sctx->stat.unverified_errors++; sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); if (sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock_bad); goto out; } if (!sblock_bad->no_io_error_seen) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("i/o error", sblock_to_check); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); } else if (sblock_bad->checksum_error) { spin_lock(&sctx->stat_lock); sctx->stat.csum_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("checksum error", sblock_to_check); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); } else if (sblock_bad->header_error) { spin_lock(&sctx->stat_lock); sctx->stat.verify_errors++; spin_unlock(&sctx->stat_lock); if (__ratelimit(&_rs)) scrub_print_warning("checksum/header error", sblock_to_check); if (sblock_bad->generation_error) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_GENERATION_ERRS); else btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); } if (sctx->readonly) { ASSERT(!sctx->is_dev_replace); goto out; } /* * now build and submit the bios for the other mirrors, check * checksums. * First try to pick the mirror which is completely without I/O * errors and also does not have a checksum error. * If one is found, and if a checksum is present, the full block * that is known to contain an error is rewritten. Afterwards * the block is known to be corrected. * If a mirror is found which is completely correct, and no * checksum is present, only those pages are rewritten that had * an I/O error in the block to be repaired, since it cannot be * determined, which copy of the other pages is better (and it * could happen otherwise that a correct page would be * overwritten by a bad one). 
*/ for (mirror_index = 0; ;mirror_index++) { struct scrub_block *sblock_other; if (mirror_index == failed_mirror_index) continue; /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */ if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) { if (mirror_index >= BTRFS_MAX_MIRRORS) break; if (!sblocks_for_recheck[mirror_index].page_count) break; sblock_other = sblocks_for_recheck + mirror_index; } else { struct scrub_recover *r = sblock_bad->pagev[0]->recover; int max_allowed = r->bbio->num_stripes - r->bbio->num_tgtdevs; if (mirror_index >= max_allowed) break; if (!sblocks_for_recheck[1].page_count) break; ASSERT(failed_mirror_index == 0); sblock_other = sblocks_for_recheck + 1; sblock_other->pagev[0]->mirror_num = 1 + mirror_index; } /* build and submit the bios, check checksums */ scrub_recheck_block(fs_info, sblock_other, 0); if (!sblock_other->header_error && !sblock_other->checksum_error && sblock_other->no_io_error_seen) { if (sctx->is_dev_replace) { scrub_write_block_to_dev_replace(sblock_other); goto corrected_error; } else { ret = scrub_repair_block_from_good_copy( sblock_bad, sblock_other); if (!ret) goto corrected_error; } } } if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) goto did_not_correct_error; /* * In case of I/O errors in the area that is supposed to be * repaired, continue by picking good copies of those pages. * Select the good pages from mirrors to rewrite bad pages from * the area to fix. Afterwards verify the checksum of the block * that is supposed to be repaired. This verification step is * only done for the purpose of statistic counting and for the * final scrub report, whether errors remain. * A perfect algorithm could make use of the checksum and try * all possible combinations of pages from the different mirrors * until the checksum verification succeeds. For example, when * the 2nd page of mirror #1 faces I/O errors, and the 2nd page * of mirror #2 is readable but the final checksum test fails, * then the 2nd page of mirror #3 could be tried, whether now * the final checksum succeeds. But this would be a rare * exception and is therefore not implemented. At least it is * avoided that the good copy is overwritten. * A more useful improvement would be to pick the sectors * without I/O error based on sector sizes (512 bytes on legacy * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one * mirror could be repaired by taking 512 byte of a different * mirror, even if other 512 byte sectors in the same PAGE_SIZE * area are unreadable. */ success = 1; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_block *sblock_other = NULL; /* skip no-io-error page in scrub */ if (!page_bad->io_error && !sctx->is_dev_replace) continue; if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) { /* * In case of dev replace, if raid56 rebuild process * didn't work out correct data, then copy the content * in sblock_bad to make sure target device is identical * to source device, instead of writing garbage data in * sblock_for_recheck array to target device. */ sblock_other = NULL; } else if (page_bad->io_error) { /* try to find no-io-error page in mirrors */ for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS && sblocks_for_recheck[mirror_index].page_count > 0; mirror_index++) { if (!sblocks_for_recheck[mirror_index]. 
pagev[page_num]->io_error) { sblock_other = sblocks_for_recheck + mirror_index; break; } } if (!sblock_other) success = 0; } if (sctx->is_dev_replace) { /* * did not find a mirror to fetch the page * from. scrub_write_page_to_dev_replace() * handles this case (page->io_error), by * filling the block with zeros before * submitting the write request */ if (!sblock_other) sblock_other = sblock_bad; if (scrub_write_page_to_dev_replace(sblock_other, page_num) != 0) { atomic64_inc( &fs_info->dev_replace.num_write_errors); success = 0; } } else if (sblock_other) { ret = scrub_repair_page_from_good_copy(sblock_bad, sblock_other, page_num, 0); if (0 == ret) page_bad->io_error = 0; else success = 0; } } if (success && !sctx->is_dev_replace) { if (is_metadata || have_csum) { /* * need to verify the checksum now that all * sectors on disk are repaired (the write * request for data to be repaired is on its way). * Just be lazy and use scrub_recheck_block() * which re-reads the data before the checksum * is verified, but most likely the data comes out * of the page cache. */ scrub_recheck_block(fs_info, sblock_bad, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) goto corrected_error; else goto did_not_correct_error; } else { corrected_error: spin_lock(&sctx->stat_lock); sctx->stat.corrected_errors++; sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "fixed up error at logical %llu on dev %s", logical, rcu_str_deref(dev->name)); } } else { did_not_correct_error: spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "unable to fixup (regular) error at logical %llu on dev %s", logical, rcu_str_deref(dev->name)); } out: if (sblocks_for_recheck) { for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) { struct scrub_block *sblock = sblocks_for_recheck + mirror_index; struct scrub_recover *recover; int page_index; for (page_index = 0; page_index < sblock->page_count; page_index++) { sblock->pagev[page_index]->sblock = NULL; recover = sblock->pagev[page_index]->recover; if (recover) { scrub_put_recover(fs_info, recover); sblock->pagev[page_index]->recover = NULL; } scrub_page_put(sblock->pagev[page_index]); } } kfree(sblocks_for_recheck); } ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); memalloc_nofs_restore(nofs_flag); if (ret < 0) return ret; return 0; } static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) { if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) return 2; else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) return 3; else return (int)bbio->num_stripes; } static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, u64 *raid_map, u64 mapped_length, int nstripes, int mirror, int *stripe_index, u64 *stripe_offset) { int i; if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* RAID5/6 */ for (i = 0; i < nstripes; i++) { if (raid_map[i] == RAID6_Q_STRIPE || raid_map[i] == RAID5_P_STRIPE) continue; if (logical >= raid_map[i] && logical < raid_map[i] + mapped_length) break; } *stripe_index = i; *stripe_offset = logical - raid_map[i]; } else { /* The other RAID type */ *stripe_index = mirror; *stripe_offset = 0; } } static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck) { struct scrub_ctx *sctx = original_sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 length = original_sblock->page_count * PAGE_SIZE; u64 logical 
= original_sblock->pagev[0]->logical; u64 generation = original_sblock->pagev[0]->generation; u64 flags = original_sblock->pagev[0]->flags; u64 have_csum = original_sblock->pagev[0]->have_csum; struct scrub_recover *recover; struct btrfs_bio *bbio; u64 sublen; u64 mapped_length; u64 stripe_offset; int stripe_index; int page_index = 0; int mirror_index; int nmirrors; int ret; /* * note: the two members refs and outstanding_pages * are not used (and not set) in the blocks that are used for * the recheck procedure */ while (length > 0) { sublen = min_t(u64, length, PAGE_SIZE); mapped_length = sublen; bbio = NULL; /* * with a length of PAGE_SIZE, each returned stripe * represents one mirror */ btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &mapped_length, &bbio); if (ret || !bbio || mapped_length < sublen) { btrfs_put_bbio(bbio); btrfs_bio_counter_dec(fs_info); return -EIO; } recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); if (!recover) { btrfs_put_bbio(bbio); btrfs_bio_counter_dec(fs_info); return -ENOMEM; } refcount_set(&recover->refs, 1); recover->bbio = bbio; recover->map_length = mapped_length; BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK); nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; struct scrub_page *page; sblock = sblocks_for_recheck + mirror_index; sblock->sctx = sctx; page = kzalloc(sizeof(*page), GFP_NOFS); if (!page) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_put_recover(fs_info, recover); return -ENOMEM; } scrub_page_get(page); sblock->pagev[page_index] = page; page->sblock = sblock; page->flags = flags; page->generation = generation; page->logical = logical; page->have_csum = have_csum; if (have_csum) memcpy(page->csum, original_sblock->pagev[0]->csum, sctx->csum_size); scrub_stripe_index_and_offset(logical, bbio->map_type, bbio->raid_map, mapped_length, bbio->num_stripes - bbio->num_tgtdevs, mirror_index, &stripe_index, &stripe_offset); page->physical = bbio->stripes[stripe_index].physical + stripe_offset; page->dev = bbio->stripes[stripe_index].dev; BUG_ON(page_index >= original_sblock->page_count); page->physical_for_dev_replace = original_sblock->pagev[page_index]-> physical_for_dev_replace; /* for missing devices, dev->bdev is NULL */ page->mirror_num = mirror_index + 1; sblock->page_count++; page->page = alloc_page(GFP_NOFS); if (!page->page) goto leave_nomem; scrub_get_recover(recover); page->recover = recover; } scrub_put_recover(fs_info, recover); length -= sublen; logical += sublen; page_index++; } return 0; } static void scrub_bio_wait_endio(struct bio *bio) { complete(bio->bi_private); } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, struct bio *bio, struct scrub_page *page) { DECLARE_COMPLETION_ONSTACK(done); int ret; int mirror_num; bio->bi_iter.bi_sector = page->logical >> 9; bio->bi_private = &done; bio->bi_end_io = scrub_bio_wait_endio; mirror_num = page->sblock->pagev[0]->mirror_num; ret = raid56_parity_recover(fs_info, bio, page->recover->bbio, page->recover->map_length, mirror_num, 0); if (ret) return ret; wait_for_completion_io(&done); return blk_status_to_errno(bio->bi_status); } static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info, struct scrub_block *sblock) { struct scrub_page *first_page = sblock->pagev[0]; struct bio *bio; int page_num; /* All pages in sblock 
belong to the same stripe on the same device. */ ASSERT(first_page->dev); if (!first_page->dev->bdev) goto out; bio = btrfs_io_bio_alloc(BIO_MAX_PAGES); bio_set_dev(bio, first_page->dev->bdev); for (page_num = 0; page_num < sblock->page_count; page_num++) { struct scrub_page *page = sblock->pagev[page_num]; WARN_ON(!page->page); bio_add_page(bio, page->page, PAGE_SIZE, 0); } if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) { bio_put(bio); goto out; } bio_put(bio); scrub_recheck_block_checksum(sblock); return; out: for (page_num = 0; page_num < sblock->page_count; page_num++) sblock->pagev[page_num]->io_error = 1; sblock->no_io_error_seen = 0; } /* * this function will check the on disk data for checksum errors, header * errors and read I/O errors. If any I/O errors happen, the exact pages * which are errored are marked as being bad. The goal is to enable scrub * to take those pages that are not errored from all the mirrors so that * the pages that are errored in the just handled mirror can be repaired. */ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int retry_failed_mirror) { int page_num; sblock->no_io_error_seen = 1; /* short cut for raid56 */ if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0])) return scrub_recheck_block_on_raid56(fs_info, sblock); for (page_num = 0; page_num < sblock->page_count; page_num++) { struct bio *bio; struct scrub_page *page = sblock->pagev[page_num]; if (page->dev->bdev == NULL) { page->io_error = 1; sblock->no_io_error_seen = 0; continue; } WARN_ON(!page->page); bio = btrfs_io_bio_alloc(1); bio_set_dev(bio, page->dev->bdev); bio_add_page(bio, page->page, PAGE_SIZE, 0); bio->bi_iter.bi_sector = page->physical >> 9; bio->bi_opf = REQ_OP_READ; if (btrfsic_submit_bio_wait(bio)) { page->io_error = 1; sblock->no_io_error_seen = 0; } bio_put(bio); } if (sblock->no_io_error_seen) scrub_recheck_block_checksum(sblock); } static inline int scrub_check_fsid(u8 fsid[], struct scrub_page *spage) { struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices; int ret; ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE); return !ret; } static void scrub_recheck_block_checksum(struct scrub_block *sblock) { sblock->header_error = 0; sblock->checksum_error = 0; sblock->generation_error = 0; if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA) scrub_checksum_data(sblock); else scrub_checksum_tree_block(sblock); } static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good) { int page_num; int ret = 0; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { int ret_sub; ret_sub = scrub_repair_page_from_good_copy(sblock_bad, sblock_good, page_num, 1); if (ret_sub) ret = ret_sub; } return ret; } static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_page *page_good = sblock_good->pagev[page_num]; struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; BUG_ON(page_bad->page == NULL); BUG_ON(page_good->page == NULL); if (force_write || sblock_bad->header_error || sblock_bad->checksum_error || page_bad->io_error) { struct bio *bio; int ret; if (!page_bad->dev->bdev) { btrfs_warn_rl(fs_info, "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected"); return -EIO; } bio = btrfs_io_bio_alloc(1); bio_set_dev(bio, page_bad->dev->bdev); bio->bi_iter.bi_sector = page_bad->physical >> 9; 
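		/*
		 * bi_sector is in 512-byte units, hence the >> 9 above; the
		 * good copy is written back to the bad page's own physical
		 * location on its device.
		 */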
bio->bi_opf = REQ_OP_WRITE; ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); if (PAGE_SIZE != ret) { bio_put(bio); return -EIO; } if (btrfsic_submit_bio_wait(bio)) { btrfs_dev_stat_inc_and_print(page_bad->dev, BTRFS_DEV_STAT_WRITE_ERRS); atomic64_inc(&fs_info->dev_replace.num_write_errors); bio_put(bio); return -EIO; } bio_put(bio); } return 0; } static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) { struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; int page_num; /* * This block is used for the check of the parity on the source device, * so the data needn't be written into the destination device. */ if (sblock->sparity) return; for (page_num = 0; page_num < sblock->page_count; page_num++) { int ret; ret = scrub_write_page_to_dev_replace(sblock, page_num); if (ret) atomic64_inc(&fs_info->dev_replace.num_write_errors); } } static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, int page_num) { struct scrub_page *spage = sblock->pagev[page_num]; BUG_ON(spage->page == NULL); if (spage->io_error) { void *mapped_buffer = kmap_atomic(spage->page); clear_page(mapped_buffer); flush_dcache_page(spage->page); kunmap_atomic(mapped_buffer); } return scrub_add_page_to_wr_bio(sblock->sctx, spage); } static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { struct scrub_bio *sbio; int ret; mutex_lock(&sctx->wr_lock); again: if (!sctx->wr_curr_bio) { sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), GFP_KERNEL); if (!sctx->wr_curr_bio) { mutex_unlock(&sctx->wr_lock); return -ENOMEM; } sctx->wr_curr_bio->sctx = sctx; sctx->wr_curr_bio->page_count = 0; } sbio = sctx->wr_curr_bio; if (sbio->page_count == 0) { struct bio *bio; sbio->physical = spage->physical_for_dev_replace; sbio->logical = spage->logical; sbio->dev = sctx->wr_tgtdev; bio = sbio->bio; if (!bio) { bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio); sbio->bio = bio; } bio->bi_private = sbio; bio->bi_end_io = scrub_wr_bio_end_io; bio_set_dev(bio, sbio->dev->bdev); bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_opf = REQ_OP_WRITE; sbio->status = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical_for_dev_replace || sbio->logical + sbio->page_count * PAGE_SIZE != spage->logical) { scrub_wr_submit(sctx); goto again; } ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { if (sbio->page_count < 1) { bio_put(sbio->bio); sbio->bio = NULL; mutex_unlock(&sctx->wr_lock); return -EIO; } scrub_wr_submit(sctx); goto again; } sbio->pagev[sbio->page_count] = spage; scrub_page_get(spage); sbio->page_count++; if (sbio->page_count == sctx->pages_per_wr_bio) scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); return 0; } static void scrub_wr_submit(struct scrub_ctx *sctx) { struct scrub_bio *sbio; if (!sctx->wr_curr_bio) return; sbio = sctx->wr_curr_bio; sctx->wr_curr_bio = NULL; WARN_ON(!sbio->bio->bi_disk); scrub_pending_bio_inc(sctx); /* process all writes in a single worker thread. 
Then the block layer * orders the requests before sending them to the driver which * doubled the write performance on spinning disks when measured * with Linux 3.5 */ btrfsic_submit_bio(sbio->bio); } static void scrub_wr_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->fs_info; sbio->status = bio->bi_status; sbio->bio = bio; btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, scrub_wr_bio_end_io_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); } static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; int i; WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); if (sbio->status) { struct btrfs_dev_replace *dev_replace = &sbio->sctx->fs_info->dev_replace; for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; spage->io_error = 1; atomic64_inc(&dev_replace->num_write_errors); } } for (i = 0; i < sbio->page_count; i++) scrub_page_put(sbio->pagev[i]); bio_put(sbio->bio); kfree(sbio); scrub_pending_bio_dec(sctx); } static int scrub_checksum(struct scrub_block *sblock) { u64 flags; int ret; /* * No need to initialize these stats currently, * because this function only use return value * instead of these stats value. * * Todo: * always use stats */ sblock->header_error = 0; sblock->generation_error = 0; sblock->checksum_error = 0; WARN_ON(sblock->page_count < 1); flags = sblock->pagev[0]->flags; ret = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) ret = scrub_checksum_data(sblock); else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) ret = scrub_checksum_tree_block(sblock); else if (flags & BTRFS_EXTENT_FLAG_SUPER) (void)scrub_checksum_super(sblock); else WARN_ON(1); if (ret) scrub_handle_errored_block(sblock); return ret; } static int scrub_checksum_data(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; u8 csum[BTRFS_CSUM_SIZE]; u8 *on_disk_csum; struct page *page; void *buffer; u32 crc = ~(u32)0; u64 len; int index; BUG_ON(sblock->page_count < 1); if (!sblock->pagev[0]->have_csum) return 0; on_disk_csum = sblock->pagev[0]->csum; page = sblock->pagev[0]->page; buffer = kmap_atomic(page); len = sctx->fs_info->sectorsize; index = 0; for (;;) { u64 l = min_t(u64, len, PAGE_SIZE); crc = btrfs_csum_data(buffer, crc, l); kunmap_atomic(buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; buffer = kmap_atomic(page); } btrfs_csum_final(crc, csum); if (memcmp(csum, on_disk_csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->checksum_error; } static int scrub_checksum_tree_block(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; struct btrfs_header *h; struct btrfs_fs_info *fs_info = sctx->fs_info; u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; struct page *page; void *mapped_buffer; u64 mapped_size; void *p; u32 crc = ~(u32)0; u64 len; int index; BUG_ON(sblock->page_count < 1); page = sblock->pagev[0]->page; mapped_buffer = kmap_atomic(page); h = (struct btrfs_header *)mapped_buffer; memcpy(on_disk_csum, h->csum, sctx->csum_size); /* * we don't use the getter functions here, as we * a) don't have an extent buffer and * b) the page is already kmapped */ if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h)) sblock->header_error = 1; if (sblock->pagev[0]->generation != 
btrfs_stack_header_generation(h)) { sblock->header_error = 1; sblock->generation_error = 1; } if (!scrub_check_fsid(h->fsid, sblock->pagev[0])) sblock->header_error = 1; if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) sblock->header_error = 1; len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE; mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; index = 0; for (;;) { u64 l = min_t(u64, len, mapped_size); crc = btrfs_csum_data(p, crc, l); kunmap_atomic(mapped_buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; mapped_buffer = kmap_atomic(page); mapped_size = PAGE_SIZE; p = mapped_buffer; } btrfs_csum_final(crc, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) sblock->checksum_error = 1; return sblock->header_error || sblock->checksum_error; } static int scrub_checksum_super(struct scrub_block *sblock) { struct btrfs_super_block *s; struct scrub_ctx *sctx = sblock->sctx; u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE]; struct page *page; void *mapped_buffer; u64 mapped_size; void *p; u32 crc = ~(u32)0; int fail_gen = 0; int fail_cor = 0; u64 len; int index; BUG_ON(sblock->page_count < 1); page = sblock->pagev[0]->page; mapped_buffer = kmap_atomic(page); s = (struct btrfs_super_block *)mapped_buffer; memcpy(on_disk_csum, s->csum, sctx->csum_size); if (sblock->pagev[0]->logical != btrfs_super_bytenr(s)) ++fail_cor; if (sblock->pagev[0]->generation != btrfs_super_generation(s)) ++fail_gen; if (!scrub_check_fsid(s->fsid, sblock->pagev[0])) ++fail_cor; len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE; mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; index = 0; for (;;) { u64 l = min_t(u64, len, mapped_size); crc = btrfs_csum_data(p, crc, l); kunmap_atomic(mapped_buffer); len -= l; if (len == 0) break; index++; BUG_ON(index >= sblock->page_count); BUG_ON(!sblock->pagev[index]->page); page = sblock->pagev[index]->page; mapped_buffer = kmap_atomic(page); mapped_size = PAGE_SIZE; p = mapped_buffer; } btrfs_csum_final(crc, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) ++fail_cor; if (fail_cor + fail_gen) { /* * if we find an error in a super block, we just report it. 
* They will get written with the next transaction commit * anyway */ spin_lock(&sctx->stat_lock); ++sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); if (fail_cor) btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); else btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, BTRFS_DEV_STAT_GENERATION_ERRS); } return fail_cor + fail_gen; } static void scrub_block_get(struct scrub_block *sblock) { refcount_inc(&sblock->refs); } static void scrub_block_put(struct scrub_block *sblock) { if (refcount_dec_and_test(&sblock->refs)) { int i; if (sblock->sparity) scrub_parity_put(sblock->sparity); for (i = 0; i < sblock->page_count; i++) scrub_page_put(sblock->pagev[i]); kfree(sblock); } } static void scrub_page_get(struct scrub_page *spage) { atomic_inc(&spage->refs); } static void scrub_page_put(struct scrub_page *spage) { if (atomic_dec_and_test(&spage->refs)) { if (spage->page) __free_page(spage->page); kfree(spage); } } static void scrub_submit(struct scrub_ctx *sctx) { struct scrub_bio *sbio; if (sctx->curr == -1) return; sbio = sctx->bios[sctx->curr]; sctx->curr = -1; scrub_pending_bio_inc(sctx); btrfsic_submit_bio(sbio->bio); } static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { struct scrub_block *sblock = spage->sblock; struct scrub_bio *sbio; int ret; again: /* * grab a fresh bio or wait for one to become available */ while (sctx->curr == -1) { spin_lock(&sctx->list_lock); sctx->curr = sctx->first_free; if (sctx->curr != -1) { sctx->first_free = sctx->bios[sctx->curr]->next_free; sctx->bios[sctx->curr]->next_free = -1; sctx->bios[sctx->curr]->page_count = 0; spin_unlock(&sctx->list_lock); } else { spin_unlock(&sctx->list_lock); wait_event(sctx->list_wait, sctx->first_free != -1); } } sbio = sctx->bios[sctx->curr]; if (sbio->page_count == 0) { struct bio *bio; sbio->physical = spage->physical; sbio->logical = spage->logical; sbio->dev = spage->dev; bio = sbio->bio; if (!bio) { bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio); sbio->bio = bio; } bio->bi_private = sbio; bio->bi_end_io = scrub_bio_end_io; bio_set_dev(bio, sbio->dev->bdev); bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_opf = REQ_OP_READ; sbio->status = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical || sbio->logical + sbio->page_count * PAGE_SIZE != spage->logical || sbio->dev != spage->dev) { scrub_submit(sctx); goto again; } sbio->pagev[sbio->page_count] = spage; ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { if (sbio->page_count < 1) { bio_put(sbio->bio); sbio->bio = NULL; return -EIO; } scrub_submit(sctx); goto again; } scrub_block_get(sblock); /* one for the page added to the bio */ atomic_inc(&sblock->outstanding_pages); sbio->page_count++; if (sbio->page_count == sctx->pages_per_rd_bio) scrub_submit(sctx); return 0; } static void scrub_missing_raid56_end_io(struct bio *bio) { struct scrub_block *sblock = bio->bi_private; struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; if (bio->bi_status) sblock->no_io_error_seen = 0; bio_put(bio); btrfs_queue_work(fs_info->scrub_workers, &sblock->work); } static void scrub_missing_raid56_worker(struct btrfs_work *work) { struct scrub_block *sblock = container_of(work, struct scrub_block, work); struct scrub_ctx *sctx = sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 logical; struct btrfs_device *dev; logical = sblock->pagev[0]->logical; dev = sblock->pagev[0]->dev; if (sblock->no_io_error_seen) 
scrub_recheck_block_checksum(sblock); if (!sblock->no_io_error_seen) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "IO error rebuilding logical %llu for dev %s", logical, rcu_str_deref(dev->name)); } else if (sblock->header_error || sblock->checksum_error) { spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); btrfs_err_rl_in_rcu(fs_info, "failed to rebuild valid logical %llu for dev %s", logical, rcu_str_deref(dev->name)); } else { scrub_write_block_to_dev_replace(sblock); } scrub_block_put(sblock); if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } scrub_pending_bio_dec(sctx); } static void scrub_missing_raid56_pages(struct scrub_block *sblock) { struct scrub_ctx *sctx = sblock->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; u64 length = sblock->page_count * PAGE_SIZE; u64 logical = sblock->pagev[0]->logical; struct btrfs_bio *bbio = NULL; struct bio *bio; struct btrfs_raid_bio *rbio; int ret; int i; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &length, &bbio); if (ret || !bbio || !bbio->raid_map) goto bbio_out; if (WARN_ON(!sctx->is_dev_replace || !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { /* * We shouldn't be scrubbing a missing device. Even for dev * replace, we should only get here for RAID 5/6. We either * managed to mount something with no mirrors remaining or * there's a bug in scrub_remap_extent()/btrfs_map_block(). */ goto bbio_out; } bio = btrfs_io_bio_alloc(0); bio->bi_iter.bi_sector = logical >> 9; bio->bi_private = sblock; bio->bi_end_io = scrub_missing_raid56_end_io; rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length); if (!rbio) goto rbio_out; for (i = 0; i < sblock->page_count; i++) { struct scrub_page *spage = sblock->pagev[i]; raid56_add_scrub_pages(rbio, spage->page, spage->logical); } btrfs_init_work(&sblock->work, btrfs_scrub_helper, scrub_missing_raid56_worker, NULL, NULL); scrub_block_get(sblock); scrub_pending_bio_inc(sctx); raid56_submit_missing_rbio(rbio); return; rbio_out: bio_put(bio); bbio_out: btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(bbio); spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); } static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace) { struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); scrub_page_get(spage); sblock->pagev[index] = spage; spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->physical_for_dev_replace = 
physical_for_dev_replace; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } WARN_ON(sblock->page_count == 0); if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { /* * This case should only be hit for RAID 5/6 device replace. See * the comment in scrub_missing_raid56_pages() for details. */ scrub_missing_raid56_pages(sblock); } else { for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } if (force) scrub_submit(sctx); } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; } static void scrub_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->fs_info; sbio->status = bio->bi_status; sbio->bio = bio; btrfs_queue_work(fs_info->scrub_workers, &sbio->work); } static void scrub_bio_end_io_worker(struct btrfs_work *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; int i; BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); if (sbio->status) { for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; spage->io_error = 1; spage->sblock->no_io_error_seen = 0; } } /* now complete the scrub_block items that have all pages completed */ for (i = 0; i < sbio->page_count; i++) { struct scrub_page *spage = sbio->pagev[i]; struct scrub_block *sblock = spage->sblock; if (atomic_dec_and_test(&sblock->outstanding_pages)) scrub_block_complete(sblock); scrub_block_put(sblock); } bio_put(sbio->bio); sbio->bio = NULL; spin_lock(&sctx->list_lock); sbio->next_free = sctx->first_free; sctx->first_free = sbio->index; spin_unlock(&sctx->list_lock); if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } scrub_pending_bio_dec(sctx); } static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, unsigned long *bitmap, u64 start, u64 len) { u64 offset; u64 nsectors64; u32 nsectors; int sectorsize = sparity->sctx->fs_info->sectorsize; if (len >= sparity->stripe_len) { bitmap_set(bitmap, 0, sparity->nsectors); return; } start -= sparity->logic_start; start = div64_u64_rem(start, sparity->stripe_len, &offset); offset = div_u64(offset, sectorsize); nsectors64 = div_u64(len, sectorsize); ASSERT(nsectors64 < UINT_MAX); nsectors = (u32)nsectors64; if (offset + nsectors <= sparity->nsectors) { bitmap_set(bitmap, offset, nsectors); return; } bitmap_set(bitmap, offset, sparity->nsectors - offset); bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); } static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, u64 start, u64 len) { __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len); } static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, u64 start, u64 len) { __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len); } static void scrub_block_complete(struct scrub_block *sblock) { int corrupted = 0; if (!sblock->no_io_error_seen) { corrupted = 1; scrub_handle_errored_block(sblock); } else { /* * if has checksum error, write via repair mechanism in * dev replace case, 
otherwise write here in dev replace * case. */ corrupted = scrub_checksum(sblock); if (!corrupted && sblock->sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock); } if (sblock->sparity && corrupted && !sblock->data_corrected) { u64 start = sblock->pagev[0]->logical; u64 end = sblock->pagev[sblock->page_count - 1]->logical + PAGE_SIZE; scrub_parity_mark_sectors_error(sblock->sparity, start, end - start); } } static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) { struct btrfs_ordered_sum *sum = NULL; unsigned long index; unsigned long num_sectors; while (!list_empty(&sctx->csum_list)) { sum = list_first_entry(&sctx->csum_list, struct btrfs_ordered_sum, list); if (sum->bytenr > logical) return 0; if (sum->bytenr + sum->len > logical) break; ++sctx->stat.csum_discards; list_del(&sum->list); kfree(sum); sum = NULL; } if (!sum) return 0; index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize); ASSERT(index < UINT_MAX); num_sectors = sum->len / sctx->fs_info->sectorsize; memcpy(csum, sum->sums + index, sctx->csum_size); if (index == num_sectors - 1) { list_del(&sum->list); kfree(sum); } return 1; } /* scrub extent tries to collect up to 64 kB for each bio */ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u64 physical_for_dev_replace) { int ret; u8 csum[BTRFS_CSUM_SIZE]; u32 blocksize; if (flags & BTRFS_EXTENT_FLAG_DATA) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) blocksize = map->stripe_len; else blocksize = sctx->fs_info->sectorsize; spin_lock(&sctx->stat_lock); sctx->stat.data_extents_scrubbed++; sctx->stat.data_bytes_scrubbed += len; spin_unlock(&sctx->stat_lock); } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) blocksize = map->stripe_len; else blocksize = sctx->fs_info->nodesize; spin_lock(&sctx->stat_lock); sctx->stat.tree_extents_scrubbed++; sctx->stat.tree_bytes_scrubbed += len; spin_unlock(&sctx->stat_lock); } else { blocksize = sctx->fs_info->sectorsize; WARN_ON(1); } while (len) { u64 l = min_t(u64, len, blocksize); int have_csum = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) { /* push csums to sbio */ have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) ++sctx->stat.no_csum; } ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? 
csum : NULL, 0, physical_for_dev_replace); if (ret) return ret; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } return 0; } static int scrub_pages_for_parity(struct scrub_parity *sparity, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum) { struct scrub_ctx *sctx = sparity->sctx; struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; sblock->sparity = sparity; scrub_parity_get(sparity); for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); /* For scrub block */ scrub_page_get(spage); sblock->pagev[index] = spage; /* For scrub parity */ scrub_page_get(spage); list_add_tail(&spage->list, &sparity->spages); spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; } WARN_ON(sblock->page_count == 0); for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; } static int scrub_extent_for_parity(struct scrub_parity *sparity, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num) { struct scrub_ctx *sctx = sparity->sctx; int ret; u8 csum[BTRFS_CSUM_SIZE]; u32 blocksize; if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { scrub_parity_mark_sectors_error(sparity, logical, len); return 0; } if (flags & BTRFS_EXTENT_FLAG_DATA) { blocksize = sparity->stripe_len; } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { blocksize = sparity->stripe_len; } else { blocksize = sctx->fs_info->sectorsize; WARN_ON(1); } while (len) { u64 l = min_t(u64, len, blocksize); int have_csum = 0; if (flags & BTRFS_EXTENT_FLAG_DATA) { /* push csums to sbio */ have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) goto skip; } ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? csum : NULL); if (ret) return ret; skip: len -= l; logical += l; physical += l; } return 0; } /* * Given a physical address, this will calculate it's * logical offset. if this is a parity stripe, it will return * the most left data stripe's logical offset. * * return 0 if it is a data stripe, 1 means parity stripe. 
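 *
 * @physical is an absolute address on the device at map->stripes[@num];
 * the stripe's own start is subtracted internally before the rotation
 * of data and parity across the devices is taken into account.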
*/ static int get_raid56_logic_offset(u64 physical, int num, struct map_lookup *map, u64 *offset, u64 *stripe_start) { int i; int j = 0; u64 stripe_nr; u64 last_offset; u32 stripe_index; u32 rot; last_offset = (physical - map->stripes[num].physical) * nr_data_stripes(map); if (stripe_start) *stripe_start = last_offset; *offset = last_offset; for (i = 0; i < nr_data_stripes(map); i++) { *offset = last_offset + i * map->stripe_len; stripe_nr = div64_u64(*offset, map->stripe_len); stripe_nr = div_u64(stripe_nr, nr_data_stripes(map)); /* Work out the disk rotation on this stripe-set */ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); /* calculate which stripe this data locates */ rot += i; stripe_index = rot % map->num_stripes; if (stripe_index == num) return 0; if (stripe_index < num) j++; } *offset = last_offset + j * map->stripe_len; return 1; } static void scrub_free_parity(struct scrub_parity *sparity) { struct scrub_ctx *sctx = sparity->sctx; struct scrub_page *curr, *next; int nbits; nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors); if (nbits) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors += nbits; sctx->stat.uncorrectable_errors += nbits; spin_unlock(&sctx->stat_lock); } list_for_each_entry_safe(curr, next, &sparity->spages, list) { list_del_init(&curr->list); scrub_page_put(curr); } kfree(sparity); } static void scrub_parity_bio_endio_worker(struct btrfs_work *work) { struct scrub_parity *sparity = container_of(work, struct scrub_parity, work); struct scrub_ctx *sctx = sparity->sctx; scrub_free_parity(sparity); scrub_pending_bio_dec(sctx); } static void scrub_parity_bio_endio(struct bio *bio) { struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; if (bio->bi_status) bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); bio_put(bio); btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, scrub_parity_bio_endio_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); } static void scrub_parity_check_and_repair(struct scrub_parity *sparity) { struct scrub_ctx *sctx = sparity->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; struct bio *bio; struct btrfs_raid_bio *rbio; struct btrfs_bio *bbio = NULL; u64 length; int ret; if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap, sparity->nsectors)) goto out; length = sparity->logic_end - sparity->logic_start; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, &length, &bbio); if (ret || !bbio || !bbio->raid_map) goto bbio_out; bio = btrfs_io_bio_alloc(0); bio->bi_iter.bi_sector = sparity->logic_start >> 9; bio->bi_private = sparity; bio->bi_end_io = scrub_parity_bio_endio; rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, length, sparity->scrub_dev, sparity->dbitmap, sparity->nsectors); if (!rbio) goto rbio_out; scrub_pending_bio_inc(sctx); raid56_parity_submit_scrub_rbio(rbio); return; rbio_out: bio_put(bio); bbio_out: btrfs_bio_counter_dec(fs_info); btrfs_put_bbio(bbio); bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); out: scrub_free_parity(sparity); } static inline int scrub_calc_parity_bitmap_len(int nsectors) { return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long); } static void scrub_parity_get(struct scrub_parity *sparity) { refcount_inc(&sparity->refs); } 
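/*
 * Drop a reference on the parity helper. The final put kicks off
 * scrub_parity_check_and_repair(), which rebuilds the parity stripe for
 * any sectors still marked in dbitmap and then frees the helper.
 */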
static void scrub_parity_put(struct scrub_parity *sparity) { if (!refcount_dec_and_test(&sparity->refs)) return; scrub_parity_check_and_repair(sparity); } static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *sdev, struct btrfs_path *path, u64 logic_start, u64 logic_end) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; struct btrfs_bio *bbio = NULL; u64 flags; int ret; int slot; struct extent_buffer *l; struct btrfs_key key; u64 generation; u64 extent_logical; u64 extent_physical; u64 extent_len; u64 mapped_length; struct btrfs_device *extent_dev; struct scrub_parity *sparity; int nsectors; int bitmap_len; int extent_mirror_num; int stop_loop = 0; nsectors = div_u64(map->stripe_len, fs_info->sectorsize); bitmap_len = scrub_calc_parity_bitmap_len(nsectors); sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, GFP_NOFS); if (!sparity) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } sparity->stripe_len = map->stripe_len; sparity->nsectors = nsectors; sparity->sctx = sctx; sparity->scrub_dev = sdev; sparity->logic_start = logic_start; sparity->logic_end = logic_end; refcount_set(&sparity->refs, 1); INIT_LIST_HEAD(&sparity->spages); sparity->dbitmap = sparity->bitmap; sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; ret = 0; while (logic_start < logic_end) { if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logic_start; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_extent_item(root, path, 0); if (ret < 0) goto out; if (ret > 0) { btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; } } stop_loop = 0; while (1) { u64 bytes; l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; stop_loop = 1; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) goto next; if (key.type == BTRFS_METADATA_ITEM_KEY) bytes = fs_info->nodesize; else bytes = key.offset; if (key.objectid + bytes <= logic_start) goto next; if (key.objectid >= logic_end) { stop_loop = 1; break; } while (key.objectid >= logic_start + map->stripe_len) logic_start += map->stripe_len; extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); generation = btrfs_extent_generation(l, extent); if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && (key.objectid < logic_start || key.objectid + bytes > logic_start + map->stripe_len)) { btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", key.objectid, logic_start); spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); goto next; } again: extent_logical = key.objectid; extent_len = bytes; if (extent_logical < logic_start) { extent_len -= logic_start - extent_logical; extent_logical = logic_start; } if (extent_logical + extent_len > logic_start + map->stripe_len) extent_len = logic_start + map->stripe_len - extent_logical; scrub_parity_mark_sectors_data(sparity, extent_logical, extent_len); mapped_length = extent_len; bbio = NULL; ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, &mapped_length, &bbio, 0); if (!ret) { if (!bbio || mapped_length < extent_len) ret = -EIO; } if (ret) { btrfs_put_bbio(bbio); goto out; } extent_physical = bbio->stripes[0].physical; extent_mirror_num = bbio->mirror_num; extent_dev = bbio->stripes[0].dev; btrfs_put_bbio(bbio); ret = btrfs_lookup_csums_range(csum_root, extent_logical, extent_logical + extent_len - 1, &sctx->csum_list, 1); if (ret) goto out; ret = scrub_extent_for_parity(sparity, extent_logical, extent_len, extent_physical, extent_dev, flags, generation, extent_mirror_num); scrub_free_csums(sctx); if (ret) goto out; if (extent_logical + extent_len < key.objectid + bytes) { logic_start += map->stripe_len; if (logic_start >= logic_end) { stop_loop = 1; break; } if (logic_start < key.objectid + bytes) { cond_resched(); goto again; } } next: path->slots[0]++; } btrfs_release_path(path); if (stop_loop) break; logic_start += map->stripe_len; } out: if (ret < 0) scrub_parity_mark_sectors_error(sparity, logic_start, logic_end - logic_start); scrub_parity_put(sparity); scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); btrfs_release_path(path); return ret < 0 ? 
ret : 0; } static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *scrub_dev, int num, u64 base, u64 length) { struct btrfs_path *path, *ppath; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; struct blk_plug plug; u64 flags; int ret; int slot; u64 nstripes; struct extent_buffer *l; u64 physical; u64 logical; u64 logic_end; u64 physical_end; u64 generation; int mirror_num; struct reada_control *reada1; struct reada_control *reada2; struct btrfs_key key; struct btrfs_key key_end; u64 increment = map->stripe_len; u64 offset; u64 extent_logical; u64 extent_physical; u64 extent_len; u64 stripe_logical; u64 stripe_end; struct btrfs_device *extent_dev; int extent_mirror_num; int stop_loop = 0; physical = map->stripes[num].physical; offset = 0; nstripes = div64_u64(length, map->stripe_len); if (map->type & BTRFS_BLOCK_GROUP_RAID0) { offset = map->stripe_len * num; increment = map->stripe_len * map->num_stripes; mirror_num = 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { int factor = map->num_stripes / map->sub_stripes; offset = map->stripe_len * (num / map->sub_stripes); increment = map->stripe_len * factor; mirror_num = num % map->sub_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical, num, map, &offset, NULL); increment = map->stripe_len * nr_data_stripes(map); mirror_num = 1; } else { increment = map->stripe_len; mirror_num = 1; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; ppath = btrfs_alloc_path(); if (!ppath) { btrfs_free_path(path); return -ENOMEM; } /* * work on commit root. The related disk blocks are static as * long as COW is applied. This means, it is save to rewrite * them to repair disk errors without any race conditions */ path->search_commit_root = 1; path->skip_locking = 1; ppath->search_commit_root = 1; ppath->skip_locking = 1; /* * trigger the readahead for extent tree csum tree and wait for * completion. During readahead, the scrub is officially paused * to not hold off transaction commits */ logical = base + offset; physical_end = physical + nstripes * map->stripe_len; if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical_end, num, map, &logic_end, NULL); logic_end += base; } else { logic_end = logical + increment * nstripes; } wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_blocked_if_needed(fs_info); /* FIXME it might be better to start readahead at commit root */ key.objectid = logical; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = (u64)0; key_end.objectid = logic_end; key_end.type = BTRFS_METADATA_ITEM_KEY; key_end.offset = (u64)-1; reada1 = btrfs_reada_add(root, &key, &key_end); key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.type = BTRFS_EXTENT_CSUM_KEY; key.offset = logical; key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key_end.type = BTRFS_EXTENT_CSUM_KEY; key_end.offset = logic_end; reada2 = btrfs_reada_add(csum_root, &key, &key_end); if (!IS_ERR(reada1)) btrfs_reada_wait(reada1); if (!IS_ERR(reada2)) btrfs_reada_wait(reada2); /* * collect all data csums for the stripe to avoid seeking during * the scrub. 
This might currently (crc32) end up to be about 1MB */ blk_start_plug(&plug); /* * now find all extents for each stripe and scrub them */ ret = 0; while (physical < physical_end) { /* * canceled? */ if (atomic_read(&fs_info->scrub_cancel_req) || atomic_read(&sctx->cancel_req)) { ret = -ECANCELED; goto out; } /* * check to see if we have to pause */ if (atomic_read(&fs_info->scrub_pause_req)) { /* push queued extents */ sctx->flush_all_writes = true; scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); sctx->flush_all_writes = false; scrub_blocked_if_needed(fs_info); } if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; if (ret) { /* it is parity strip */ stripe_logical += base; stripe_end = stripe_logical + increment; ret = scrub_raid56_parity(sctx, map, scrub_dev, ppath, stripe_logical, stripe_end); if (ret) goto out; goto skip; } } if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logical; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_extent_item(root, path, 0); if (ret < 0) goto out; if (ret > 0) { /* there's no smaller item, so stick with the * larger one */ btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; } } stop_loop = 0; while (1) { u64 bytes; l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; stop_loop = 1; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) goto next; if (key.type == BTRFS_METADATA_ITEM_KEY) bytes = fs_info->nodesize; else bytes = key.offset; if (key.objectid + bytes <= logical) goto next; if (key.objectid >= logical + map->stripe_len) { /* out of this device extent */ if (key.objectid >= logic_end) stop_loop = 1; break; } extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); generation = btrfs_extent_generation(l, extent); if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && (key.objectid < logical || key.objectid + bytes > logical + map->stripe_len)) { btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", key.objectid, logical); spin_lock(&sctx->stat_lock); sctx->stat.uncorrectable_errors++; spin_unlock(&sctx->stat_lock); goto next; } again: extent_logical = key.objectid; extent_len = bytes; /* * trim extent to this stripe */ if (extent_logical < logical) { extent_len -= logical - extent_logical; extent_logical = logical; } if (extent_logical + extent_len > logical + map->stripe_len) { extent_len = logical + map->stripe_len - extent_logical; } extent_physical = extent_logical - logical + physical; extent_dev = scrub_dev; extent_mirror_num = mirror_num; if (sctx->is_dev_replace) scrub_remap_extent(fs_info, extent_logical, extent_len, &extent_physical, &extent_dev, &extent_mirror_num); ret = btrfs_lookup_csums_range(csum_root, extent_logical, extent_logical + extent_len - 1, &sctx->csum_list, 1); if (ret) goto out; ret = scrub_extent(sctx, map, extent_logical, extent_len, extent_physical, extent_dev, flags, generation, extent_mirror_num, extent_logical - logical + physical); scrub_free_csums(sctx); if (ret) goto out; if (extent_logical + extent_len < key.objectid + bytes) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* * loop until we find next data stripe * or we have finished all stripes. */ loop: physical += map->stripe_len; ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; if (ret && physical < physical_end) { stripe_logical += base; stripe_end = stripe_logical + increment; ret = scrub_raid56_parity(sctx, map, scrub_dev, ppath, stripe_logical, stripe_end); if (ret) goto out; goto loop; } } else { physical += map->stripe_len; logical += increment; } if (logical < key.objectid + bytes) { cond_resched(); goto again; } if (physical >= physical_end) { stop_loop = 1; break; } } next: path->slots[0]++; } btrfs_release_path(path); skip: logical += increment; physical += map->stripe_len; spin_lock(&sctx->stat_lock); if (stop_loop) sctx->stat.last_physical = map->stripes[num].physical + length; else sctx->stat.last_physical = physical; spin_unlock(&sctx->stat_lock); if (stop_loop) break; } out: /* push queued extents */ scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); blk_finish_plug(&plug); btrfs_free_path(path); btrfs_free_path(ppath); return ret < 0 ? ret : 0; } static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 chunk_offset, u64 length, u64 dev_offset, struct btrfs_block_group_cache *cache) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; int i; int ret = 0; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); read_unlock(&map_tree->map_tree.lock); if (!em) { /* * Might have been an unused block group deleted by the cleaner * kthread or relocation. 
*/ spin_lock(&cache->lock); if (!cache->removed) ret = -EINVAL; spin_unlock(&cache->lock); return ret; } map = em->map_lookup; if (em->start != chunk_offset) goto out; if (em->len < length) goto out; for (i = 0; i < map->num_stripes; ++i) { if (map->stripes[i].dev->bdev == scrub_dev->bdev && map->stripes[i].physical == dev_offset) { ret = scrub_stripe(sctx, map, scrub_dev, i, chunk_offset, length); if (ret) goto out; } } out: free_extent_map(em); return ret; } static noinline_for_stack int scrub_enumerate_chunks(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 start, u64 end) { struct btrfs_dev_extent *dev_extent = NULL; struct btrfs_path *path; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->dev_root; u64 length; u64 chunk_offset; int ret = 0; int ro_set; int slot; struct extent_buffer *l; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_block_group_cache *cache; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = READA_FORWARD; path->search_commit_root = 1; path->skip_locking = 1; key.objectid = scrub_dev->devid; key.offset = 0ull; key.type = BTRFS_DEV_EXTENT_KEY; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) break; if (ret > 0) { if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(root, path); if (ret < 0) break; if (ret > 0) { ret = 0; break; } } else { ret = 0; } } l = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(l, &found_key, slot); if (found_key.objectid != scrub_dev->devid) break; if (found_key.type != BTRFS_DEV_EXTENT_KEY) break; if (found_key.offset >= end) break; if (found_key.offset < key.offset) break; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); length = btrfs_dev_extent_length(l, dev_extent); if (found_key.offset + length <= start) goto skip; chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); /* * get a reference on the corresponding block group to prevent * the chunk from going away while we scrub it */ cache = btrfs_lookup_block_group(fs_info, chunk_offset); /* some chunks are removed but not committed to disk yet, * continue scrubbing */ if (!cache) goto skip; /* * we need call btrfs_inc_block_group_ro() with scrubs_paused, * to avoid deadlock caused by: * btrfs_inc_block_group_ro() * -> btrfs_wait_for_commit() * -> btrfs_commit_transaction() * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); ret = btrfs_inc_block_group_ro(cache); if (!ret && sctx->is_dev_replace) { /* * If we are doing a device replace wait for any tasks * that started delalloc right before we set the block * group to RO mode, as they might have just allocated * an extent from it or decided they could do a nocow * write. And if any such tasks did that, wait for their * ordered extents to complete and then commit the * current transaction, so that we can later see the new * extent items in the extent tree - the ordered extents * create delayed data references (for cow writes) when * they complete, which will be run and insert the * corresponding extent items into the extent tree when * we commit the transaction they used when running * inode.c:btrfs_finish_ordered_io(). We later use * the commit root of the extent tree to find extents * to copy from the srcdev into the tgtdev, and we don't * want to miss any new extents. 
*/ btrfs_wait_block_group_reservations(cache); btrfs_wait_nocow_writers(cache); ret = btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->key.objectid, cache->key.offset); if (ret > 0) { struct btrfs_trans_handle *trans; trans = btrfs_join_transaction(root); if (IS_ERR(trans)) ret = PTR_ERR(trans); else ret = btrfs_commit_transaction(trans); if (ret) { scrub_pause_off(fs_info); btrfs_put_block_group(cache); break; } } } scrub_pause_off(fs_info); if (ret == 0) { ro_set = 1; } else if (ret == -ENOSPC) { /* * btrfs_inc_block_group_ro return -ENOSPC when it * failed in creating new chunk for metadata. * It is not a problem for scrub/replace, because * metadata are always cowed, and our scrub paused * commit_transactions. */ ro_set = 0; } else { btrfs_warn(fs_info, "failed setting block group ro: %d", ret); btrfs_put_block_group(cache); break; } down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_right = found_key.offset + length; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; up_write(&dev_replace->rwsem); ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, found_key.offset, cache); /* * flush, submit all pending read and write bios, afterwards * wait for them. * Note that in the dev replace case, a read request causes * write requests that are submitted in the read completion * worker. Therefore in the current situation, it is required * that all write requests are flushed, so that all read and * write requests are really completed when bios_in_flight * changes to 0. */ sctx->flush_all_writes = true; scrub_submit(sctx); mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); scrub_pause_on(fs_info); /* * must be called before we decrease @scrub_paused. * make sure we don't block transaction commit while * we are waiting pending workers finished. */ wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); sctx->flush_all_writes = false; scrub_pause_off(fs_info); down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_left = dev_replace->cursor_right; dev_replace->item_needs_writeback = 1; up_write(&fs_info->dev_replace.rwsem); if (ro_set) btrfs_dec_block_group_ro(cache); /* * We might have prevented the cleaner kthread from deleting * this block group if it was already unused because we raced * and set it to RO mode first. So add it back to the unused * list, otherwise it might not ever be deleted unless a manual * balance is triggered or it becomes used and unused again. */ spin_lock(&cache->lock); if (!cache->removed && !cache->ro && cache->reserved == 0 && btrfs_block_group_used(&cache->item) == 0) { spin_unlock(&cache->lock); btrfs_mark_bg_unused(cache); } else { spin_unlock(&cache->lock); } btrfs_put_block_group(cache); if (ret) break; if (sctx->is_dev_replace && atomic64_read(&dev_replace->num_write_errors) > 0) { ret = -EIO; break; } if (sctx->stat.malloc_errors > 0) { ret = -ENOMEM; break; } skip: key.offset = found_key.offset + length; btrfs_release_path(path); } btrfs_free_path(path); return ret; } static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev) { int i; u64 bytenr; u64 gen; int ret; struct btrfs_fs_info *fs_info = sctx->fs_info; if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) return -EIO; /* Seed devices of a new filesystem has their own generation. 
*/ if (scrub_dev->fs_devices != fs_info->fs_devices) gen = scrub_dev->generation; else gen = fs_info->last_trans_committed; for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->commit_total_bytes) break; ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1, bytenr); if (ret) return ret; } wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); return 0; } /* * get a reference count on fs_info->scrub_workers. start worker if necessary */ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; if (fs_info->scrub_workers_refcnt == 0) { fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags, is_dev_replace ? 1 : max_active, 4); if (!fs_info->scrub_workers) goto fail_scrub_workers; fs_info->scrub_wr_completion_workers = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, max_active, 2); if (!fs_info->scrub_wr_completion_workers) goto fail_scrub_wr_completion_workers; fs_info->scrub_parity_workers = btrfs_alloc_workqueue(fs_info, "scrubparity", flags, max_active, 2); if (!fs_info->scrub_parity_workers) goto fail_scrub_parity_workers; } ++fs_info->scrub_workers_refcnt; return 0; fail_scrub_parity_workers: btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); fail_scrub_wr_completion_workers: btrfs_destroy_workqueue(fs_info->scrub_workers); fail_scrub_workers: return -ENOMEM; } static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) { if (--fs_info->scrub_workers_refcnt == 0) { btrfs_destroy_workqueue(fs_info->scrub_workers); btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); btrfs_destroy_workqueue(fs_info->scrub_parity_workers); } WARN_ON(fs_info->scrub_workers_refcnt < 0); } int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, u64 end, struct btrfs_scrub_progress *progress, int readonly, int is_dev_replace) { struct scrub_ctx *sctx; int ret; struct btrfs_device *dev; unsigned int nofs_flag; if (btrfs_fs_closing(fs_info)) return -EINVAL; if (fs_info->nodesize > BTRFS_STRIPE_LEN) { /* * in this case scrub is unable to calculate the checksum * the way scrub is implemented. Do not handle this * situation at all because it won't ever happen. 
*/ btrfs_err(fs_info, "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails", fs_info->nodesize, BTRFS_STRIPE_LEN); return -EINVAL; } if (fs_info->sectorsize != PAGE_SIZE) { /* not supported for data w/o checksums */ btrfs_err_rl(fs_info, "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails", fs_info->sectorsize, PAGE_SIZE); return -EINVAL; } if (fs_info->nodesize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK || fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { /* * would exhaust the array bounds of pagev member in * struct scrub_block */ btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails", fs_info->nodesize, SCRUB_MAX_PAGES_PER_BLOCK, fs_info->sectorsize, SCRUB_MAX_PAGES_PER_BLOCK); return -EINVAL; } /* Allocate outside of device_list_mutex */ sctx = scrub_setup_ctx(fs_info, is_dev_replace); if (IS_ERR(sctx)) return PTR_ERR(sctx); mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; goto out_free_ctx; } if (!is_dev_replace && !readonly && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; goto out_free_ctx; } mutex_lock(&fs_info->scrub_lock); if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; goto out_free_ctx; } down_read(&fs_info->dev_replace.rwsem); if (dev->scrub_ctx || (!is_dev_replace && btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { up_read(&fs_info->dev_replace.rwsem); mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; goto out_free_ctx; } up_read(&fs_info->dev_replace.rwsem); ret = scrub_workers_get(fs_info, is_dev_replace); if (ret) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); goto out_free_ctx; } sctx->readonly = readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); /* * checking @scrub_pause_req here, we can avoid * race between committing transaction and scrubbing. */ __scrub_blocked_if_needed(fs_info); atomic_inc(&fs_info->scrubs_running); mutex_unlock(&fs_info->scrub_lock); /* * In order to avoid deadlock with reclaim when there is a transaction * trying to pause scrub, make sure we use GFP_NOFS for all the * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity() * invoked by our callees. The pausing request is done when the * transaction commit starts, and it blocks the transaction until scrub * is paused (done at specific points at scrub_stripe() or right above * before incrementing fs_info->scrubs_running). */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { /* * by holding device list mutex, we can * kick off writing super in log tree sync. 
*/ mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); } if (!ret) ret = scrub_enumerate_chunks(sctx, dev, start, end); memalloc_nofs_restore(nofs_flag); wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); atomic_dec(&fs_info->scrubs_running); wake_up(&fs_info->scrub_pause_wait); wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); if (progress) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; scrub_workers_put(fs_info); mutex_unlock(&fs_info->scrub_lock); scrub_put_ctx(sctx); return ret; out_free_ctx: scrub_free_ctx(sctx); return ret; } void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); atomic_inc(&fs_info->scrub_pause_req); while (atomic_read(&fs_info->scrubs_paused) != atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_paused) == atomic_read(&fs_info->scrubs_running)); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); } void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) { atomic_dec(&fs_info->scrub_pause_req); wake_up(&fs_info->scrub_pause_wait); } int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); if (!atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&fs_info->scrub_cancel_req); while (atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_running) == 0); mutex_lock(&fs_info->scrub_lock); } atomic_dec(&fs_info->scrub_cancel_req); mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info, struct btrfs_device *dev) { struct scrub_ctx *sctx; mutex_lock(&fs_info->scrub_lock); sctx = dev->scrub_ctx; if (!sctx) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&sctx->cancel_req); while (dev->scrub_ctx) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, dev->scrub_ctx == NULL); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, struct btrfs_scrub_progress *progress) { struct btrfs_device *dev; struct scrub_ctx *sctx = NULL; mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (dev) sctx = dev->scrub_ctx; if (sctx) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_unlock(&fs_info->fs_devices->device_list_mutex); return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; } static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u64 extent_len, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num) { u64 mapped_length; struct btrfs_bio *bbio = NULL; int ret; mapped_length = extent_len; ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, &mapped_length, &bbio, 0); if (ret || !bbio || mapped_length < extent_len || !bbio->stripes[0].dev->bdev) { btrfs_put_bbio(bbio); return; } *extent_physical = bbio->stripes[0].physical; *extent_mirror_num = bbio->mirror_num; *extent_dev = bbio->stripes[0].dev; btrfs_put_bbio(bbio); }
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
{'added': [(3838, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);'), (4015, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);')], 'deleted': [(3838, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);'), (4015, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);')]}
2
2
3,051
18,241
https://github.com/torvalds/linux
CVE-2019-18885
['CWE-476']
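The record above (CVE-2019-18885, tagged CWE-476) boils down to a two-line change: both call sites of btrfs_find_device() in scrub.c gain an extra boolean argument, and the callers keep guarding the possibly-NULL return before dereferencing the device. As a reading aid, here is a minimal, self-contained C sketch of that lookup-then-guard pattern; the names (struct device, device_table, find_device, include_missing) are illustrative stand-ins chosen for this sketch, not kernel APIs, and the sketch does not reproduce the actual btrfs fix.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the device structures in the record above. */
struct device {
	unsigned long long devid;
	int missing;
};

struct device_table {
	struct device *devices;
	size_t count;
};

/* Lookup that may legitimately return NULL (device absent or filtered out).
 * The extra boolean widens/narrows the search, mirroring the added argument
 * in the diff above. */
static struct device *find_device(struct device_table *tbl,
				  unsigned long long devid,
				  int include_missing)
{
	size_t i;

	for (i = 0; i < tbl->count; i++) {
		struct device *dev = &tbl->devices[i];

		if (dev->devid != devid)
			continue;
		if (dev->missing && !include_missing)
			continue;
		return dev;
	}
	return NULL; /* callers must check this, or risk CWE-476 */
}

int main(void)
{
	struct device devs[] = { { 1, 0 }, { 2, 1 } };
	struct device_table tbl = { devs, 2 };
	struct device *dev;

	/* Guard the result before dereferencing, as the scrub code does with
	 * "if (!dev || ...)" after btrfs_find_device(). */
	dev = find_device(&tbl, 2, 0);
	if (!dev)
		fprintf(stderr, "devid 2 not found (missing filtered out)\n");

	dev = find_device(&tbl, 1, 0);
	if (!dev) {
		fprintf(stderr, "devid 1 not found\n");
		return 1; /* analogue of returning -ENODEV */
	}
	printf("found devid %llu\n", dev->devid);
	return 0;
}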
orders.c
update_read_brush
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && !update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, 
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
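/* MEM3BLT adds back/fore colours and a brush (fields 9-15) on top of the MEMBLT layout; like MEMBLT, the colour index travels in the high byte of cacheId and is unpacked below. */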
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
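/* opRight/opBottom close the opaque rectangle; fields 15-19 are then flagged together because update_write_brush() always emits the full brush description. */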
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
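/* style and iBytes complete the fixed brush header; for 8x8 brushes the pattern rows that follow are written in reverse order, mirroring the read path above. */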
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
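/* Both cache-bitmap variants share one parser; the flag only selects whether a compression header is expected. The returned order is owned here and freed immediately after the callback runs. */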
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
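/*
 * Editorial note (not part of the original source): update_recv_order() is the entry
 * point for a single drawing order. The controlFlags byte read above selects the
 * parser: with ORDER_STANDARD clear the payload is an alternate secondary order,
 * with ORDER_STANDARD and ORDER_SECONDARY both set it is a secondary (cache) order,
 * and otherwise it is a primary drawing order. A minimal caller sketch, where the
 * loop bound `numberOrders` is hypothetical and would really come from the update
 * PDU header, could look like:
 *
 *     UINT16 i;
 *     for (i = 0; i < numberOrders; i++)
 *     {
 *         if (!update_recv_order(update, s))
 *             return FALSE; // parsing or callback failure aborts the whole update
 *     }
 */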
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
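/* Editorial note: this branch handles orders whose support was never announced
 * during capability negotiation. With AllowUnanouncedOrdersFromServer set (the
 * /relax-order-checks option) the order is only logged as a server bug and still
 * accepted; otherwise it is rejected. */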
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
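/* Editorial note: the update_approximate_*_order() helpers return a rough upper
 * bound that the corresponding writer passes to Stream_EnsureRemainingCapacity()
 * before emitting fields; 32 bytes comfortably covers the largest OpaqueRect
 * encoding. */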
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
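/* Editorial note: cbData only reports the size of the encoded rectangle array;
 * update_read_delta_rects() bounds the loop by numRectangles (capped at 45) and
 * decodes each DELTA_RECT with left/top accumulated as deltas from the previous
 * rectangle, reusing the previous width/height when the zero bits say so. */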
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
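/* Editorial note: the remaining LineTo fields written below are the raster
 * operation (bRop2), pen style, pen width and pen color, each announced by its
 * field-presence flag in orderInfo->fieldFlags. */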
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
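/* Editorial note: fields 9 and 10 below carry the back/fore colors, fields 11-15
 * the brush, and field 16 the cache index; as with MemBlt, the high byte of
 * cacheId doubles as the color table index and is split out into colorIndex
 * further down. */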
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; }
static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; }
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
98
24
3,271
19,873
https://github.com/FreeRDP/FreeRDP
CVE-2020-11096
['CWE-125']
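The diff above records the out-of-bounds-read fix (CWE-125) for CVE-2020-11096: fixed-size colour-depth lookup tables such as CBR2_BPP[] are indexed with a small identifier taken straight from the wire, and the 4-bit id can exceed the 7-entry table. The patch replaces each table with a switch-based helper (get_cbr2_bpp, get_bmf_bpp, get_bpp_bmf) that validates the identifier and reports failure to the caller. The sketch below is a self-contained illustration of that pattern, not the exact FreeRDP code: WLog_WARN is replaced with fprintf and the caller is a hypothetical main() so it compiles on its own.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Before the fix (sketch): a 7-entry table indexed by a 4-bit wire value.
 * Identifiers 7..15 read past the end of the array (CWE-125). */
static const uint8_t CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };

/* After the fix (pattern from the diff): map only the defined identifiers
 * and signal failure for everything else. */
static uint8_t get_cbr2_bpp(uint32_t id, bool* pValid)
{
	if (pValid)
		*pValid = true;

	switch (id)
	{
		case 3:
			return 8;
		case 4:
			return 16;
		case 5:
			return 24;
		case 6:
			return 32;
		default:
			/* FreeRDP uses WLog_WARN here; plain stderr keeps the sketch standalone. */
			fprintf(stderr, "Invalid bpp id %u\n", (unsigned)id);
			if (pValid)
				*pValid = false;
			return 0;
	}
}

int main(void)
{
	bool ok;
	uint32_t wire_flags = 0x0078;             /* hypothetical flags field from the wire */
	uint32_t id = (wire_flags & 0x0078) >> 3; /* 4-bit identifier, here 15 */

	/* Old pattern: CBR2_BPP[id] would read out of bounds for id >= 7. */
	(void)CBR2_BPP;

	/* New pattern: the helper rejects the identifier and the caller bails out,
	 * mirroring the "if (!rc) goto fail;" checks added by the patch. */
	uint8_t bpp = get_cbr2_bpp(id, &ok);
	if (!ok)
		fprintf(stderr, "rejecting malformed order (id=%u)\n", (unsigned)id);
	else
		printf("bpp=%u\n", (unsigned)bpp);

	return 0;
}
```

The same caller-side check appears in the patched update_read_brush above: get_bmf_bpp() reports an invalid brush style through its BOOL out-parameter and the order parser returns FALSE instead of indexing a table with the raw value.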
orders.c
update_read_cache_bitmap_v2_order
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && !update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, 
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
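/* For an 8x8 brush the body written below is either the 8 bytes of a 1bpp pattern emitted bottom-up (row 7 first), the palette-compressed layout parsed by update_decompress_brush when iBytes is 20 (8bpp), 24 (16bpp) or 32 (32bpp), i.e. 16 bytes of 2-bit colour indices followed by a 4-entry palette, or otherwise uncompressed bottom-up scanlines of (bpp / 8) * 8 bytes each. */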
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
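The parser above dispatches every drawing order on the single controlFlags byte that precedes it. As a quick illustration of that dispatch, here is a minimal standalone sketch; it is not part of the FreeRDP sources reproduced in this record, classify_order and the sample values are purely illustrative, and the ORDER_STANDARD / ORDER_SECONDARY values (0x01 / 0x02) are assumed from the TS_STANDARD / TS_SECONDARY bits in [MS-RDPEGDI]:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ORDER_STANDARD 0x01  /* TS_STANDARD (assumed value)  */
#define ORDER_SECONDARY 0x02 /* TS_SECONDARY (assumed value) */

typedef enum
{
	ORDER_CLASS_PRIMARY,
	ORDER_CLASS_SECONDARY,
	ORDER_CLASS_ALTSEC
} ORDER_CLASS;

/* Same decision tree as update_recv_order above: a cleared standard bit means
 * "alternate secondary"; otherwise the secondary bit selects between secondary
 * and primary orders. Alternate secondary orders carry their order type in the
 * upper six bits of the very same byte. */
static ORDER_CLASS classify_order(uint8_t controlFlags, uint8_t* altsecType)
{
	if (!(controlFlags & ORDER_STANDARD))
	{
		if (altsecType)
			*altsecType = controlFlags >> 2; /* orderType lives in bits 2..7 */
		return ORDER_CLASS_ALTSEC;
	}
	if (controlFlags & ORDER_SECONDARY)
		return ORDER_CLASS_SECONDARY;
	return ORDER_CLASS_PRIMARY;
}

int main(void)
{
	const uint8_t samples[] = { 0x01, 0x03, 0x34 };
	size_t i;
	for (i = 0; i < sizeof(samples); i++)
	{
		uint8_t altsecType = 0;
		ORDER_CLASS c = classify_order(samples[i], &altsecType);
		printf("0x%02X -> class %d, altsec type %u\n", samples[i], (int)c, (unsigned)altsecType);
	}
	return 0;
}

Packing the alternate secondary order type into the spare bits of controlFlags is what the flags >>= 2 in update_recv_altsec_order undoes; it saves a dedicated orderType byte for that order class.

The same parser leans heavily on variable-length field encodings such as the one read by update_read_2byte_unsigned. The following sketch, again standalone and only illustrative (the helper names are not FreeRDP APIs), mirrors the 2-byte unsigned form: values below 0x7F travel in one byte, larger values up to 0x7FFF set the top bit of the first byte and append the low byte:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Encode a value the way update_write_2byte_unsigned does; returns the number
 * of bytes produced, or 0 if the value does not fit in 15 bits. */
static size_t encode_2byte_unsigned(uint32_t value, uint8_t out[2])
{
	if (value > 0x7FFF)
		return 0;
	if (value >= 0x7F)
	{
		out[0] = (uint8_t)(0x80 | ((value >> 8) & 0x7F));
		out[1] = (uint8_t)(value & 0xFF);
		return 2;
	}
	out[0] = (uint8_t)(value & 0x7F);
	return 1;
}

/* Decode the same encoding (mirror of update_read_2byte_unsigned); returns the
 * number of bytes consumed, or 0 if the buffer is too short. */
static size_t decode_2byte_unsigned(const uint8_t* in, size_t len, uint32_t* value)
{
	if (len < 1)
		return 0;
	if (in[0] & 0x80)
	{
		if (len < 2)
			return 0;
		*value = ((uint32_t)(in[0] & 0x7F) << 8) | in[1];
		return 2;
	}
	*value = in[0] & 0x7F;
	return 1;
}

int main(void)
{
	uint8_t buf[2];
	uint32_t decoded = 0;
	size_t n = encode_2byte_unsigned(0x1234, buf);
	if (n && decode_2byte_unsigned(buf, n, &decoded))
		printf("0x1234 -> %zu byte(s) -> 0x%04X\n", n, decoded);
	return 0;
}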
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
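/* Note (editorial comment, derived from how this file uses these helpers): the
 * update_approximate_*_order functions only return a rough upper bound on the encoded
 * size of an order. The matching update_write_*_order routine passes the value to
 * Stream_EnsureRemainingCapacity() before serializing, so the estimate merely has to be
 * large enough, never exact. */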
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
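/* Like the other MULTI_* orders, the final field is a 2-byte cbData length followed by a
 * packed, delta-encoded rectangle list. update_read_delta_rects() rejects more than 45
 * rectangles and first skips the zero-bit flag bytes (zeroBitsSize = (numRectangles + 1) / 2,
 * e.g. 3 rectangles are preceded by 2 flag bytes); each 4-bit flag nibble records which of
 * the left/top/width/height deltas were omitted from the stream for that rectangle. */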
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
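/* update_write_color()/update_read_color() handle the generic 3-byte color field: the low,
 * middle and high bytes of the UINT32 are emitted in that order with no padding byte
 * (unlike the 4-byte COLORREF consumed by update_read_colorref()), so for example a value
 * of 0x00CCBBAA is serialized as the byte sequence AA BB CC. */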
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
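/* Fields 2..8 above are coordinates: ORDER_FIELD_COORD() defers to update_read_coord(),
 * which consumes either a full 16-bit signed value or, when orderInfo->deltaCoordinates
 * is set, a single signed byte that is added to the previously decoded coordinate. */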
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
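/* The writer marks every field it emits as present: each ORDER_FIELD_nn bit is OR-ed into
 * orderInfo->fieldFlags immediately before the matching value is serialized, so the encoder
 * never relies on the field elision that the reader has to handle. */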
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; }
static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; }
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
98
24
3,271
19,873
https://github.com/FreeRDP/FreeRDP
CVE-2020-11096
['CWE-125']
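Note (illustrative, not part of the dataset record): the diff stored above contains the whole CVE-2020-11096 (CWE-125, out-of-bounds read) fix. Fixed-size lookup tables such as CBR2_BPP[], BMF_BPP[] and BPP_BMF[], which were indexed directly with id values parsed from the wire, are replaced by helper functions (get_cbr2_bpp, get_bmf_bpp, get_bpp_bmf) that validate the id before translating it. The sketch below condenses the added get_cbr2_bpp() helper and its call site from the diff; the WLog_WARN logging of the real patch is omitted so the snippet stands alone, and the BYTE/UINT32/BOOL types are the winpr typedefs that orders.c already includes.

	static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)
	{
		if (pValid)
			*pValid = TRUE;

		switch (bpp) /* only ids 3..6 name a valid colour depth */
		{
			case 3:
				return 8;
			case 4:
				return 16;
			case 5:
				return 24;
			case 6:
				return 32;
			default: /* out-of-range id: report failure instead of indexing past the 7-entry table */
				if (pValid)
					*pValid = FALSE;
				return 0;
		}
	}

	/* call site (condensed from the diff): bail out instead of trusting the raw index */
	cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);
	if (!rc)
		goto fail;

The bitsPerPixelId value is a 4-bit field extracted from the order flags, so the old CBR2_BPP[bitsPerPixelId] lookup could read past the 7-entry array; routing every id-to-bpp translation through a validating switch (and checking the returned BOOL at each caller, as the added lines in the diff do) removes that out-of-bounds read.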
orders.c
update_read_cache_bitmap_v3_order
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && !update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, 
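/* Note on the delta arrays parsed above: update_read_delta_rects stores one nibble of
 * presence flags per rectangle (hence the (number + 1) / 2 zeroBits prefix); a set bit
 * means the field is omitted and either stays zero (left/top) or repeats the previous
 * rectangle's value (width/height).  left/top are deltas accumulated onto the previous
 * rectangle.  update_read_delta_points works the same way with two flag bits per point.
 * The 45-rectangle cap matches the fixed-size rectangle arrays the callers pass in
 * (an inference from the surrounding structures, not restated from the spec). */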
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
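/* Note on update_read_opaque_rect_order above: the colour is cached between orders as a
 * 0x00BBGGRR value, and each of the three optional bytes (fields 5..7) replaces only its
 * own channel, which is why the other two channels are masked before the new byte is
 * OR-ed in. */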
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
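/* Note on the brush sub-fields: update_read_brush consumes five consecutive bits of the
 * order's field flags, so each caller shifts fieldFlags until the brush's first field
 * lands in bit 0: >> 7 for PATBLT (fields 8..12), >> 10 for MEM3BLT (fields 11..15),
 * >> 14 for GLYPH_INDEX (fields 15..19).  The glyph/fast-index data that follows is
 * limited to cbData bytes (a single-byte length) and is bounds-checked against the
 * stream before it is copied. */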
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
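/* Note on POLYGON_CB (assignment completed just below): the top bit of the bRop2 byte
 * selects the background mode (transparent when set, opaque otherwise) and the low five
 * bits are the actual ROP2 code, which is why bRop2 is masked with 0x1F right after
 * backMode is derived. */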
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
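/* Note on the secondary CACHE_BITMAP order handled here: when the bitmap data is
 * compressed and NO_BITMAP_COMPRESSION_HDR is not set, an 8-byte bitmapComprHdr precedes
 * the pixel data and is subtracted from bitmapLength; the reader also rejects a zero
 * length and checks the remaining stream size before copying bitmapDataStream. */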
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
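/* Note on the CACHE_BITMAP_V2/V3 and CACHE_COLOR_TABLE orders above: the v2/v3 variants
 * pack their parameters into the secondary-order flags word (cache id in the low two
 * bits, the bpp table index in bits 3..6, the remaining flags from bit 7 upwards), while
 * the colour table order requires exactly 256 entries of 4 bytes each, as enforced by
 * the numberColors check. */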
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
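/* Note on the glyph bitmap size used above and below: each glyph row occupies
 * ((cx + 7) / 8) bytes and the whole bitmap is rounded up to a multiple of four bytes,
 * e.g. cx = 9, cy = 3 gives 2 * 3 = 6 bytes, padded to 8.  The readers allocate and fill
 * exactly that many bytes (glyph->cb / glyph->aj). */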
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
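The update_recv_order() dispatcher above branches on the controlFlags byte it reads from the stream: when the ORDER_STANDARD bit is clear the order is an alternate secondary order (its type travels in the upper six bits of the same byte, see update_recv_altsec_order), otherwise the ORDER_SECONDARY bit separates secondary from primary orders. The stand-alone sketch below mirrors that classification; the two flag values follow the TS_STANDARD / TS_SECONDARY control flags of [MS-RDPEGDI] and are assumptions made for illustration, not definitions taken from this file.

#include <stdio.h>

/* Illustrative control-flag values (assumed, per [MS-RDPEGDI] TS_STANDARD / TS_SECONDARY). */
#define ORDER_STANDARD 0x01
#define ORDER_SECONDARY 0x02

/* Mirror the branch structure of update_recv_order() on a raw controlFlags byte. */
static const char* classify_order(unsigned char controlFlags)
{
    if (!(controlFlags & ORDER_STANDARD))
        return "alternate secondary"; /* orderType is carried in controlFlags >> 2 */

    if (controlFlags & ORDER_SECONDARY)
        return "secondary";

    return "primary";
}

int main(void)
{
    printf("0x03 -> %s\n", classify_order(0x03)); /* standard + secondary bit */
    printf("0x01 -> %s\n", classify_order(0x01)); /* standard only -> primary */
    printf("0x02 -> %s\n", classify_order(0x02)); /* standard bit clear -> altsec */
    return 0;
}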
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
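/* cbData (the encoded size of the rectangle list) is recorded but not used to bound
 * the parse; update_read_delta_rects() validates numRectangles and the remaining
 * stream length on its own. */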
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
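/* As in the other update_write_*_order functions in this file, every field flag is
 * set and every field is emitted unconditionally; the writer does not use the
 * omission scheme the reader supports, where a clear bit in fieldFlags means "keep
 * the previously cached value for this field". */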
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
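/* Fields 9 and 10 below are the back/fore colours; fields 11-15 carry the brush.
 * update_read_brush() expects the flags for its five brush sub-fields in bits 0-4,
 * which is why the call below passes fieldFlags >> 10 (field N corresponds to bit
 * N-1, so field 11 lives in bit 10).  The same convention explains the >> 7 used for
 * PATBLT (brush in fields 8-12) and the >> 14 used for GLYPH_INDEX (fields 15-19). */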
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
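/* The remaining fields mirror the reader above: fields 15-19 carry the brush (hence
 * the fieldFlags >> 14 passed to update_write_brush() below) and field 22 the
 * variable-length glyph fragment data.  cbData is a single byte, so at most 255
 * bytes of fragment data follow it; the matching reader validates cbData against the
 * remaining stream length before copying. */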
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; }
static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; }
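The func_before/func_after pair above differs only in how the 4-bit bitsPerPixelId decoded from the order flags is turned into a color depth: the original indexes the seven-entry CBR23_BPP table directly, so ids 7-15 read past the table, while the patched version calls get_cbr2_bpp(), which flags unknown ids and lets the caller abort the parse. A minimal stand-alone sketch of that validated-lookup pattern follows; the helper body mirrors the `added` lines in the diff (logging dropped), and parse_bpp_field() with its flag handling is an illustrative assumption, not FreeRDP code.

#include <stdint.h>
#include <stdio.h>

typedef int BOOL;
#define TRUE 1
#define FALSE 0

/* Validated lookup: maps a wire-encoded bitsPerPixelId to a color depth.
 * Unknown ids set *pValid to FALSE instead of reading past a fixed table. */
static uint8_t get_cbr2_bpp(uint32_t bpp_id, BOOL* pValid)
{
    if (pValid)
        *pValid = TRUE;
    switch (bpp_id)
    {
        case 3:
            return 8;
        case 4:
            return 16;
        case 5:
            return 24;
        case 6:
            return 32;
        default:
            if (pValid)
                *pValid = FALSE;
            return 0;
    }
}

/* Hypothetical caller mirroring the patched parse path: reject, don't index. */
static BOOL parse_bpp_field(uint32_t flags, uint8_t* out_bpp)
{
    BOOL ok;
    uint32_t bitsPerPixelId = (flags & 0x00000078) >> 3;
    uint8_t bpp = get_cbr2_bpp(bitsPerPixelId, &ok);

    if (!ok)
        return FALSE; /* previously: CBR23_BPP[bitsPerPixelId] with no range check */

    *out_bpp = bpp;
    return TRUE;
}

int main(void)
{
    uint8_t bpp = 0;

    if (parse_bpp_field(5u << 3, &bpp)) /* id 5 -> 24 bpp */
        printf("id 5 accepted, bpp=%u\n", (unsigned)bpp);

    if (!parse_bpp_field(7u << 3, &bpp)) /* id 7 read out of bounds before the patch */
        printf("id 7 rejected\n");

    return 0;
}

The same switch-plus-validity-flag shape replaces every fixed table touched by the patch (get_bmf_bpp, get_bpp_bmf), so a malformed id fails the order read instead of yielding an out-of-bounds byte.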
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
98
24
3,271
19,873
https://github.com/FreeRDP/FreeRDP
CVE-2020-11096
['CWE-125']
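The CWE-125 tag above names the defect class this diff removes: reads past the end of the fixed translation tables, for example CBR23_BPP[bitsPerPixelId], where a 4-bit id indexes a seven-entry table, or the deleted BPP_BMF[cache_brush->bpp], where a full byte indexes a 40-entry table. The fragment below is an illustrative stand-alone reproduction of that pattern, not FreeRDP code; the length guard stands in for the switch-based get_bpp_bmf()/get_bmf_bpp() helpers that the patch introduces.

#include <stdio.h>

/* Same layout as the deleted BPP_BMF table: 40 entries mapping a color
 * depth to a brush-format id. Indexing it with any depth >= 40 reads
 * whatever happens to follow the array (CWE-125, out-of-bounds read). */
static const unsigned char BPP_BMF[40] = {
    0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,
    0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0
};

int main(void)
{
    unsigned bpp = 200; /* hypothetical out-of-range color depth */

    /* Vulnerable shape: iBitmapFormat = BPP_BMF[bpp]; with no range check. */
    if (bpp < sizeof(BPP_BMF)) /* guard standing in for get_bpp_bmf()'s switch */
        printf("bmf id %u\n", (unsigned)BPP_BMF[bpp]);
    else
        printf("depth %u rejected\n", bpp);

    return 0;
}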
orders.c
update_read_cache_brush_order
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && !update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, 
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
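/* Descriptive note (editor comment): opRight (field 13) and opBottom (field 14) are
 * written next; fields 15-19 are then flagged for the brush emitted via
 * update_write_brush(s, ..., fieldFlags >> 14), and fields 20-22 carry the x/y
 * position plus the variable-length cbData/data block. */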
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
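/* Descriptive note (editor comment): style and iBytes (length) follow; the writer then
 * mirrors the reader above: 1bpp 8x8 brushes are stored as 8 rows in reverse order,
 * while 8/16/32bpp brushes whose length is 20/24/32 bytes are treated as compressed.
 * Since update_compress_brush() currently just returns FALSE, such compressed brushes
 * cannot actually be written. */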
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
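/* Descriptive note (editor comment): both case labels are served by
 * update_read_cache_bitmap_order(); the boolean below selects whether the optional
 * 8-byte bitmapComprHdr is parsed and is recorded in the resulting order. As with the
 * other secondary orders handled further down, the parsed order is dispatched through
 * the callback table and freed again before leaving the switch, so ownership never
 * escapes this function. */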
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
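/*
 * Illustrative sketch only (compiled out): how the controlFlags byte consumed
 * by update_recv_order() above selects the order class. The helper name is
 * hypothetical; ORDER_STANDARD and ORDER_SECONDARY are the same flags tested
 * above, and the alternate secondary orderType sits in the upper six bits.
 */
#if 0
static const char* example_order_class(BYTE controlFlags)
{
	if (!(controlFlags & ORDER_STANDARD))
		return "alternate secondary"; /* orderType = controlFlags >> 2 */

	if (controlFlags & ORDER_SECONDARY)
		return "secondary"; /* cache orders: bitmaps, color tables, glyphs, brushes */

	return "primary"; /* drawing orders: DstBlt, PatBlt, MemBlt, ... */
}
#endif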
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
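/* condition tells us whether the peer announced support for this order during
 * capability negotiation; unannounced orders are only tolerated when relaxed
 * order checks (AllowUnanouncedOrdersFromServer) are enabled. */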
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
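 * Either capability bit is therefore accepted for both order types below.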
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
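/* Like the other update_approximate_*_order() helpers, this returns a
 * conservative upper bound that the writer passes to
 * Stream_EnsureRemainingCapacity() before emitting any field. */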
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
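/* cbData is only the size of the encoded rectangle blob; decoding below is
 * driven by numRectangles (at most 45). update_read_delta_rects() first reads
 * (numRectangles + 1) / 2 "zero bit" flag bytes, then a 1- or 2-byte signed
 * delta for each present field, relative to the previous rectangle. */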
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
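/* Unlike the readers, the writers set every ORDER_FIELD_xx bit and emit every
 * field unconditionally. */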
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
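/* The brush sub-fields occupy field flags 11..15, so fieldFlags >> 10 below
 * realigns them with the ORDER_FIELD_01..05 bits that update_read_brush()
 * expects (mem3blt uses fields 9 and 10 for the back/fore colors). */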
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; }
static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; }
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
98
24
3,271
19,873
https://github.com/FreeRDP/FreeRDP
CVE-2020-11096
['CWE-125']
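Note on the record above: per its diff field, the fix for CVE-2020-11096 (CWE-125, out-of-bounds read) in https://github.com/FreeRDP/FreeRDP orders.c removes the fixed lookup tables CBR2_BPP/BMF_BPP/BPP_BMF and introduces switch-based helpers (get_cbr2_bpp, get_bmf_bpp, get_bpp_bmf) that validate the wire-supplied index before translating it to a bit depth. The snippet below is a minimal, self-contained restatement of that pattern, copied from the added lines of the diff; the only change is that the WLog_WARN call from the real patch is omitted so the sketch compiles without the module's TAG definition.

#include <winpr/wtypes.h> /* BYTE, UINT32, BOOL, TRUE, FALSE */

/* Bounds-checked replacement for the BMF_BPP[] table lookup:
 * the attacker-controlled iBitmapFormat value is validated in a
 * switch, and *pValid tells the caller whether the input was legal
 * instead of silently indexing past the end of a fixed array. */
static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)
{
	if (pValid)
		*pValid = TRUE;
	switch (bmf)
	{
		case 1:
			return 1; /* 1bpp monochrome brush */
		case 3:
			return 8;
		case 4:
			return 16;
		case 5:
			return 24;
		case 6:
			return 32;
		default:
			if (pValid)
				*pValid = FALSE; /* reject unknown formats */
			return 0;
	}
}

Callers such as update_read_cache_brush_order then test the returned flag, as in the func_after field above (cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail;), rather than indexing BMF_BPP[iBitmapFormat] directly.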
orders.c
update_write_brush
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1)
            return FALSE;

        Stream_Read_UINT8(s, brush->hatch);
    }

    if (brush->style & CACHED_BRUSH)
    {
        brush->index = brush->hatch;
        brush->bpp = BMF_BPP[brush->style & 0x07];

        if (brush->bpp == 0)
            brush->bpp = 1;
    }

    if (fieldFlags & ORDER_FIELD_05)
    {
        if (Stream_GetRemainingLength(s) < 7)
            return FALSE;

        brush->data = (BYTE*)brush->p8x8;
        Stream_Read_UINT8(s, brush->data[7]);
        Stream_Read_UINT8(s, brush->data[6]);
        Stream_Read_UINT8(s, brush->data[5]);
        Stream_Read_UINT8(s, brush->data[4]);
        Stream_Read_UINT8(s, brush->data[3]);
        Stream_Read_UINT8(s, brush->data[2]);
        Stream_Read_UINT8(s, brush->data[1]);
        brush->data[0] = brush->hatch;
    }

    return TRUE;
}

static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags)
{
    if (fieldFlags & ORDER_FIELD_01)
    {
        Stream_Write_UINT8(s, brush->x);
    }

    if (fieldFlags & ORDER_FIELD_02)
    {
        Stream_Write_UINT8(s, brush->y);
    }

    if (fieldFlags & ORDER_FIELD_03)
    {
        Stream_Write_UINT8(s, brush->style);
    }

    if (brush->style & CACHED_BRUSH)
    {
        brush->hatch = brush->index;
        brush->bpp = BMF_BPP[brush->style & 0x07];

        if (brush->bpp == 0)
            brush->bpp = 1;
    }

    if (fieldFlags & ORDER_FIELD_04)
    {
        Stream_Write_UINT8(s, brush->hatch);
    }

    if (fieldFlags & ORDER_FIELD_05)
    {
        brush->data = (BYTE*)brush->p8x8;
        Stream_Write_UINT8(s, brush->data[7]);
        Stream_Write_UINT8(s, brush->data[6]);
        Stream_Write_UINT8(s, brush->data[5]);
        Stream_Write_UINT8(s, brush->data[4]);
        Stream_Write_UINT8(s, brush->data[3]);
        Stream_Write_UINT8(s, brush->data[2]);
        Stream_Write_UINT8(s, brush->data[1]);
        brush->data[0] = brush->hatch;
    }

    return TRUE;
}

static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr)
{
    UINT32 number = *nr;
    UINT32 i;
    BYTE flags = 0;
    BYTE* zeroBits;
    UINT32 zeroBitsSize;

    if (number > 45)
    {
        WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number);
        return FALSE;
    }

    zeroBitsSize = ((number + 1) / 2);

    if (Stream_GetRemainingLength(s) < zeroBitsSize)
        return FALSE;

    Stream_GetPointer(s, zeroBits);
    Stream_Seek(s, zeroBitsSize);
    ZeroMemory(rectangles, sizeof(DELTA_RECT) * number);

    for (i = 0; i < number; i++)
    {
        if (i % 2 == 0)
            flags = zeroBits[i / 2];

        if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left))
            return FALSE;

        if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top))
            return FALSE;

        if (~flags & 0x20)
        {
            if (!update_read_delta(s, &rectangles[i].width))
                return FALSE;
        }
        else if (i > 0)
            rectangles[i].width = rectangles[i - 1].width;
        else
            rectangles[i].width = 0;

        if (~flags & 0x10)
        {
            if (!update_read_delta(s, &rectangles[i].height))
                return FALSE;
        }
        else if (i > 0)
            rectangles[i].height = rectangles[i - 1].height;
        else
            rectangles[i].height = 0;

        if (i > 0)
        {
            rectangles[i].left += rectangles[i - 1].left;
            rectangles[i].top += rectangles[i - 1].top;
        }

        flags <<= 4;
    }

    return TRUE;
}

static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x,
                                            INT16 y)
{
    int i;
    BYTE flags = 0;
    BYTE* zeroBits;
    UINT32 zeroBitsSize;
    zeroBitsSize = ((number + 3) / 4);

    if (Stream_GetRemainingLength(s) < zeroBitsSize)
    {
        WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize);
        return FALSE;
    }

    Stream_GetPointer(s, zeroBits);
    Stream_Seek(s, zeroBitsSize);
    ZeroMemory(points, sizeof(DELTA_POINT) * number);

    for (i = 0; i < number; i++)
    {
        if (i % 4 == 0)
            flags = zeroBits[i / 4];

        if ((~flags & 0x80) && !update_read_delta(s, &points[i].x))
        {
            WLog_ERR(TAG, "update_read_delta(x) failed");
            return FALSE;
        }

        if ((~flags & 0x40) && !update_read_delta(s, &points[i].y))
        {
            WLog_ERR(TAG,
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
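
Illustrative sketch only, not part of the dataset record above or below: the orders code shown here repeatedly uses a 2-byte variable-length unsigned field (update_read_2byte_unsigned / update_write_2byte_unsigned), where the top bit of the first byte signals that a second byte follows. The snippet below round-trips that encoding with a plain byte buffer in place of FreeRDP's wStream; the helper names and the standalone main() are assumptions made for this sketch, not FreeRDP API.

/* Standalone sketch of the 2-byte variable-length unsigned encoding used by
 * the drawing-order parser above (plain buffer instead of wStream). */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Encode value (must fit in 15 bits); returns bytes written, 0 on error. */
static size_t encode_2byte_unsigned(uint32_t value, uint8_t* buf)
{
    if (value > 0x7FFF)
        return 0;                                          /* out of range */
    if (value >= 0x7F)
    {
        buf[0] = (uint8_t)(((value >> 8) & 0x7F) | 0x80);  /* high bits, continuation flag */
        buf[1] = (uint8_t)(value & 0xFF);                  /* low byte */
        return 2;
    }
    buf[0] = (uint8_t)(value & 0x7F);                      /* single-byte form */
    return 1;
}

/* Decode from buf (len bytes available); returns bytes consumed, 0 on error. */
static size_t decode_2byte_unsigned(const uint8_t* buf, size_t len, uint32_t* value)
{
    if (len < 1)
        return 0;
    if (buf[0] & 0x80)                                     /* continuation flag set */
    {
        if (len < 2)
            return 0;
        *value = ((uint32_t)(buf[0] & 0x7F) << 8) | buf[1];
        return 2;
    }
    *value = buf[0] & 0x7F;
    return 1;
}

int main(void)
{
    const uint32_t samples[] = { 0, 0x3F, 0x7E, 0x7F, 0x1234, 0x7FFF };
    size_t i;
    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    {
        uint8_t buf[2];
        uint32_t decoded = 0;
        size_t written = encode_2byte_unsigned(samples[i], buf);
        size_t consumed = decode_2byte_unsigned(buf, written, &decoded);
        printf("0x%04X -> %u byte(s) -> 0x%04X (%s)\n", (unsigned)samples[i],
               (unsigned)written, (unsigned)decoded,
               (written == consumed && decoded == samples[i]) ? "ok" : "MISMATCH");
    }
    return 0;
}

Values below 0x7F take one byte, everything up to 0x7FFF takes two; the same continuation-bit idea, extended to a 2-bit length prefix, underlies update_read_4byte_unsigned in the code above.
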
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
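/* Each point consumes two bits of the zero-bits mask: 0x80 set means the x delta is
 * omitted, 0x40 set means the y delta is omitted; omitted deltas stay zero from the
 * ZeroMemory above. Four points share one mask byte (flags <<= 2 per point). */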
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
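/* Brush members occupy order field flags 8..12; shifting by 7 maps them onto
 * ORDER_FIELD_01..ORDER_FIELD_05 as expected by update_read_brush. */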
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
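/* The update_approximate_*_order helpers return a conservative upper bound in bytes;
 * it is used to grow the stream via Stream_EnsureRemainingCapacity before the
 * corresponding write routine emits the actual fields. */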
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
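/* cbData gives the size of the encoded rectangle block; the parsing below is bounded
 * by numRectangles (update_read_delta_rects rejects counts above 45) and by the
 * remaining stream length rather than by cbData itself. */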
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
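/* MEM3BLT extends MEMBLT with back/fore colors and a brush (field flags 11..15,
 * hence the shift by 10 below) plus a 16-bit cacheIndex; the color table index is
 * packed into the high byte of cacheId. */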
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
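/* The glyph bitmap size is one byte per started 8-pixel run per row, padded up to a
 * multiple of 4: e.g. cx = 9, cy = 3 gives 2 * 3 = 6 bytes, padded to 8. */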
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
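/* The top bit of the wire bRop2 selects the background mode; only the low five bits
 * form the actual ROP2 code. */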
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
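/* CACHE_BITMAP_V2 packs its header into the secondary order flags: bits 0-1 hold the
 * cacheId, bits 3-6 the bits-per-pixel id and bits 7-15 the v2 flags; the write side
 * below reconstructs that same layout. */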
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
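/* The cached palette is always 256 color quads; numberColors was validated against
 * 256 above, so exactly 256 entries are emitted here. */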
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
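/* For 8x8 1bpp brushes the eight pattern rows are stored in reverse order, hence the
 * descending loops below. */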
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
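/* Bit 15 of the flags word signals that a delete list follows; the low 15 bits carry
 * the offscreen bitmap id, e.g. id 0x1234 with a delete list is sent as 0x9234. */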
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags)
{
	if (fieldFlags & ORDER_FIELD_01)
	{
		Stream_Write_UINT8(s, brush->x);
	}

	if (fieldFlags & ORDER_FIELD_02)
	{
		Stream_Write_UINT8(s, brush->y);
	}

	if (fieldFlags & ORDER_FIELD_03)
	{
		Stream_Write_UINT8(s, brush->style);
	}

	if (brush->style & CACHED_BRUSH)
	{
		brush->hatch = brush->index;
		brush->bpp = BMF_BPP[brush->style & 0x07];

		if (brush->bpp == 0)
			brush->bpp = 1;
	}

	if (fieldFlags & ORDER_FIELD_04)
	{
		Stream_Write_UINT8(s, brush->hatch);
	}

	if (fieldFlags & ORDER_FIELD_05)
	{
		brush->data = (BYTE*)brush->p8x8;
		Stream_Write_UINT8(s, brush->data[7]);
		Stream_Write_UINT8(s, brush->data[6]);
		Stream_Write_UINT8(s, brush->data[5]);
		Stream_Write_UINT8(s, brush->data[4]);
		Stream_Write_UINT8(s, brush->data[3]);
		Stream_Write_UINT8(s, brush->data[2]);
		Stream_Write_UINT8(s, brush->data[1]);
		brush->data[0] = brush->hatch;
	}

	return TRUE;
}
static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags)
{
	if (fieldFlags & ORDER_FIELD_01)
	{
		Stream_Write_UINT8(s, brush->x);
	}

	if (fieldFlags & ORDER_FIELD_02)
	{
		Stream_Write_UINT8(s, brush->y);
	}

	if (fieldFlags & ORDER_FIELD_03)
	{
		Stream_Write_UINT8(s, brush->style);
	}

	if (brush->style & CACHED_BRUSH)
	{
		BOOL rc;
		brush->hatch = brush->index;
		brush->bpp = get_bmf_bpp(brush->style, &rc);

		if (!rc)
			return FALSE;

		if (brush->bpp == 0)
			brush->bpp = 1;
	}

	if (fieldFlags & ORDER_FIELD_04)
	{
		Stream_Write_UINT8(s, brush->hatch);
	}

	if (fieldFlags & ORDER_FIELD_05)
	{
		brush->data = (BYTE*)brush->p8x8;
		Stream_Write_UINT8(s, brush->data[7]);
		Stream_Write_UINT8(s, brush->data[6]);
		Stream_Write_UINT8(s, brush->data[5]);
		Stream_Write_UINT8(s, brush->data[4]);
		Stream_Write_UINT8(s, brush->data[3]);
		Stream_Write_UINT8(s, brush->data[2]);
		Stream_Write_UINT8(s, brush->data[1]);
		brush->data[0] = brush->hatch;
	}

	return TRUE;
}
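The validating helper called by the patched update_write_brush above is not part of that function body; its definition below is assembled from the 'added' hunks in the diff that follows, and is reproduced here only so the call site reads on its own. It replaces the raw BMF_BPP[] table index with an explicit switch and a validity flag:

	/* Bounds-checked replacement for the BMF_BPP[] lookup (from the patch's added hunks). */
	static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)
	{
		if (pValid)
			*pValid = TRUE;
		switch (bmf)
		{
			case 1:
				return 1;
			case 3:
				return 8;
			case 4:
				return 16;
			case 5:
				return 24;
			case 6:
				return 32;
			default:
				WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);
				if (pValid)
					*pValid = FALSE;
				return 0;
		}
	}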
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
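Beyond update_write_brush, the added hunks above route every lookup that used a wire-controlled index into a fixed-size table through the new validating helpers: CBR2_BPP/CBR23_BPP reads become get_cbr2_bpp(), BMF_BPP reads become get_bmf_bpp(), and the reverse BPP_CBR2/BPP_CBR23/BPP_BMF lookups become get_bpp_bmf(). This is what closes the out-of-bounds reads (CWE-125) tracked as CVE-2020-11096 in this record. A representative caller-side excerpt, copied from the added hunks for the cache bitmap v2 read and write paths:

	BOOL rc;

	/* Read path: validate the wire bitsPerPixelId before treating it as a bpp. */
	cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);
	if (!rc)
		goto fail;

	/* Write path: validate the stored bpp before converting it back to a wire id. */
	bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);
	if (!rc)
		return FALSE;

The validity flag turns a formerly silent out-of-range table index into an explicit parse failure.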
98
24
3,271
19,873
https://github.com/FreeRDP/FreeRDP
CVE-2020-11096
['CWE-125']
orders.c
update_write_cache_bitmap_v2_order
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && !update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, 
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
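/* Editor's note (not in the original source): LINE_TO fields 7-10 carry the pen description (bRop2, penStyle, penWidth, penColor). The writer below sets every ORDER_FIELD_NN bit unconditionally, so the corresponding ORDER_FIELD_* read macros on the receiving side will consume each of these values in the same order. */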
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
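/* Editor's note (not in the original source): in MEM3BLT the brush occupies fields 11-15, which is why update_read_brush() below is handed fieldFlags >> 10: field 11 maps to bit (1 << 10), so the shift moves the five brush-presence bits into positions 0-4 where the brush helper expects them. Illustrative value: fieldFlags == 0x7C00 shifted right by 10 yields 0x1F, i.e. all five brush sub-fields present. */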
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
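/* Editor's note (not in the original source): fields 13/14 finish the opaque rectangle (opRight/opBottom). Fields 15-19 are only flagged further below; their payload is emitted by update_write_brush() using the same fieldFlags >> 14 shift that update_read_glyph_index_order() applies when parsing. */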
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
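/* Editor's note (not in the original source): the payload written after this header mirrors update_read_cache_brush_order(): an 8x8 1bpp brush is eight scanline bytes stored in reverse row order, while deeper formats are either a compressed 64-pixel pattern (length 20/24/32 for 8/16/32 bpp) or raw scanlines, again written bottom-up. */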
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
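/* Both cache-bitmap variants are handled by one reader; the compressed flag derived just below tells it how to interpret the bitmap payload. */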
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
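The update_recv_order() routine above keys everything off a single controlFlags byte: when the standard bit is clear the order is parsed as an alternate secondary order (with its type packed into the upper six bits), otherwise the secondary bit selects between secondary and primary parsing. Below is a minimal standalone sketch of just that routing logic; it is illustrative only and not part of this file, and the 0x01/0x02 constants are assumptions meant to mirror the ORDER_STANDARD/ORDER_SECONDARY values used here.

#include <stdio.h>

/* Assumed to match the ORDER_STANDARD / ORDER_SECONDARY flag values used by orders.c. */
#define SKETCH_ORDER_STANDARD 0x01
#define SKETCH_ORDER_SECONDARY 0x02

typedef enum
{
	ORDER_CLASS_PRIMARY,
	ORDER_CLASS_SECONDARY,
	ORDER_CLASS_ALTSEC
} order_class_t;

/* Mirrors the dispatch in update_recv_order(): altsec when the standard bit is
 * clear, otherwise secondary vs. primary depending on the secondary bit. */
static order_class_t classify_order(unsigned char controlFlags, unsigned char* altsecType)
{
	if (!(controlFlags & SKETCH_ORDER_STANDARD))
	{
		*altsecType = (unsigned char)(controlFlags >> 2); /* order type sits in the upper 6 bits */
		return ORDER_CLASS_ALTSEC;
	}

	if (controlFlags & SKETCH_ORDER_SECONDARY)
		return ORDER_CLASS_SECONDARY;

	return ORDER_CLASS_PRIMARY;
}

int main(void)
{
	unsigned char altsecType = 0;
	printf("0x01 -> %d\n", classify_order(0x01, &altsecType));                       /* primary */
	printf("0x03 -> %d\n", classify_order(0x03, &altsecType));                       /* secondary */
	printf("0x0A -> %d (altsec type %u)\n", classify_order(0x0A, &altsecType), altsecType);
	return 0;
}

Note the branch order: the standard bit is tested first, so any control byte without it is treated as alternate secondary regardless of the secondary bit.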
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
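/* The server sent an order for a capability this client never announced; tolerate it only when AllowUnanouncedOrdersFromServer (/relax-order-checks) is set, otherwise reject the update. */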
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
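/* Coarse upper bound on the encoded size; the matching writer reserves this much via Stream_EnsureRemainingCapacity() before emitting the fields. */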
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
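/* cbData holds the byte length of the delta-encoded rectangle list that is parsed next. */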
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
BOOL update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; }
BOOL update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; }
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
98
24
3,271
19,873
https://github.com/FreeRDP/FreeRDP
CVE-2020-11096
['CWE-125']
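Note on the record above (CVE-2020-11096, CWE-125 in FreeRDP's orders.c): the diff replaces fixed lookup tables such as BMF_BPP and BPP_CBR2, which were indexed with identifiers taken straight from the wire, with helper functions (get_cbr2_bpp, get_bmf_bpp, get_bpp_bmf) that validate the identifier before mapping it and report failure to the caller. The following stand-alone sketch condenses that pattern for quick orientation; it is not FreeRDP code, and the names bmf_to_bpp and samples are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Illustrative stand-in for the get_bmf_bpp()-style helpers introduced by the fix:
 * map a wire-format color-depth identifier to bits per pixel, and flag unknown
 * identifiers instead of using them to index a fixed-size lookup table. */
static uint8_t bmf_to_bpp(uint32_t bmf, bool* valid)
{
	if (valid)
		*valid = true;

	switch (bmf)
	{
		case 1:
			return 1;
		case 3:
			return 8;
		case 4:
			return 16;
		case 5:
			return 24;
		case 6:
			return 32;
		default:
			/* Unknown identifier: report failure so the caller can abort parsing. */
			if (valid)
				*valid = false;
			return 0;
	}
}

int main(void)
{
	/* 200 stands in for an attacker-controlled value outside the expected range,
	 * which the helper rejects instead of treating as a table index. */
	const uint32_t samples[] = { 3, 6, 7, 200 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
	{
		bool ok;
		uint8_t bpp = bmf_to_bpp(samples[i], &ok);
		printf("bmf=%" PRIu32 " -> %s (bpp=%u)\n", samples[i],
		       ok ? "valid" : "rejected", (unsigned)bpp);
	}

	return 0;
}

The design point visible in the record's func_after field is the same: callers now receive an explicit validity flag and return FALSE on bad input, which a silent table lookup could not express.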
orders.c
update_write_cache_bitmap_v3_order
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && !update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, 
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next));
			break;

		case ORDER_TYPE_GDIPLUS_FIRST:
			rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first));
			break;

		case ORDER_TYPE_GDIPLUS_NEXT:
			rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next));
			break;

		case ORDER_TYPE_GDIPLUS_END:
			rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end));
			break;

		case ORDER_TYPE_GDIPLUS_CACHE_FIRST:
			rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first));
			break;

		case ORDER_TYPE_GDIPLUS_CACHE_NEXT:
			rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next));
			break;

		case ORDER_TYPE_GDIPLUS_CACHE_END:
			rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end));
			break;

		case ORDER_TYPE_WINDOW:
			/* This order is handled elsewhere. */
			rc = TRUE;
			break;

		case ORDER_TYPE_COMPDESK_FIRST:
			rc = TRUE;
			break;

		default:
			break;
	}

	return rc;
}

static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags)
{
	BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */
	BOOL rc = FALSE;
	rdpContext* context = update->context;
	rdpSettings* settings = context->settings;
	rdpAltSecUpdate* altsec = update->altsec;
	const char* orderName = altsec_order_string(orderType);
	WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName);

	if (!check_alt_order_supported(update->log, settings, orderType, orderName))
		return FALSE;

	if (!read_altsec_order(s, orderType, altsec))
		return FALSE;

	switch (orderType)
	{
		case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP:
			IFCALLRET(altsec->CreateOffscreenBitmap, rc, context,
			          &(altsec->create_offscreen_bitmap));
			break;

		case ORDER_TYPE_SWITCH_SURFACE:
			IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface));
			break;

		case ORDER_TYPE_CREATE_NINE_GRID_BITMAP:
			IFCALLRET(altsec->CreateNineGridBitmap, rc, context,
			          &(altsec->create_nine_grid_bitmap));
			break;

		case ORDER_TYPE_FRAME_MARKER:
			IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker));
			break;

		case ORDER_TYPE_STREAM_BITMAP_FIRST:
			IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first));
			break;

		case ORDER_TYPE_STREAM_BITMAP_NEXT:
			IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next));
			break;

		case ORDER_TYPE_GDIPLUS_FIRST:
			IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first));
			break;

		case ORDER_TYPE_GDIPLUS_NEXT:
			IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next));
			break;

		case ORDER_TYPE_GDIPLUS_END:
			IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end));
			break;

		case ORDER_TYPE_GDIPLUS_CACHE_FIRST:
			IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context,
			          &(altsec->draw_gdiplus_cache_first));
			break;

		case ORDER_TYPE_GDIPLUS_CACHE_NEXT:
			IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context,
			          &(altsec->draw_gdiplus_cache_next));
			break;

		case ORDER_TYPE_GDIPLUS_CACHE_END:
			IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context,
			          &(altsec->draw_gdiplus_cache_end));
			break;

		case ORDER_TYPE_WINDOW:
			rc = update_recv_altsec_window_order(update, s);
			break;

		case ORDER_TYPE_COMPDESK_FIRST:
			rc = TRUE;
			break;

		default:
			break;
	}

	if (!rc)
	{
		WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed",
		           orderName);
	}

	return rc;
}

BOOL update_recv_order(rdpUpdate* update, wStream* s)
{
	BOOL rc;
	BYTE controlFlags;

	if (Stream_GetRemainingLength(s) < 1)
	{
		WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1");
		return FALSE;
	}

	Stream_Read_UINT8(s, controlFlags); /* controlFlags (1 byte) */

	if (!(controlFlags & ORDER_STANDARD))
		rc = update_recv_altsec_order(update, s, controlFlags);
	else if (controlFlags & ORDER_SECONDARY)
		rc = update_recv_secondary_order(update, s, controlFlags);
	else
		rc = update_recv_primary_order(update, s, controlFlags);

	if (!rc)
		WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags);

	return rc;
}
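For reference, the controlFlags dispatch at the end of update_recv_order above can be summarized as a small sketch. This is illustrative only and not part of the source above: the ORDER_STANDARD / ORDER_SECONDARY flags and the >> 2 extraction of the alternate secondary order type are taken from the code shown, while the RDP_ORDER_CLASS enum and the classify_order() helper are hypothetical names introduced purely for explanation.

/* Hypothetical helper mirroring update_recv_order's dispatch (illustration only):
 * - ORDER_STANDARD clear              -> alternate secondary order; the order type
 *                                        sits in the upper six bits (flags >> 2),
 *                                        as update_recv_altsec_order shows
 * - ORDER_STANDARD and ORDER_SECONDARY -> secondary (caching) order
 * - ORDER_STANDARD only                -> primary drawing order */
typedef enum
{
	RDP_ORDER_CLASS_PRIMARY,
	RDP_ORDER_CLASS_SECONDARY,
	RDP_ORDER_CLASS_ALTSEC
} RDP_ORDER_CLASS;

static RDP_ORDER_CLASS classify_order(BYTE controlFlags, BYTE* altsecOrderType)
{
	if (!(controlFlags & ORDER_STANDARD))
	{
		if (altsecOrderType)
			*altsecOrderType = (BYTE)(controlFlags >> 2); /* upper 6 bits carry the type */

		return RDP_ORDER_CLASS_ALTSEC;
	}

	if (controlFlags & ORDER_SECONDARY)
		return RDP_ORDER_CLASS_SECONDARY;

	return RDP_ORDER_CLASS_PRIMARY;
}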
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; }
BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; }
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
98
24
3,271
19,873
https://github.com/FreeRDP/FreeRDP
CVE-2020-11096
['CWE-125']
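The record above captures the CVE-2020-11096 patch in FreeRDP's orders.c: the diff replaces fixed-size lookup tables (BMF_BPP, CBR2_BPP, BPP_BMF, ...) indexed by stream-controlled values with helper functions (get_bmf_bpp, get_cbr2_bpp, get_bpp_bmf) that validate their argument, which closes the out-of-bounds read (CWE-125). As a minimal sketch of that pattern only — not the FreeRDP source; the names kBmfToBpp and bmf_to_bpp_checked are hypothetical — the following compiles standalone and contrasts the unchecked table lookup with a validating helper:

```c
/*
 * Sketch of the bounds-checked lookup pattern used by the CVE-2020-11096 fix.
 * An attacker-controlled index into a small static table can read past its end;
 * the fix style replaces the table with a switch that rejects unknown values.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Vulnerable style: nothing constrains the index to the table size. */
static const uint8_t kBmfToBpp[] = { 0, 1, 0, 8, 16, 24, 32, 0 };

/* Patched style: explicit enumeration of valid inputs, failure reported via *valid. */
static uint8_t bmf_to_bpp_checked(uint32_t bmf, bool* valid)
{
    if (valid)
        *valid = true;
    switch (bmf)
    {
        case 1: return 1;
        case 3: return 8;
        case 4: return 16;
        case 5: return 24;
        case 6: return 32;
        default:
            if (valid)
                *valid = false;
            return 0;
    }
}

int main(void)
{
    bool ok;
    uint32_t untrusted = 200; /* e.g. parsed from a network stream */

    /* The unchecked form needs this guard, which the vulnerable code lacked. */
    if (untrusted < sizeof(kBmfToBpp))
        printf("table lookup: %u\n", kBmfToBpp[untrusted]);

    uint8_t bpp = bmf_to_bpp_checked(untrusted, &ok);
    if (!ok)
        fprintf(stderr, "rejected invalid bitmap format %u\n", untrusted);
    else
        printf("bpp = %u\n", bpp);
    return 0;
}
```

Callers in the patched code check the returned flag and bail out (`goto fail` / `return FALSE`) instead of proceeding with an out-of-range index, which is the behavior the func_after field above shows for update_write_cache_bitmap_v3_order.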
orders.c
update_write_cache_brush_order
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
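/*
 * Minimal sketch (not part of the original source) of the MS-RDPEGDI 2-byte
 * variable-length unsigned field decoded by update_read_2byte_unsigned()
 * above: if the first byte has bit 0x80 set, its low 7 bits are the high
 * byte of a 15-bit value and one more byte supplies the low byte; otherwise
 * the low 7 bits are the value. Helper names are illustrative only.
 */
#if 0
#include <assert.h>
#include <stddef.h>

static size_t decode_2byte_unsigned(const unsigned char* p, unsigned int* value)
{
	if (p[0] & 0x80)
	{
		*value = ((unsigned int)(p[0] & 0x7F) << 8) | p[1]; /* two-byte form */
		return 2;
	}

	*value = p[0] & 0x7F; /* single-byte form */
	return 1;
}

static void decode_2byte_unsigned_example(void)
{
	const unsigned char two[] = { 0x85, 0x12 };
	const unsigned char one[] = { 0x3A };
	unsigned int v = 0;
	assert(decode_2byte_unsigned(two, &v) == 2 && v == 0x0512);
	assert(decode_2byte_unsigned(one, &v) == 1 && v == 0x3A);
}
#endif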
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
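/*
 * Minimal sketch (not part of the original source) of the 1/2-byte signed
 * delta decoded by update_read_delta() above: bit 0x40 of the first byte is
 * the sign (the low 6 bits are sign-extended) and bit 0x80 announces one
 * extra low-order byte. Helper names are illustrative only.
 */
#if 0
#include <assert.h>
#include <stddef.h>

static size_t decode_delta(const unsigned char* p, int* value)
{
	size_t used = 1;

	if (p[0] & 0x40)
		*value = (int)(p[0] | ~0x3F); /* sign-extend the 6-bit magnitude */
	else
		*value = p[0] & 0x3F;

	if (p[0] & 0x80)
	{
		*value = (*value << 8) | p[1]; /* one extra low-order byte follows */
		used = 2;
	}

	return used;
}

static void decode_delta_example(void)
{
	const unsigned char neg[] = { 0x45 };        /* -> -59    */
	const unsigned char wide[] = { 0x81, 0x23 }; /* -> 0x0123 */
	int v = 0;
	assert(decode_delta(neg, &v) == 1 && v == -59);
	assert(decode_delta(wide, &v) == 2 && v == 0x0123);
}
#endif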
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && !update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, 
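/*
 * Minimal sketch (not part of the original source) of how the "zero bits"
 * prefix in update_read_delta_rects() above is consumed: one nibble per
 * rectangle, high nibble first, where a set bit means the corresponding
 * delta (left/top/width/height) is absent from the stream.
 * update_read_delta_points() uses the same idea with 2 bits per point.
 * Struct and helper names are illustrative only.
 */
#if 0
typedef struct
{
	int left_present;
	int top_present;
	int width_present;
	int height_present;
} rect_field_presence;

static rect_field_presence delta_rect_presence(const unsigned char* zeroBits, unsigned int i)
{
	/* Each byte holds two nibbles; rectangle i uses the high nibble when i is even. */
	unsigned char flags = zeroBits[i / 2];
	rect_field_presence p;

	if ((i % 2) == 0)
		flags >>= 4;

	p.left_present = !(flags & 0x08);
	p.top_present = !(flags & 0x04);
	p.width_present = !(flags & 0x02);
	p.height_present = !(flags & 0x01);
	return p;
}
#endif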
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
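/*
 * Minimal sketch (not part of the original source) of the per-channel colour
 * update used by update_read_opaque_rect_order() above: fields 5, 6 and 7
 * each carry one byte that replaces bits 0-7, 8-15 or 16-23 of the cached
 * colour while the other channels are left untouched. Helper name is
 * illustrative only.
 */
#if 0
static unsigned int opaque_rect_update_channel(unsigned int color, unsigned int channel /* 0..2 */,
                                               unsigned char value)
{
	const unsigned int shift = channel * 8;
	color &= ~(0xFFu << shift);               /* clear the addressed channel */
	color |= ((unsigned int)value << shift);  /* splice in the new byte      */
	return color & 0x00FFFFFF;
}
#endif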
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
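/*
 * Minimal sketch (not part of the original source) of the cacheId packing
 * unpacked by update_read_memblt_order() above (and re-packed by
 * update_write_memblt_order()): the 16-bit field 1 carries the colour-table
 * index in its high byte and the bitmap cache id in its low byte.
 * Helper name is illustrative only.
 */
#if 0
static void memblt_split_cache_id(unsigned short packed, unsigned char* cacheId,
                                  unsigned char* colorIndex)
{
	*colorIndex = (unsigned char)(packed >> 8); /* high byte */
	*cacheId = (unsigned char)(packed & 0xFF);  /* low byte  */
}

/* Example: packed == 0x0203 selects colour table 2 and bitmap cache 3. */
#endif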
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
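/*
 * Minimal sketch (not part of the original source) of the glyph bitmap size
 * rule used in update_read_fast_glyph_order() above and in the glyph cache
 * readers further below: each 1bpp scanline is padded to whole bytes and the
 * total is rounded up to a multiple of 4. Helper name is illustrative only.
 */
#if 0
static unsigned int glyph_bitmap_size(unsigned int cx, unsigned int cy)
{
	unsigned int cb = ((cx + 7) / 8) * cy; /* rows padded to a byte boundary */

	if ((cb % 4) != 0)
		cb += 4 - (cb % 4); /* pad the total to a 4-byte multiple */

	return cb;
}

/* Example: cx=9, cy=3 -> 2 bytes per row, 6 bytes raw, padded to 8. */
#endif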
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
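/*
 * Minimal sketch (not part of the original source) of the compression-header
 * bookkeeping done by update_read_cache_bitmap_order() above: unless
 * NO_BITMAP_COMPRESSION_HDR is set in the secondary-order flags, a compressed
 * bitmap is preceded by an 8-byte compression header and bitmapLength
 * includes those 8 bytes, so the remaining payload is bitmapLength - 8.
 * The helper name is illustrative; NO_BITMAP_COMPRESSION_HDR comes from the
 * headers already included by this file.
 */
#if 0
static unsigned int cache_bitmap_payload_length(unsigned int bitmapLength, int compressed,
                                                unsigned short flags)
{
	if (compressed && ((flags & NO_BITMAP_COMPRESSION_HDR) == 0))
		return bitmapLength - 8; /* 8-byte bitmapComprHdr is consumed separately */

	return bitmapLength;
}
#endif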
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
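/*
 * Minimal sketch (not part of the original source) of the bit layout unpacked
 * by update_read_cache_bitmap_v2_order() above from the 16-bit secondary-order
 * flags word: bits 0-1 hold the cacheId, bits 3-6 the CBR2 bits-per-pixel id,
 * and bits 7-15 the CBR2_* flag bits. Struct and helper names are
 * illustrative only.
 */
#if 0
typedef struct
{
	unsigned int cacheId;
	unsigned int bppId;
	unsigned int cbr2Flags;
} cache_bitmap_v2_header_bits;

static cache_bitmap_v2_header_bits cache_bitmap_v2_unpack_flags(unsigned short flags)
{
	cache_bitmap_v2_header_bits h;
	h.cacheId = flags & 0x0003;
	h.bppId = (flags & 0x0078) >> 3;     /* index into CBR2_BPP[]             */
	h.cbr2Flags = (flags & 0xFF80) >> 7; /* CBR2_PERSISTENT_KEY_PRESENT, ...  */
	return h;
}
#endif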
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */

	if (!(controlFlags & ORDER_STANDARD))
		rc = update_recv_altsec_order(update, s, controlFlags);
	else if (controlFlags & ORDER_SECONDARY)
		rc = update_recv_secondary_order(update, s, controlFlags);
	else
		rc = update_recv_primary_order(update, s, controlFlags);

	if (!rc)
		WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags);

	return rc;
}
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
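/* The update_approximate_*_order helpers return a coarse size estimate that is only
 * used to pre-grow the stream via Stream_EnsureRemainingCapacity before writing. */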
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
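/* cbData gives the encoded size of the delta-rectangle blob; the actual parsing and
 * bounds checks happen in update_read_delta_rects, which also caps the count at 45. */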
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
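/* This writer emits every field unconditionally and sets the matching presence bit,
 * so the receiver parses exactly the fields that were written. */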
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
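/* Mem3Blt fields 11-15 describe the brush, hence the fieldFlags >> 10 below; the
 * colorIndex is packed into the high byte of cacheId and split out after parsing. */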
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
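/* GlyphIndex fields 15-19 carry the brush; reader and writer both shift the field
 * flags right by 14 so the brush helpers see them as fields 1-5. */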
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
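/* Each glyph row packs eight pixels per byte ((cx + 7) / 8) and the total is rounded
 * up to a multiple of 4; e.g. cx=10, cy=12 gives ((10 + 7) / 8) * 12 = 24 bytes. */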
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
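/* The top bit of the encoded bRop2 selects the background mix mode; it is stripped
 * immediately afterwards so bRop2 keeps only the 5-bit raster operation. */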
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
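/* *flags was forced to NO_BITMAP_COMPRESSION_HDR above, so the bitmapLength "+ 8"
 * adjustment and the bitmapComprHdr branch below never execute in this writer. */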
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
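/* Writer counterpart of update_read_cache_bitmap_v2_order: cacheId, the bpp id and
 * the order flags are packed back into the 16-bit secondary-order flags word. */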
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
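/* The palette is fixed at 256 entries of 4 bytes each; both the reader above and
 * this writer reject any other numberColors value. */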
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
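/* cb is recomputed from cx/cy and rounded up to a multiple of 4, mirroring the
 * reader, instead of reusing a stored glyph->cb. */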
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
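/* Bit 15 of the flags word advertises a delete list; the offscreen bitmap id itself
 * is limited to the low 15 bits. */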
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
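/* Each case in this switch dispatches to the matching update_read_*_order parser;
 * unsupported primary order types are logged and ignored rather than treated as fatal. */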
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; }
BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; }
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
98
24
3,271
19,873
https://github.com/FreeRDP/FreeRDP
CVE-2020-11096
['CWE-125']
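Annotation (not part of the original record): the diff for CVE-2020-11096 replaces the fixed lookup tables CBR2_BPP[], BMF_BPP[] and BPP_BMF[] with validating helpers (get_cbr2_bpp, get_bmf_bpp, get_bpp_bmf) that reject out-of-range bitmap-format codes instead of silently indexing past the end of a static array — the CWE-125 out-of-bounds read. A minimal sketch of that pattern, using hypothetical standard-C names in place of FreeRDP's BYTE/BOOL/WLog types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors get_bmf_bpp() from the diff: map an iBitmapFormat code to a
 * colour depth, reporting validity through an out-parameter instead of
 * trusting the wire value as an array index. */
static uint8_t lookup_bmf_bpp(uint32_t bmf, bool *valid)
{
    if (valid)
        *valid = true;

    switch (bmf)
    {
        case 1: return 1;
        case 3: return 8;
        case 4: return 16;
        case 5: return 24;
        case 6: return 32;
        default:
            fprintf(stderr, "invalid bmf %u\n", (unsigned)bmf);
            if (valid)
                *valid = false;
            return 0;
    }
}

Callers in the patched code follow the same shape as the added diff lines above, e.g. brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE;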
env.c
mongo_env_read_socket
#if !defined(MONGO_ENV_STANDARD) && (defined(_WIN32) || defined(_WIN64)) /* env_win32.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Networking and other niceties for WIN32. */ #include "env.h" #include <string.h> #ifdef _MSC_VER #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <wspiapi.h> /* addrinfo */ #else #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <winsock2.h> typedef int socklen_t; #endif #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { return closesocket( socket ); } int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { const char *cbuf = buf; int flags = 0; while ( len ) { int sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); conn->connected = 0; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, int len ) { char *cbuf = buf; while ( len ) { int sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&millis, sizeof( millis ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", WSAGetLastError() ); return MONGO_ERROR; } if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, (const char *)&millis, sizeof( millis ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", WSAGetLastError() ); return MONGO_ERROR; } return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { char port_str[NI_MAXSERV]; char errstr[MONGO_ERR_LEN]; int status; struct addrinfo ai_hints; struct addrinfo *ai_list = NULL; struct addrinfo *ai_ptr = NULL; conn->sock = 0; conn->connected = 0; bson_sprintf( port_str, "%d", port ); memset( &ai_hints, 0, sizeof( ai_hints ) ); ai_hints.ai_family = AF_UNSPEC; ai_hints.ai_socktype = SOCK_STREAM; ai_hints.ai_protocol = IPPROTO_TCP; status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); if ( status != 0 ) { bson_sprintf( errstr, "getaddrinfo failed with error %d", status ); __mongo_set_error( conn, MONGO_CONN_ADDR_FAIL, errstr, WSAGetLastError() ); return MONGO_ERROR; } for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); if ( conn->sock < 0 ) { __mongo_set_error( conn, MONGO_SOCKET_ERROR, "socket() failed", WSAGetLastError() ); conn->sock = 0; continue; } status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); if ( status != 0 ) { __mongo_set_error( conn, MONGO_SOCKET_ERROR, "connect() failed", WSAGetLastError() ); mongo_env_close_socket( conn->sock ); conn->sock = 0; continue; } if ( ai_ptr->ai_protocol == 
IPPROTO_TCP ) { int flag = 1; setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( void * ) &flag, sizeof( flag ) ); if ( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); } conn->connected = 1; break; } freeaddrinfo( ai_list ); if ( ! conn->connected ) { conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } else { mongo_clear_errors( conn ); return MONGO_OK; } } MONGO_EXPORT int mongo_env_sock_init( void ) { WSADATA wsaData; WORD wVers; static int called_once; static int retval; if (called_once) return retval; called_once = 1; wVers = MAKEWORD(1, 1); retval = (WSAStartup(wVers, &wsaData) == 0); return retval; } #elif !defined(MONGO_ENV_STANDARD) && (defined(__APPLE__) || defined(__linux) || defined(__unix) || defined(__posix)) /* env_posix.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Networking and other niceties for POSIX systems. */ #include "env.h" #include <string.h> #include <errno.h> #include <sys/time.h> #include <arpa/inet.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <fcntl.h> #include <unistd.h> #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { return close( socket ); } int mongo_env_sock_init( void ) { return 0; } int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { const char *cbuf = buf; #ifdef __APPLE__ int flags = 0; #else int flags = MSG_NOSIGNAL; #endif while ( len ) { int sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { if (errno == EPIPE) conn->connected = 0; __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, int len ) { char *cbuf = buf; while ( len ) { int sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { struct timeval tv; tv.tv_sec = millis / 1000; tv.tv_usec = ( millis % 1000 ) * 1000; if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof( tv ) ) == -1 ) { conn->err = MONGO_IO_ERROR; __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", errno ); return MONGO_ERROR; } if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof( tv ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", errno ); return MONGO_ERROR; } return MONGO_OK; } static int mongo_env_unix_socket_connect( mongo *conn, const char *sock_path ) { struct sockaddr_un addr; int status, len; conn->connected = 0; conn->sock = socket( AF_UNIX, SOCK_STREAM, 0 ); if ( conn->sock < 0 ) { conn->sock = 0; return MONGO_ERROR; } addr.sun_family = AF_UNIX; strncpy( addr.sun_path, sock_path, sizeof(addr.sun_path) - 1 ); len = sizeof( addr ); 
status = connect( conn->sock, (struct sockaddr *) &addr, len ); if( status < 0 ) { mongo_env_close_socket( conn->sock ); conn->sock = 0; conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } conn->connected = 1; return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { char port_str[NI_MAXSERV]; int status; struct addrinfo ai_hints; struct addrinfo *ai_list = NULL; struct addrinfo *ai_ptr = NULL; if ( port < 0 ) { return mongo_env_unix_socket_connect( conn, host ); } conn->sock = 0; conn->connected = 0; sprintf(port_str,"%d",port); bson_sprintf( port_str, "%d", port ); memset( &ai_hints, 0, sizeof( ai_hints ) ); #ifdef AI_ADDRCONFIG ai_hints.ai_flags = AI_ADDRCONFIG; #endif ai_hints.ai_family = AF_UNSPEC; ai_hints.ai_socktype = SOCK_STREAM; status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); if ( status != 0 ) { bson_errprintf( "getaddrinfo failed: %s", gai_strerror( status ) ); conn->err = MONGO_CONN_ADDR_FAIL; return MONGO_ERROR; } for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); if ( conn->sock < 0 ) { conn->sock = 0; continue; } status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); if ( status != 0 ) { mongo_env_close_socket( conn->sock ); conn->sock = 0; continue; } #if __APPLE__ { int flag = 1; setsockopt( conn->sock, SOL_SOCKET, SO_NOSIGPIPE, ( void * ) &flag, sizeof( flag ) ); } #endif if ( ai_ptr->ai_protocol == IPPROTO_TCP ) { int flag = 1; setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( void * ) &flag, sizeof( flag ) ); if ( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); } conn->connected = 1; break; } freeaddrinfo( ai_list ); if ( ! conn->connected ) { conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } return MONGO_OK; } #else /* env_standard.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Vanilla networking designed to work on all systems. 
*/ #include "env.h" #include <errno.h> #include <string.h> #ifdef _WIN32 #ifdef _MSC_VER #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <wspiapi.h> /* addrinfo */ #else #include <windows.h> #include <winsock.h> typedef int socklen_t; #endif #else #include <arpa/inet.h> #include <sys/types.h> #include <sys/socket.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <fcntl.h> #include <unistd.h> #endif #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { #ifdef _WIN32 return closesocket( socket ); #else return close( socket ); #endif } int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { const char *cbuf = buf; #ifdef _WIN32 int flags = 0; #else #ifdef __APPLE__ int flags = 0; #else int flags = MSG_NOSIGNAL; #endif #endif while ( len ) { int sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { if (errno == EPIPE) conn->connected = 0; conn->err = MONGO_IO_ERROR; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, int len ) { char *cbuf = buf; while ( len ) { int sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { conn->err = MONGO_IO_ERROR; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } /* This is a no-op in the generic implementation. */ int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { struct sockaddr_in sa; socklen_t addressSize; int flag = 1; if ( ( conn->sock = socket( AF_INET, SOCK_STREAM, 0 ) ) < 0 ) { conn->sock = 0; conn->err = MONGO_CONN_NO_SOCKET; return MONGO_ERROR; } memset( sa.sin_zero , 0 , sizeof( sa.sin_zero ) ); sa.sin_family = AF_INET; sa.sin_port = htons( port ); sa.sin_addr.s_addr = inet_addr( host ); addressSize = sizeof( sa ); if ( connect( conn->sock, ( struct sockaddr * )&sa, addressSize ) == -1 ) { mongo_env_close_socket( conn->sock ); conn->connected = 0; conn->sock = 0; conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( char * ) &flag, sizeof( flag ) ); if( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); conn->connected = 1; return MONGO_OK; } MONGO_EXPORT int mongo_env_sock_init( void ) { #if defined(_WIN32) WSADATA wsaData; WORD wVers; #elif defined(SIGPIPE) struct sigaction act; #endif static int called_once; static int retval; if (called_once) return retval; called_once = 1; #if defined(_WIN32) wVers = MAKEWORD(1, 1); retval = (WSAStartup(wVers, &wsaData) == 0); #elif defined(MACINTOSH) GUSISetup(GUSIwithInternetSockets); retval = 1; #elif defined(SIGPIPE) retval = 1; if (sigaction(SIGPIPE, (struct sigaction *)NULL, &act) < 0) retval = 0; else if (act.sa_handler == SIG_DFL) { act.sa_handler = SIG_IGN; if (sigaction(SIGPIPE, &act, (struct sigaction *)NULL) < 0) retval = 0; } #endif return retval; } #endif
#if !defined(MONGO_ENV_STANDARD) && (defined(_WIN32) || defined(_WIN64)) /* env_win32.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Networking and other niceties for WIN32. */ #include "env.h" #include <string.h> #ifdef _MSC_VER #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <wspiapi.h> /* addrinfo */ #else #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <winsock2.h> typedef int socklen_t; #endif #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { return closesocket( socket ); } int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) { const char *cbuf = buf; int flags = 0; while ( len ) { size_t sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); conn->connected = 0; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) { char *cbuf = buf; while ( len ) { size_t sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&millis, sizeof( millis ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", WSAGetLastError() ); return MONGO_ERROR; } if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, (const char *)&millis, sizeof( millis ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", WSAGetLastError() ); return MONGO_ERROR; } return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { char port_str[NI_MAXSERV]; char errstr[MONGO_ERR_LEN]; int status; struct addrinfo ai_hints; struct addrinfo *ai_list = NULL; struct addrinfo *ai_ptr = NULL; conn->sock = 0; conn->connected = 0; bson_sprintf( port_str, "%d", port ); memset( &ai_hints, 0, sizeof( ai_hints ) ); ai_hints.ai_family = AF_UNSPEC; ai_hints.ai_socktype = SOCK_STREAM; ai_hints.ai_protocol = IPPROTO_TCP; status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); if ( status != 0 ) { bson_sprintf( errstr, "getaddrinfo failed with error %d", status ); __mongo_set_error( conn, MONGO_CONN_ADDR_FAIL, errstr, WSAGetLastError() ); return MONGO_ERROR; } for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); if ( conn->sock < 0 ) { __mongo_set_error( conn, MONGO_SOCKET_ERROR, "socket() failed", WSAGetLastError() ); conn->sock = 0; continue; } status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); if ( status != 0 ) { __mongo_set_error( conn, MONGO_SOCKET_ERROR, "connect() failed", WSAGetLastError() ); mongo_env_close_socket( conn->sock ); conn->sock = 0; continue; } if ( 
ai_ptr->ai_protocol == IPPROTO_TCP ) { int flag = 1; setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( void * ) &flag, sizeof( flag ) ); if ( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); } conn->connected = 1; break; } freeaddrinfo( ai_list ); if ( ! conn->connected ) { conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } else { mongo_clear_errors( conn ); return MONGO_OK; } } MONGO_EXPORT int mongo_env_sock_init( void ) { WSADATA wsaData; WORD wVers; static int called_once; static int retval; if (called_once) return retval; called_once = 1; wVers = MAKEWORD(1, 1); retval = (WSAStartup(wVers, &wsaData) == 0); return retval; } #elif !defined(MONGO_ENV_STANDARD) && (defined(__APPLE__) || defined(__linux) || defined(__unix) || defined(__posix)) /* env_posix.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Networking and other niceties for POSIX systems. */ #include "env.h" #include <string.h> #include <errno.h> #include <sys/time.h> #include <arpa/inet.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <fcntl.h> #include <unistd.h> #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { return close( socket ); } int mongo_env_sock_init( void ) { return 0; } int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) { const char *cbuf = buf; #ifdef __APPLE__ int flags = 0; #else int flags = MSG_NOSIGNAL; #endif while ( len ) { size_t sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { if (errno == EPIPE) conn->connected = 0; __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) { char *cbuf = buf; while ( len ) { size_t sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { struct timeval tv; tv.tv_sec = millis / 1000; tv.tv_usec = ( millis % 1000 ) * 1000; if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof( tv ) ) == -1 ) { conn->err = MONGO_IO_ERROR; __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", errno ); return MONGO_ERROR; } if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof( tv ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", errno ); return MONGO_ERROR; } return MONGO_OK; } static int mongo_env_unix_socket_connect( mongo *conn, const char *sock_path ) { struct sockaddr_un addr; int status, len; conn->connected = 0; conn->sock = socket( AF_UNIX, SOCK_STREAM, 0 ); if ( conn->sock < 0 ) { conn->sock = 0; return MONGO_ERROR; } addr.sun_family = AF_UNIX; strncpy( addr.sun_path, sock_path, 
sizeof(addr.sun_path) - 1 ); len = sizeof( addr ); status = connect( conn->sock, (struct sockaddr *) &addr, len ); if( status < 0 ) { mongo_env_close_socket( conn->sock ); conn->sock = 0; conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } conn->connected = 1; return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { char port_str[NI_MAXSERV]; int status; struct addrinfo ai_hints; struct addrinfo *ai_list = NULL; struct addrinfo *ai_ptr = NULL; if ( port < 0 ) { return mongo_env_unix_socket_connect( conn, host ); } conn->sock = 0; conn->connected = 0; sprintf(port_str,"%d",port); bson_sprintf( port_str, "%d", port ); memset( &ai_hints, 0, sizeof( ai_hints ) ); #ifdef AI_ADDRCONFIG ai_hints.ai_flags = AI_ADDRCONFIG; #endif ai_hints.ai_family = AF_UNSPEC; ai_hints.ai_socktype = SOCK_STREAM; status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); if ( status != 0 ) { bson_errprintf( "getaddrinfo failed: %s", gai_strerror( status ) ); conn->err = MONGO_CONN_ADDR_FAIL; return MONGO_ERROR; } for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); if ( conn->sock < 0 ) { conn->sock = 0; continue; } status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); if ( status != 0 ) { mongo_env_close_socket( conn->sock ); conn->sock = 0; continue; } #if __APPLE__ { int flag = 1; setsockopt( conn->sock, SOL_SOCKET, SO_NOSIGPIPE, ( void * ) &flag, sizeof( flag ) ); } #endif if ( ai_ptr->ai_protocol == IPPROTO_TCP ) { int flag = 1; setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( void * ) &flag, sizeof( flag ) ); if ( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); } conn->connected = 1; break; } freeaddrinfo( ai_list ); if ( ! conn->connected ) { conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } return MONGO_OK; } #else /* env_standard.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Vanilla networking designed to work on all systems. 
*/ #include "env.h" #include <errno.h> #include <string.h> #ifdef _WIN32 #ifdef _MSC_VER #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <wspiapi.h> /* addrinfo */ #else #include <windows.h> #include <winsock.h> typedef int socklen_t; #endif #else #include <arpa/inet.h> #include <sys/types.h> #include <sys/socket.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <fcntl.h> #include <unistd.h> #endif #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { #ifdef _WIN32 return closesocket( socket ); #else return close( socket ); #endif } int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) { const char *cbuf = buf; #ifdef _WIN32 int flags = 0; #else #ifdef __APPLE__ int flags = 0; #else int flags = MSG_NOSIGNAL; #endif #endif while ( len ) { size_t sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { if (errno == EPIPE) conn->connected = 0; conn->err = MONGO_IO_ERROR; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) { char *cbuf = buf; while ( len ) { size_t sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { conn->err = MONGO_IO_ERROR; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } /* This is a no-op in the generic implementation. */ int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { struct sockaddr_in sa; socklen_t addressSize; int flag = 1; if ( ( conn->sock = socket( AF_INET, SOCK_STREAM, 0 ) ) < 0 ) { conn->sock = 0; conn->err = MONGO_CONN_NO_SOCKET; return MONGO_ERROR; } memset( sa.sin_zero , 0 , sizeof( sa.sin_zero ) ); sa.sin_family = AF_INET; sa.sin_port = htons( port ); sa.sin_addr.s_addr = inet_addr( host ); addressSize = sizeof( sa ); if ( connect( conn->sock, ( struct sockaddr * )&sa, addressSize ) == -1 ) { mongo_env_close_socket( conn->sock ); conn->connected = 0; conn->sock = 0; conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( char * ) &flag, sizeof( flag ) ); if( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); conn->connected = 1; return MONGO_OK; } MONGO_EXPORT int mongo_env_sock_init( void ) { #if defined(_WIN32) WSADATA wsaData; WORD wVers; #elif defined(SIGPIPE) struct sigaction act; #endif static int called_once; static int retval; if (called_once) return retval; called_once = 1; #if defined(_WIN32) wVers = MAKEWORD(1, 1); retval = (WSAStartup(wVers, &wsaData) == 0); #elif defined(MACINTOSH) GUSISetup(GUSIwithInternetSockets); retval = 1; #elif defined(SIGPIPE) retval = 1; if (sigaction(SIGPIPE, (struct sigaction *)NULL, &act) < 0) retval = 0; else if (act.sa_handler == SIG_DFL) { act.sa_handler = SIG_IGN; if (sigaction(SIGPIPE, &act, (struct sigaction *)NULL) < 0) retval = 0; } #endif return retval; } #endif
int mongo_env_read_socket( mongo *conn, void *buf, int len ) { char *cbuf = buf; while ( len ) { int sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; }
int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) { char *cbuf = buf; while ( len ) { size_t sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; }
{'added': [(41, 'int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) {'), (46, ' size_t sent = send( conn->sock, cbuf, len, flags );'), (59, 'int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) {'), (63, ' size_t sent = recv( conn->sock, cbuf, len, 0 );'), (228, 'int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) {'), (237, ' size_t sent = send( conn->sock, cbuf, len, flags );'), (251, 'int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) {'), (254, ' size_t sent = recv( conn->sock, cbuf, len, 0 );'), (446, 'int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) {'), (459, ' size_t sent = send( conn->sock, cbuf, len, flags );'), (473, 'int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) {'), (476, ' size_t sent = recv( conn->sock, cbuf, len, 0 );')], 'deleted': [(41, 'int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {'), (46, ' int sent = send( conn->sock, cbuf, len, flags );'), (59, 'int mongo_env_read_socket( mongo *conn, void *buf, int len ) {'), (63, ' int sent = recv( conn->sock, cbuf, len, 0 );'), (228, 'int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {'), (237, ' int sent = send( conn->sock, cbuf, len, flags );'), (251, 'int mongo_env_read_socket( mongo *conn, void *buf, int len ) {'), (254, ' int sent = recv( conn->sock, cbuf, len, 0 );'), (446, 'int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {'), (459, ' int sent = send( conn->sock, cbuf, len, flags );'), (473, 'int mongo_env_read_socket( mongo *conn, void *buf, int len ) {'), (476, ' int sent = recv( conn->sock, cbuf, len, 0 );')]}
12
12
367
2,238
https://github.com/10gen-archive/mongo-c-driver-legacy
CVE-2020-12135
['CWE-190']
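Annotation (not part of the original record): the CVE-2020-12135 patch changes the len parameter and the local sent variable of mongo_env_write_socket/mongo_env_read_socket from int to size_t in all three back ends (win32, posix, standard), so large buffer lengths can no longer wrap a signed int inside the send/recv loops (CWE-190). A minimal sketch of the hardened read loop, assuming a POSIX recv(); unlike the upstream patch, which stores recv's result directly in a size_t, this sketch keeps the return value in a ssize_t so the 0/-1 cases are detected cleanly:

#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Hypothetical helper: read exactly len bytes from sock into buf.
 * len is size_t, so a caller-supplied length cannot overflow a signed int. */
static int read_exact(int sock, void *buf, size_t len)
{
    char *cbuf = buf;

    while (len)
    {
        ssize_t got = recv(sock, cbuf, len, 0);

        if (got <= 0)   /* 0 = peer closed the socket, -1 = error */
            return -1;

        cbuf += got;
        len -= (size_t)got;
    }

    return 0;
}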
env.c
mongo_env_write_socket
#if !defined(MONGO_ENV_STANDARD) && (defined(_WIN32) || defined(_WIN64)) /* env_win32.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Networking and other niceties for WIN32. */ #include "env.h" #include <string.h> #ifdef _MSC_VER #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <wspiapi.h> /* addrinfo */ #else #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <winsock2.h> typedef int socklen_t; #endif #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { return closesocket( socket ); } int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { const char *cbuf = buf; int flags = 0; while ( len ) { int sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); conn->connected = 0; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, int len ) { char *cbuf = buf; while ( len ) { int sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&millis, sizeof( millis ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", WSAGetLastError() ); return MONGO_ERROR; } if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, (const char *)&millis, sizeof( millis ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", WSAGetLastError() ); return MONGO_ERROR; } return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { char port_str[NI_MAXSERV]; char errstr[MONGO_ERR_LEN]; int status; struct addrinfo ai_hints; struct addrinfo *ai_list = NULL; struct addrinfo *ai_ptr = NULL; conn->sock = 0; conn->connected = 0; bson_sprintf( port_str, "%d", port ); memset( &ai_hints, 0, sizeof( ai_hints ) ); ai_hints.ai_family = AF_UNSPEC; ai_hints.ai_socktype = SOCK_STREAM; ai_hints.ai_protocol = IPPROTO_TCP; status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); if ( status != 0 ) { bson_sprintf( errstr, "getaddrinfo failed with error %d", status ); __mongo_set_error( conn, MONGO_CONN_ADDR_FAIL, errstr, WSAGetLastError() ); return MONGO_ERROR; } for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); if ( conn->sock < 0 ) { __mongo_set_error( conn, MONGO_SOCKET_ERROR, "socket() failed", WSAGetLastError() ); conn->sock = 0; continue; } status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); if ( status != 0 ) { __mongo_set_error( conn, MONGO_SOCKET_ERROR, "connect() failed", WSAGetLastError() ); mongo_env_close_socket( conn->sock ); conn->sock = 0; continue; } if ( ai_ptr->ai_protocol == 
IPPROTO_TCP ) { int flag = 1; setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( void * ) &flag, sizeof( flag ) ); if ( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); } conn->connected = 1; break; } freeaddrinfo( ai_list ); if ( ! conn->connected ) { conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } else { mongo_clear_errors( conn ); return MONGO_OK; } } MONGO_EXPORT int mongo_env_sock_init( void ) { WSADATA wsaData; WORD wVers; static int called_once; static int retval; if (called_once) return retval; called_once = 1; wVers = MAKEWORD(1, 1); retval = (WSAStartup(wVers, &wsaData) == 0); return retval; } #elif !defined(MONGO_ENV_STANDARD) && (defined(__APPLE__) || defined(__linux) || defined(__unix) || defined(__posix)) /* env_posix.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Networking and other niceties for POSIX systems. */ #include "env.h" #include <string.h> #include <errno.h> #include <sys/time.h> #include <arpa/inet.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <fcntl.h> #include <unistd.h> #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { return close( socket ); } int mongo_env_sock_init( void ) { return 0; } int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { const char *cbuf = buf; #ifdef __APPLE__ int flags = 0; #else int flags = MSG_NOSIGNAL; #endif while ( len ) { int sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { if (errno == EPIPE) conn->connected = 0; __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, int len ) { char *cbuf = buf; while ( len ) { int sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { struct timeval tv; tv.tv_sec = millis / 1000; tv.tv_usec = ( millis % 1000 ) * 1000; if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof( tv ) ) == -1 ) { conn->err = MONGO_IO_ERROR; __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", errno ); return MONGO_ERROR; } if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof( tv ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", errno ); return MONGO_ERROR; } return MONGO_OK; } static int mongo_env_unix_socket_connect( mongo *conn, const char *sock_path ) { struct sockaddr_un addr; int status, len; conn->connected = 0; conn->sock = socket( AF_UNIX, SOCK_STREAM, 0 ); if ( conn->sock < 0 ) { conn->sock = 0; return MONGO_ERROR; } addr.sun_family = AF_UNIX; strncpy( addr.sun_path, sock_path, sizeof(addr.sun_path) - 1 ); len = sizeof( addr ); 
status = connect( conn->sock, (struct sockaddr *) &addr, len ); if( status < 0 ) { mongo_env_close_socket( conn->sock ); conn->sock = 0; conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } conn->connected = 1; return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { char port_str[NI_MAXSERV]; int status; struct addrinfo ai_hints; struct addrinfo *ai_list = NULL; struct addrinfo *ai_ptr = NULL; if ( port < 0 ) { return mongo_env_unix_socket_connect( conn, host ); } conn->sock = 0; conn->connected = 0; sprintf(port_str,"%d",port); bson_sprintf( port_str, "%d", port ); memset( &ai_hints, 0, sizeof( ai_hints ) ); #ifdef AI_ADDRCONFIG ai_hints.ai_flags = AI_ADDRCONFIG; #endif ai_hints.ai_family = AF_UNSPEC; ai_hints.ai_socktype = SOCK_STREAM; status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); if ( status != 0 ) { bson_errprintf( "getaddrinfo failed: %s", gai_strerror( status ) ); conn->err = MONGO_CONN_ADDR_FAIL; return MONGO_ERROR; } for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); if ( conn->sock < 0 ) { conn->sock = 0; continue; } status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); if ( status != 0 ) { mongo_env_close_socket( conn->sock ); conn->sock = 0; continue; } #if __APPLE__ { int flag = 1; setsockopt( conn->sock, SOL_SOCKET, SO_NOSIGPIPE, ( void * ) &flag, sizeof( flag ) ); } #endif if ( ai_ptr->ai_protocol == IPPROTO_TCP ) { int flag = 1; setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( void * ) &flag, sizeof( flag ) ); if ( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); } conn->connected = 1; break; } freeaddrinfo( ai_list ); if ( ! conn->connected ) { conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } return MONGO_OK; } #else /* env_standard.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Vanilla networking designed to work on all systems. 
*/ #include "env.h" #include <errno.h> #include <string.h> #ifdef _WIN32 #ifdef _MSC_VER #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <wspiapi.h> /* addrinfo */ #else #include <windows.h> #include <winsock.h> typedef int socklen_t; #endif #else #include <arpa/inet.h> #include <sys/types.h> #include <sys/socket.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <fcntl.h> #include <unistd.h> #endif #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { #ifdef _WIN32 return closesocket( socket ); #else return close( socket ); #endif } int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { const char *cbuf = buf; #ifdef _WIN32 int flags = 0; #else #ifdef __APPLE__ int flags = 0; #else int flags = MSG_NOSIGNAL; #endif #endif while ( len ) { int sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { if (errno == EPIPE) conn->connected = 0; conn->err = MONGO_IO_ERROR; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, int len ) { char *cbuf = buf; while ( len ) { int sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { conn->err = MONGO_IO_ERROR; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } /* This is a no-op in the generic implementation. */ int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { struct sockaddr_in sa; socklen_t addressSize; int flag = 1; if ( ( conn->sock = socket( AF_INET, SOCK_STREAM, 0 ) ) < 0 ) { conn->sock = 0; conn->err = MONGO_CONN_NO_SOCKET; return MONGO_ERROR; } memset( sa.sin_zero , 0 , sizeof( sa.sin_zero ) ); sa.sin_family = AF_INET; sa.sin_port = htons( port ); sa.sin_addr.s_addr = inet_addr( host ); addressSize = sizeof( sa ); if ( connect( conn->sock, ( struct sockaddr * )&sa, addressSize ) == -1 ) { mongo_env_close_socket( conn->sock ); conn->connected = 0; conn->sock = 0; conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( char * ) &flag, sizeof( flag ) ); if( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); conn->connected = 1; return MONGO_OK; } MONGO_EXPORT int mongo_env_sock_init( void ) { #if defined(_WIN32) WSADATA wsaData; WORD wVers; #elif defined(SIGPIPE) struct sigaction act; #endif static int called_once; static int retval; if (called_once) return retval; called_once = 1; #if defined(_WIN32) wVers = MAKEWORD(1, 1); retval = (WSAStartup(wVers, &wsaData) == 0); #elif defined(MACINTOSH) GUSISetup(GUSIwithInternetSockets); retval = 1; #elif defined(SIGPIPE) retval = 1; if (sigaction(SIGPIPE, (struct sigaction *)NULL, &act) < 0) retval = 0; else if (act.sa_handler == SIG_DFL) { act.sa_handler = SIG_IGN; if (sigaction(SIGPIPE, &act, (struct sigaction *)NULL) < 0) retval = 0; } #endif return retval; } #endif
#if !defined(MONGO_ENV_STANDARD) && (defined(_WIN32) || defined(_WIN64)) /* env_win32.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Networking and other niceties for WIN32. */ #include "env.h" #include <string.h> #ifdef _MSC_VER #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <wspiapi.h> /* addrinfo */ #else #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <winsock2.h> typedef int socklen_t; #endif #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { return closesocket( socket ); } int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) { const char *cbuf = buf; int flags = 0; while ( len ) { size_t sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); conn->connected = 0; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) { char *cbuf = buf; while ( len ) { size_t sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&millis, sizeof( millis ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", WSAGetLastError() ); return MONGO_ERROR; } if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, (const char *)&millis, sizeof( millis ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", WSAGetLastError() ); return MONGO_ERROR; } return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { char port_str[NI_MAXSERV]; char errstr[MONGO_ERR_LEN]; int status; struct addrinfo ai_hints; struct addrinfo *ai_list = NULL; struct addrinfo *ai_ptr = NULL; conn->sock = 0; conn->connected = 0; bson_sprintf( port_str, "%d", port ); memset( &ai_hints, 0, sizeof( ai_hints ) ); ai_hints.ai_family = AF_UNSPEC; ai_hints.ai_socktype = SOCK_STREAM; ai_hints.ai_protocol = IPPROTO_TCP; status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); if ( status != 0 ) { bson_sprintf( errstr, "getaddrinfo failed with error %d", status ); __mongo_set_error( conn, MONGO_CONN_ADDR_FAIL, errstr, WSAGetLastError() ); return MONGO_ERROR; } for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); if ( conn->sock < 0 ) { __mongo_set_error( conn, MONGO_SOCKET_ERROR, "socket() failed", WSAGetLastError() ); conn->sock = 0; continue; } status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); if ( status != 0 ) { __mongo_set_error( conn, MONGO_SOCKET_ERROR, "connect() failed", WSAGetLastError() ); mongo_env_close_socket( conn->sock ); conn->sock = 0; continue; } if ( 
ai_ptr->ai_protocol == IPPROTO_TCP ) { int flag = 1; setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( void * ) &flag, sizeof( flag ) ); if ( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); } conn->connected = 1; break; } freeaddrinfo( ai_list ); if ( ! conn->connected ) { conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } else { mongo_clear_errors( conn ); return MONGO_OK; } } MONGO_EXPORT int mongo_env_sock_init( void ) { WSADATA wsaData; WORD wVers; static int called_once; static int retval; if (called_once) return retval; called_once = 1; wVers = MAKEWORD(1, 1); retval = (WSAStartup(wVers, &wsaData) == 0); return retval; } #elif !defined(MONGO_ENV_STANDARD) && (defined(__APPLE__) || defined(__linux) || defined(__unix) || defined(__posix)) /* env_posix.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Networking and other niceties for POSIX systems. */ #include "env.h" #include <string.h> #include <errno.h> #include <sys/time.h> #include <arpa/inet.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <fcntl.h> #include <unistd.h> #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { return close( socket ); } int mongo_env_sock_init( void ) { return 0; } int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) { const char *cbuf = buf; #ifdef __APPLE__ int flags = 0; #else int flags = MSG_NOSIGNAL; #endif while ( len ) { size_t sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { if (errno == EPIPE) conn->connected = 0; __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) { char *cbuf = buf; while ( len ) { size_t sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { struct timeval tv; tv.tv_sec = millis / 1000; tv.tv_usec = ( millis % 1000 ) * 1000; if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof( tv ) ) == -1 ) { conn->err = MONGO_IO_ERROR; __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", errno ); return MONGO_ERROR; } if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof( tv ) ) == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", errno ); return MONGO_ERROR; } return MONGO_OK; } static int mongo_env_unix_socket_connect( mongo *conn, const char *sock_path ) { struct sockaddr_un addr; int status, len; conn->connected = 0; conn->sock = socket( AF_UNIX, SOCK_STREAM, 0 ); if ( conn->sock < 0 ) { conn->sock = 0; return MONGO_ERROR; } addr.sun_family = AF_UNIX; strncpy( addr.sun_path, sock_path, 
sizeof(addr.sun_path) - 1 ); len = sizeof( addr ); status = connect( conn->sock, (struct sockaddr *) &addr, len ); if( status < 0 ) { mongo_env_close_socket( conn->sock ); conn->sock = 0; conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } conn->connected = 1; return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { char port_str[NI_MAXSERV]; int status; struct addrinfo ai_hints; struct addrinfo *ai_list = NULL; struct addrinfo *ai_ptr = NULL; if ( port < 0 ) { return mongo_env_unix_socket_connect( conn, host ); } conn->sock = 0; conn->connected = 0; sprintf(port_str,"%d",port); bson_sprintf( port_str, "%d", port ); memset( &ai_hints, 0, sizeof( ai_hints ) ); #ifdef AI_ADDRCONFIG ai_hints.ai_flags = AI_ADDRCONFIG; #endif ai_hints.ai_family = AF_UNSPEC; ai_hints.ai_socktype = SOCK_STREAM; status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); if ( status != 0 ) { bson_errprintf( "getaddrinfo failed: %s", gai_strerror( status ) ); conn->err = MONGO_CONN_ADDR_FAIL; return MONGO_ERROR; } for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); if ( conn->sock < 0 ) { conn->sock = 0; continue; } status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); if ( status != 0 ) { mongo_env_close_socket( conn->sock ); conn->sock = 0; continue; } #if __APPLE__ { int flag = 1; setsockopt( conn->sock, SOL_SOCKET, SO_NOSIGPIPE, ( void * ) &flag, sizeof( flag ) ); } #endif if ( ai_ptr->ai_protocol == IPPROTO_TCP ) { int flag = 1; setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( void * ) &flag, sizeof( flag ) ); if ( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); } conn->connected = 1; break; } freeaddrinfo( ai_list ); if ( ! conn->connected ) { conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } return MONGO_OK; } #else /* env_standard.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Vanilla networking designed to work on all systems. 
*/ #include "env.h" #include <errno.h> #include <string.h> #ifdef _WIN32 #ifdef _MSC_VER #include <ws2tcpip.h> /* send,recv,socklen_t etc */ #include <wspiapi.h> /* addrinfo */ #else #include <windows.h> #include <winsock.h> typedef int socklen_t; #endif #else #include <arpa/inet.h> #include <sys/types.h> #include <sys/socket.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <fcntl.h> #include <unistd.h> #endif #ifndef NI_MAXSERV # define NI_MAXSERV 32 #endif int mongo_env_close_socket( int socket ) { #ifdef _WIN32 return closesocket( socket ); #else return close( socket ); #endif } int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) { const char *cbuf = buf; #ifdef _WIN32 int flags = 0; #else #ifdef __APPLE__ int flags = 0; #else int flags = MSG_NOSIGNAL; #endif #endif while ( len ) { size_t sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { if (errno == EPIPE) conn->connected = 0; conn->err = MONGO_IO_ERROR; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) { char *cbuf = buf; while ( len ) { size_t sent = recv( conn->sock, cbuf, len, 0 ); if ( sent == 0 || sent == -1 ) { conn->err = MONGO_IO_ERROR; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; } /* This is a no-op in the generic implementation. */ int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { return MONGO_OK; } int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { struct sockaddr_in sa; socklen_t addressSize; int flag = 1; if ( ( conn->sock = socket( AF_INET, SOCK_STREAM, 0 ) ) < 0 ) { conn->sock = 0; conn->err = MONGO_CONN_NO_SOCKET; return MONGO_ERROR; } memset( sa.sin_zero , 0 , sizeof( sa.sin_zero ) ); sa.sin_family = AF_INET; sa.sin_port = htons( port ); sa.sin_addr.s_addr = inet_addr( host ); addressSize = sizeof( sa ); if ( connect( conn->sock, ( struct sockaddr * )&sa, addressSize ) == -1 ) { mongo_env_close_socket( conn->sock ); conn->connected = 0; conn->sock = 0; conn->err = MONGO_CONN_FAIL; return MONGO_ERROR; } setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( char * ) &flag, sizeof( flag ) ); if( conn->op_timeout_ms > 0 ) mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); conn->connected = 1; return MONGO_OK; } MONGO_EXPORT int mongo_env_sock_init( void ) { #if defined(_WIN32) WSADATA wsaData; WORD wVers; #elif defined(SIGPIPE) struct sigaction act; #endif static int called_once; static int retval; if (called_once) return retval; called_once = 1; #if defined(_WIN32) wVers = MAKEWORD(1, 1); retval = (WSAStartup(wVers, &wsaData) == 0); #elif defined(MACINTOSH) GUSISetup(GUSIwithInternetSockets); retval = 1; #elif defined(SIGPIPE) retval = 1; if (sigaction(SIGPIPE, (struct sigaction *)NULL, &act) < 0) retval = 0; else if (act.sa_handler == SIG_DFL) { act.sa_handler = SIG_IGN; if (sigaction(SIGPIPE, &act, (struct sigaction *)NULL) < 0) retval = 0; } #endif return retval; } #endif
int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { const char *cbuf = buf; int flags = 0; while ( len ) { int sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); conn->connected = 0; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; }
int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) { const char *cbuf = buf; int flags = 0; while ( len ) { size_t sent = send( conn->sock, cbuf, len, flags ); if ( sent == -1 ) { __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); conn->connected = 0; return MONGO_ERROR; } cbuf += sent; len -= sent; } return MONGO_OK; }
{'added': [(41, 'int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) {'), (46, ' size_t sent = send( conn->sock, cbuf, len, flags );'), (59, 'int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) {'), (63, ' size_t sent = recv( conn->sock, cbuf, len, 0 );'), (228, 'int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) {'), (237, ' size_t sent = send( conn->sock, cbuf, len, flags );'), (251, 'int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) {'), (254, ' size_t sent = recv( conn->sock, cbuf, len, 0 );'), (446, 'int mongo_env_write_socket( mongo *conn, const void *buf, size_t len ) {'), (459, ' size_t sent = send( conn->sock, cbuf, len, flags );'), (473, 'int mongo_env_read_socket( mongo *conn, void *buf, size_t len ) {'), (476, ' size_t sent = recv( conn->sock, cbuf, len, 0 );')], 'deleted': [(41, 'int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {'), (46, ' int sent = send( conn->sock, cbuf, len, flags );'), (59, 'int mongo_env_read_socket( mongo *conn, void *buf, int len ) {'), (63, ' int sent = recv( conn->sock, cbuf, len, 0 );'), (228, 'int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {'), (237, ' int sent = send( conn->sock, cbuf, len, flags );'), (251, 'int mongo_env_read_socket( mongo *conn, void *buf, int len ) {'), (254, ' int sent = recv( conn->sock, cbuf, len, 0 );'), (446, 'int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {'), (459, ' int sent = send( conn->sock, cbuf, len, flags );'), (473, 'int mongo_env_read_socket( mongo *conn, void *buf, int len ) {'), (476, ' int sent = recv( conn->sock, cbuf, len, 0 );')]}
12
12
367
2,238
https://github.com/10gen-archive/mongo-c-driver-legacy
CVE-2020-12135
['CWE-190']
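The record above documents CVE-2020-12135 (classified CWE-190): in all three env backends the `len` parameter of mongo_env_write_socket()/mongo_env_read_socket() and the local `sent` variable are widened from int to size_t, as listed in the diff field. The short sketch below is illustrative only and is not part of the dataset record; the helper names and the oversized constant are hypothetical. It shows the narrowing conversion the classification points at: a buffer length above INT_MAX is mangled when squeezed through an int parameter (implementation-defined, typically ending up negative), while a size_t parameter carries it unchanged.

/*
 * Illustrative sketch only -- not part of the dataset record above.
 * CVE-2020-12135 / CWE-190: the patch widens the length parameters of
 * mongo_env_write_socket()/mongo_env_read_socket() from int to size_t.
 * This program shows why: a length above INT_MAX is mangled when it is
 * converted to int (implementation-defined, typically a negative value),
 * while a size_t parameter carries it unchanged. The helper names and
 * the oversized constant are hypothetical.
 */
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

static void length_through_int(int len)       /* models the pre-fix signature */
{
    /* In the driver this value feeds: while (len) { sent = send(...); len -= sent; } */
    printf("int    parameter sees len = %d\n", len);
}

static void length_through_size_t(size_t len) /* models the post-fix signature */
{
    printf("size_t parameter sees len = %zu\n", len);
}

int main(void)
{
    size_t huge = (size_t)INT_MAX + 2;  /* hypothetical oversized message length */

    length_through_int((int)huge);      /* narrowing conversion loses the real size */
    length_through_size_t(huge);        /* value preserved */
    return 0;
}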
gtco.c
parse_hid_report_descriptor
/* -*- linux-c -*- GTCO digitizer USB driver TO CHECK: Is pressure done right on report 5? Copyright (C) 2006 GTCO CalComp This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Permission to use, copy, modify, distribute, and sell this software and its documentation for any purpose is hereby granted without fee, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of GTCO-CalComp not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. GTCO-CalComp makes no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. GTCO-CALCOMP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL GTCO-CALCOMP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTIONS, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. GTCO CalComp, Inc. 7125 Riverwood Drive Columbia, MD 21046 Jeremy Roberson jroberson@gtcocalcomp.com Scott Hill shill@gtcocalcomp.com */ /*#define DEBUG*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/usb/input.h> /* Version with a Major number of 2 is for kernel inclusion only. 
*/ #define GTCO_VERSION "2.00.0006" /* MACROS */ #define VENDOR_ID_GTCO 0x078C #define PID_400 0x400 #define PID_401 0x401 #define PID_1000 0x1000 #define PID_1001 0x1001 #define PID_1002 0x1002 /* Max size of a single report */ #define REPORT_MAX_SIZE 10 /* Bitmask whether pen is in range */ #define MASK_INRANGE 0x20 #define MASK_BUTTON 0x01F #define PATHLENGTH 64 /* DATA STRUCTURES */ /* Device table */ static const struct usb_device_id gtco_usbid_table[] = { { USB_DEVICE(VENDOR_ID_GTCO, PID_400) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_401) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1000) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1001) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1002) }, { } }; MODULE_DEVICE_TABLE (usb, gtco_usbid_table); /* Structure to hold all of our device specific stuff */ struct gtco { struct input_dev *inputdevice; /* input device struct pointer */ struct usb_interface *intf; /* the usb interface for this device */ struct urb *urbinfo; /* urb for incoming reports */ dma_addr_t buf_dma; /* dma addr of the data buffer*/ unsigned char * buffer; /* databuffer for reports */ char usbpath[PATHLENGTH]; int openCount; /* Information pulled from Report Descriptor */ u32 usage; u32 min_X; u32 max_X; u32 min_Y; u32 max_Y; s8 mintilt_X; s8 maxtilt_X; s8 mintilt_Y; s8 maxtilt_Y; u32 maxpressure; u32 minpressure; }; /* Code for parsing the HID REPORT DESCRIPTOR */ /* From HID1.11 spec */ struct hid_descriptor { struct usb_descriptor_header header; __le16 bcdHID; u8 bCountryCode; u8 bNumDescriptors; u8 bDescriptorType; __le16 wDescriptorLength; } __attribute__ ((packed)); #define HID_DESCRIPTOR_SIZE 9 #define HID_DEVICE_TYPE 33 #define REPORT_DEVICE_TYPE 34 #define PREF_TAG(x) ((x)>>4) #define PREF_TYPE(x) ((x>>2)&0x03) #define PREF_SIZE(x) ((x)&0x03) #define TYPE_MAIN 0 #define TYPE_GLOBAL 1 #define TYPE_LOCAL 2 #define TYPE_RESERVED 3 #define TAG_MAIN_INPUT 0x8 #define TAG_MAIN_OUTPUT 0x9 #define TAG_MAIN_FEATURE 0xB #define TAG_MAIN_COL_START 0xA #define TAG_MAIN_COL_END 0xC #define TAG_GLOB_USAGE 0 #define TAG_GLOB_LOG_MIN 1 #define TAG_GLOB_LOG_MAX 2 #define TAG_GLOB_PHYS_MIN 3 #define TAG_GLOB_PHYS_MAX 4 #define TAG_GLOB_UNIT_EXP 5 #define TAG_GLOB_UNIT 6 #define TAG_GLOB_REPORT_SZ 7 #define TAG_GLOB_REPORT_ID 8 #define TAG_GLOB_REPORT_CNT 9 #define TAG_GLOB_PUSH 10 #define TAG_GLOB_POP 11 #define TAG_GLOB_MAX 12 #define DIGITIZER_USAGE_TIP_PRESSURE 0x30 #define DIGITIZER_USAGE_TILT_X 0x3D #define DIGITIZER_USAGE_TILT_Y 0x3E /* * This is an abbreviated parser for the HID Report Descriptor. We * know what devices we are talking to, so this is by no means meant * to be generic. We can make some safe assumptions: * * - We know there are no LONG tags, all short * - We know that we have no MAIN Feature and MAIN Output items * - We know what the IRQ reports are supposed to look like. * * The main purpose of this is to use the HID report desc to figure * out the mins and maxs of the fields in the IRQ reports. The IRQ * reports for 400/401 change slightly if the max X is bigger than 64K. 
* */ static void parse_hid_report_descriptor(struct gtco *device, char * report, int length) { struct device *ddev = &device->intf->dev; int x, i = 0; /* Tag primitive vars */ __u8 prefix; __u8 size; __u8 tag; __u8 type; __u8 data = 0; __u16 data16 = 0; __u32 data32 = 0; /* For parsing logic */ int inputnum = 0; __u32 usage = 0; /* Global Values, indexed by TAG */ __u32 globalval[TAG_GLOB_MAX]; __u32 oldval[TAG_GLOB_MAX]; /* Debug stuff */ char maintype = 'x'; char globtype[12]; int indent = 0; char indentstr[10] = ""; dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n"); /* Walk this report and pull out the info we need */ while (i < length) { prefix = report[i]; /* Skip over prefix */ i++; /* Determine data size and save the data in the proper variable */ size = PREF_SIZE(prefix); switch (size) { case 1: data = report[i]; break; case 2: data16 = get_unaligned_le16(&report[i]); break; case 3: size = 4; data32 = get_unaligned_le32(&report[i]); break; } /* Skip size of data */ i += size; /* What we do depends on the tag type */ tag = PREF_TAG(prefix); type = PREF_TYPE(prefix); switch (type) { case TYPE_MAIN: strcpy(globtype, ""); switch (tag) { case TAG_MAIN_INPUT: /* * The INPUT MAIN tag signifies this is * information from a report. We need to * figure out what it is and store the * min/max values */ maintype = 'I'; if (data == 2) strcpy(globtype, "Variable"); else if (data == 3) strcpy(globtype, "Var|Const"); dev_dbg(ddev, "::::: Saving Report: %d input #%d Max: 0x%X(%d) Min:0x%X(%d) of %d bits\n", globalval[TAG_GLOB_REPORT_ID], inputnum, globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_REPORT_SZ] * globalval[TAG_GLOB_REPORT_CNT]); /* We can assume that the first two input items are always the X and Y coordinates. 
After that, we look for everything else by local usage value */ switch (inputnum) { case 0: /* X coord */ dev_dbg(ddev, "GER: X Usage: 0x%x\n", usage); if (device->max_X == 0) { device->max_X = globalval[TAG_GLOB_LOG_MAX]; device->min_X = globalval[TAG_GLOB_LOG_MIN]; } break; case 1: /* Y coord */ dev_dbg(ddev, "GER: Y Usage: 0x%x\n", usage); if (device->max_Y == 0) { device->max_Y = globalval[TAG_GLOB_LOG_MAX]; device->min_Y = globalval[TAG_GLOB_LOG_MIN]; } break; default: /* Tilt X */ if (usage == DIGITIZER_USAGE_TILT_X) { if (device->maxtilt_X == 0) { device->maxtilt_X = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_X = globalval[TAG_GLOB_LOG_MIN]; } } /* Tilt Y */ if (usage == DIGITIZER_USAGE_TILT_Y) { if (device->maxtilt_Y == 0) { device->maxtilt_Y = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_Y = globalval[TAG_GLOB_LOG_MIN]; } } /* Pressure */ if (usage == DIGITIZER_USAGE_TIP_PRESSURE) { if (device->maxpressure == 0) { device->maxpressure = globalval[TAG_GLOB_LOG_MAX]; device->minpressure = globalval[TAG_GLOB_LOG_MIN]; } } break; } inputnum++; break; case TAG_MAIN_OUTPUT: maintype = 'O'; break; case TAG_MAIN_FEATURE: maintype = 'F'; break; case TAG_MAIN_COL_START: maintype = 'S'; if (data == 0) { dev_dbg(ddev, "======>>>>>> Physical\n"); strcpy(globtype, "Physical"); } else dev_dbg(ddev, "======>>>>>>\n"); /* Indent the debug output */ indent++; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Save global tags */ for (x = 0; x < TAG_GLOB_MAX; x++) oldval[x] = globalval[x]; break; case TAG_MAIN_COL_END: dev_dbg(ddev, "<<<<<<======\n"); maintype = 'E'; indent--; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Copy global tags back */ for (x = 0; x < TAG_GLOB_MAX; x++) globalval[x] = oldval[x]; break; } switch (size) { case 1: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data); break; case 2: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data16); break; case 4: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data32); break; } break; case TYPE_GLOBAL: switch (tag) { case TAG_GLOB_USAGE: /* * First time we hit the global usage tag, * it should tell us the type of device */ if (device->usage == 0) device->usage = data; strcpy(globtype, "USAGE"); break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "LOG_MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "LOG_MAX"); break; case TAG_GLOB_PHYS_MIN: strcpy(globtype, "PHYS_MIN"); break; case TAG_GLOB_PHYS_MAX: strcpy(globtype, "PHYS_MAX"); break; case TAG_GLOB_UNIT_EXP: strcpy(globtype, "EXP"); break; case TAG_GLOB_UNIT: strcpy(globtype, "UNIT"); break; case TAG_GLOB_REPORT_SZ: strcpy(globtype, "REPORT_SZ"); break; case TAG_GLOB_REPORT_ID: strcpy(globtype, "REPORT_ID"); /* New report, restart numbering */ inputnum = 0; break; case TAG_GLOB_REPORT_CNT: strcpy(globtype, "REPORT_CNT"); break; case TAG_GLOB_PUSH: strcpy(globtype, "PUSH"); break; case TAG_GLOB_POP: strcpy(globtype, "POP"); break; } /* Check to make sure we have a good tag number so we don't overflow array */ if (tag < TAG_GLOB_MAX) { switch (size) { case 1: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data); globalval[tag] = data; break; case 2: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data16); globalval[tag] = data16; break; case 4: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 
0x%x\n", indentstr, globtype, tag, size, data32); globalval[tag] = data32; break; } } else { dev_dbg(ddev, "%sGLOBALTAG: ILLEGAL TAG:%d SIZE: %d\n", indentstr, tag, size); } break; case TYPE_LOCAL: switch (tag) { case TAG_GLOB_USAGE: strcpy(globtype, "USAGE"); /* Always 1 byte */ usage = data; break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "MAX"); break; default: strcpy(globtype, "UNKNOWN"); break; } switch (size) { case 1: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data); break; case 2: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data16); break; case 4: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data32); break; } break; } } } /* INPUT DRIVER Routines */ /* * Called when opening the input device. This will submit the URB to * the usb system so we start getting reports */ static int gtco_input_open(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); device->urbinfo->dev = interface_to_usbdev(device->intf); if (usb_submit_urb(device->urbinfo, GFP_KERNEL)) return -EIO; return 0; } /* * Called when closing the input device. This will unlink the URB */ static void gtco_input_close(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); usb_kill_urb(device->urbinfo); } /* * Setup input device capabilities. Tell the input system what this * device is capable of generating. * * This information is based on what is read from the HID report and * placed in the struct gtco structure * */ static void gtco_setup_caps(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); /* Which events */ inputdev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) | BIT_MASK(EV_MSC); /* Misc event menu block */ inputdev->mscbit[0] = BIT_MASK(MSC_SCAN) | BIT_MASK(MSC_SERIAL) | BIT_MASK(MSC_RAW); /* Absolute values based on HID report info */ input_set_abs_params(inputdev, ABS_X, device->min_X, device->max_X, 0, 0); input_set_abs_params(inputdev, ABS_Y, device->min_Y, device->max_Y, 0, 0); /* Proximity */ input_set_abs_params(inputdev, ABS_DISTANCE, 0, 1, 0, 0); /* Tilt & pressure */ input_set_abs_params(inputdev, ABS_TILT_X, device->mintilt_X, device->maxtilt_X, 0, 0); input_set_abs_params(inputdev, ABS_TILT_Y, device->mintilt_Y, device->maxtilt_Y, 0, 0); input_set_abs_params(inputdev, ABS_PRESSURE, device->minpressure, device->maxpressure, 0, 0); /* Transducer */ input_set_abs_params(inputdev, ABS_MISC, 0, 0xFF, 0, 0); } /* USB Routines */ /* * URB callback routine. Called when we get IRQ reports from the * digitizer. * * This bridges the USB and input device worlds. It generates events * on the input device based on the USB reports. */ static void gtco_urb_callback(struct urb *urbinfo) { struct gtco *device = urbinfo->context; struct input_dev *inputdev; int rc; u32 val = 0; char le_buffer[2]; inputdev = device->inputdevice; /* Was callback OK? */ if (urbinfo->status == -ECONNRESET || urbinfo->status == -ENOENT || urbinfo->status == -ESHUTDOWN) { /* Shutdown is occurring. Return and don't queue up any more */ return; } if (urbinfo->status != 0) { /* * Some unknown error. Hopefully temporary. 
Just go and * requeue an URB */ goto resubmit; } /* * Good URB, now process */ /* PID dependent when we interpret the report */ if (inputdev->id.product == PID_1000 || inputdev->id.product == PID_1001 || inputdev->id.product == PID_1002) { /* * Switch on the report ID * Conveniently, the reports have more information, the higher * the report number. We can just fall through the case * statements if we start with the highest number report */ switch (device->buffer[0]) { case 5: /* Pressure is 9 bits */ val = ((u16)(device->buffer[8]) << 1); val |= (u16)(device->buffer[7] >> 7); input_report_abs(inputdev, ABS_PRESSURE, device->buffer[8]); /* Mask out the Y tilt value used for pressure */ device->buffer[7] = (u8)((device->buffer[7]) & 0x7F); /* Fall thru */ case 4: /* Tilt */ input_report_abs(inputdev, ABS_TILT_X, sign_extend32(device->buffer[6], 6)); input_report_abs(inputdev, ABS_TILT_Y, sign_extend32(device->buffer[7], 6)); /* Fall thru */ case 2: case 3: /* Convert buttons, only 5 bits possible */ val = (device->buffer[5]) & MASK_BUTTON; /* We don't apply any meaning to the bitmask, just report */ input_event(inputdev, EV_MSC, MSC_SERIAL, val); /* Fall thru */ case 1: /* All reports have X and Y coords in the same place */ val = get_unaligned_le16(&device->buffer[1]); input_report_abs(inputdev, ABS_X, val); val = get_unaligned_le16(&device->buffer[3]); input_report_abs(inputdev, ABS_Y, val); /* Ditto for proximity bit */ val = device->buffer[5] & MASK_INRANGE ? 1 : 0; input_report_abs(inputdev, ABS_DISTANCE, val); /* Report 1 is an exception to how we handle buttons */ /* Buttons are an index, not a bitmask */ if (device->buffer[0] == 1) { /* * Convert buttons, 5 bit index * Report value of index set as one, * the rest as 0 */ val = device->buffer[5] & MASK_BUTTON; dev_dbg(&device->intf->dev, "======>>>>>>REPORT 1: val 0x%X(%d)\n", val, val); /* * We don't apply any meaning to the button * index, just report it */ input_event(inputdev, EV_MSC, MSC_SERIAL, val); } break; case 7: /* Menu blocks */ input_event(inputdev, EV_MSC, MSC_SCAN, device->buffer[1]); break; } } /* Other pid class */ if (inputdev->id.product == PID_400 || inputdev->id.product == PID_401) { /* Report 2 */ if (device->buffer[0] == 2) { /* Menu blocks */ input_event(inputdev, EV_MSC, MSC_SCAN, device->buffer[1]); } /* Report 1 */ if (device->buffer[0] == 1) { char buttonbyte; /* IF X max > 64K, we still a bit from the y report */ if (device->max_X > 0x10000) { val = (u16)(((u16)(device->buffer[2] << 8)) | (u8)device->buffer[1]); val |= (u32)(((u8)device->buffer[3] & 0x1) << 16); input_report_abs(inputdev, ABS_X, val); le_buffer[0] = (u8)((u8)(device->buffer[3]) >> 1); le_buffer[0] |= (u8)((device->buffer[3] & 0x1) << 7); le_buffer[1] = (u8)(device->buffer[4] >> 1); le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7); val = get_unaligned_le16(le_buffer); input_report_abs(inputdev, ABS_Y, val); /* * Shift the button byte right by one to * make it look like the standard report */ buttonbyte = device->buffer[5] >> 1; } else { val = get_unaligned_le16(&device->buffer[1]); input_report_abs(inputdev, ABS_X, val); val = get_unaligned_le16(&device->buffer[3]); input_report_abs(inputdev, ABS_Y, val); buttonbyte = device->buffer[5]; } /* BUTTONS and PROXIMITY */ val = buttonbyte & MASK_INRANGE ? 
1 : 0; input_report_abs(inputdev, ABS_DISTANCE, val); /* Convert buttons, only 4 bits possible */ val = buttonbyte & 0x0F; #ifdef USE_BUTTONS for (i = 0; i < 5; i++) input_report_key(inputdev, BTN_DIGI + i, val & (1 << i)); #else /* We don't apply any meaning to the bitmask, just report */ input_event(inputdev, EV_MSC, MSC_SERIAL, val); #endif /* TRANSDUCER */ input_report_abs(inputdev, ABS_MISC, device->buffer[6]); } } /* Everybody gets report ID's */ input_event(inputdev, EV_MSC, MSC_RAW, device->buffer[0]); /* Sync it up */ input_sync(inputdev); resubmit: rc = usb_submit_urb(urbinfo, GFP_ATOMIC); if (rc != 0) dev_err(&device->intf->dev, "usb_submit_urb failed rc=0x%x\n", rc); } /* * The probe routine. This is called when the kernel find the matching USB * vendor/product. We do the following: * * - Allocate mem for a local structure to manage the device * - Request a HID Report Descriptor from the device and parse it to * find out the device parameters * - Create an input device and assign it attributes * - Allocate an URB so the device can talk to us when the input * queue is open */ static int gtco_probe(struct usb_interface *usbinterface, const struct usb_device_id *id) { struct gtco *gtco; struct input_dev *input_dev; struct hid_descriptor *hid_desc; char *report; int result = 0, retry; int error; struct usb_endpoint_descriptor *endpoint; struct usb_device *udev = interface_to_usbdev(usbinterface); /* Allocate memory for device structure */ gtco = kzalloc(sizeof(struct gtco), GFP_KERNEL); input_dev = input_allocate_device(); if (!gtco || !input_dev) { dev_err(&usbinterface->dev, "No more memory\n"); error = -ENOMEM; goto err_free_devs; } /* Set pointer to the input device */ gtco->inputdevice = input_dev; /* Save interface information */ gtco->intf = usbinterface; /* Allocate some data for incoming reports */ gtco->buffer = usb_alloc_coherent(udev, REPORT_MAX_SIZE, GFP_KERNEL, &gtco->buf_dma); if (!gtco->buffer) { dev_err(&usbinterface->dev, "No more memory for us buffers\n"); error = -ENOMEM; goto err_free_devs; } /* Allocate URB for reports */ gtco->urbinfo = usb_alloc_urb(0, GFP_KERNEL); if (!gtco->urbinfo) { dev_err(&usbinterface->dev, "Failed to allocate URB\n"); error = -ENOMEM; goto err_free_buf; } /* Sanity check that a device has an endpoint */ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) { dev_err(&usbinterface->dev, "Invalid number of endpoints\n"); error = -EINVAL; goto err_free_urb; } /* * The endpoint is always altsetting 0, we know this since we know * this device only has one interrupt endpoint */ endpoint = &usbinterface->altsetting[0].endpoint[0].desc; /* Some debug */ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting); dev_dbg(&usbinterface->dev, "num endpoints: %d\n", usbinterface->cur_altsetting->desc.bNumEndpoints); dev_dbg(&usbinterface->dev, "interface class: %d\n", usbinterface->cur_altsetting->desc.bInterfaceClass); dev_dbg(&usbinterface->dev, "endpoint: attribute:0x%x type:0x%x\n", endpoint->bmAttributes, endpoint->bDescriptorType); if (usb_endpoint_xfer_int(endpoint)) dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n"); dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen); /* * Find the HID descriptor so we can find out the size of the * HID report descriptor */ if (usb_get_extra_descriptor(usbinterface->cur_altsetting, HID_DEVICE_TYPE, &hid_desc) != 0) { dev_err(&usbinterface->dev, "Can't retrieve exta USB descriptor to get hid report descriptor 
length\n"); error = -EIO; goto err_free_urb; } dev_dbg(&usbinterface->dev, "Extra descriptor success: type:%d len:%d\n", hid_desc->bDescriptorType, hid_desc->wDescriptorLength); report = kzalloc(le16_to_cpu(hid_desc->wDescriptorLength), GFP_KERNEL); if (!report) { dev_err(&usbinterface->dev, "No more memory for report\n"); error = -ENOMEM; goto err_free_urb; } /* Couple of tries to get reply */ for (retry = 0; retry < 3; retry++) { result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), USB_REQ_GET_DESCRIPTOR, USB_RECIP_INTERFACE | USB_DIR_IN, REPORT_DEVICE_TYPE << 8, 0, /* interface */ report, le16_to_cpu(hid_desc->wDescriptorLength), 5000); /* 5 secs */ dev_dbg(&usbinterface->dev, "usb_control_msg result: %d\n", result); if (result == le16_to_cpu(hid_desc->wDescriptorLength)) { parse_hid_report_descriptor(gtco, report, result); break; } } kfree(report); /* If we didn't get the report, fail */ if (result != le16_to_cpu(hid_desc->wDescriptorLength)) { dev_err(&usbinterface->dev, "Failed to get HID Report Descriptor of size: %d\n", hid_desc->wDescriptorLength); error = -EIO; goto err_free_urb; } /* Create a device file node */ usb_make_path(udev, gtco->usbpath, sizeof(gtco->usbpath)); strlcat(gtco->usbpath, "/input0", sizeof(gtco->usbpath)); /* Set Input device functions */ input_dev->open = gtco_input_open; input_dev->close = gtco_input_close; /* Set input device information */ input_dev->name = "GTCO_CalComp"; input_dev->phys = gtco->usbpath; input_set_drvdata(input_dev, gtco); /* Now set up all the input device capabilities */ gtco_setup_caps(input_dev); /* Set input device required ID information */ usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &usbinterface->dev; /* Setup the URB, it will be posted later on open of input device */ endpoint = &usbinterface->altsetting[0].endpoint[0].desc; usb_fill_int_urb(gtco->urbinfo, udev, usb_rcvintpipe(udev, endpoint->bEndpointAddress), gtco->buffer, REPORT_MAX_SIZE, gtco_urb_callback, gtco, endpoint->bInterval); gtco->urbinfo->transfer_dma = gtco->buf_dma; gtco->urbinfo->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* Save gtco pointer in USB interface gtco */ usb_set_intfdata(usbinterface, gtco); /* All done, now register the input device */ error = input_register_device(input_dev); if (error) goto err_free_urb; return 0; err_free_urb: usb_free_urb(gtco->urbinfo); err_free_buf: usb_free_coherent(udev, REPORT_MAX_SIZE, gtco->buffer, gtco->buf_dma); err_free_devs: input_free_device(input_dev); kfree(gtco); return error; } /* * This function is a standard USB function called when the USB device * is disconnected. We will get rid of the URV, de-register the input * device, and free up allocated memory */ static void gtco_disconnect(struct usb_interface *interface) { /* Grab private device ptr */ struct gtco *gtco = usb_get_intfdata(interface); struct usb_device *udev = interface_to_usbdev(interface); /* Now reverse all the registration stuff */ if (gtco) { input_unregister_device(gtco->inputdevice); usb_kill_urb(gtco->urbinfo); usb_free_urb(gtco->urbinfo); usb_free_coherent(udev, REPORT_MAX_SIZE, gtco->buffer, gtco->buf_dma); kfree(gtco); } dev_info(&interface->dev, "gtco driver disconnected\n"); } /* STANDARD MODULE LOAD ROUTINES */ static struct usb_driver gtco_driverinfo_table = { .name = "gtco", .id_table = gtco_usbid_table, .probe = gtco_probe, .disconnect = gtco_disconnect, }; module_usb_driver(gtco_driverinfo_table); MODULE_DESCRIPTION("GTCO digitizer USB driver"); MODULE_LICENSE("GPL");
/* -*- linux-c -*- GTCO digitizer USB driver TO CHECK: Is pressure done right on report 5? Copyright (C) 2006 GTCO CalComp This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Permission to use, copy, modify, distribute, and sell this software and its documentation for any purpose is hereby granted without fee, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of GTCO-CalComp not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. GTCO-CalComp makes no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. GTCO-CALCOMP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL GTCO-CALCOMP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTIONS, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. GTCO CalComp, Inc. 7125 Riverwood Drive Columbia, MD 21046 Jeremy Roberson jroberson@gtcocalcomp.com Scott Hill shill@gtcocalcomp.com */ /*#define DEBUG*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/usb/input.h> /* Version with a Major number of 2 is for kernel inclusion only. 
*/ #define GTCO_VERSION "2.00.0006" /* MACROS */ #define VENDOR_ID_GTCO 0x078C #define PID_400 0x400 #define PID_401 0x401 #define PID_1000 0x1000 #define PID_1001 0x1001 #define PID_1002 0x1002 /* Max size of a single report */ #define REPORT_MAX_SIZE 10 /* Bitmask whether pen is in range */ #define MASK_INRANGE 0x20 #define MASK_BUTTON 0x01F #define PATHLENGTH 64 /* DATA STRUCTURES */ /* Device table */ static const struct usb_device_id gtco_usbid_table[] = { { USB_DEVICE(VENDOR_ID_GTCO, PID_400) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_401) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1000) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1001) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1002) }, { } }; MODULE_DEVICE_TABLE (usb, gtco_usbid_table); /* Structure to hold all of our device specific stuff */ struct gtco { struct input_dev *inputdevice; /* input device struct pointer */ struct usb_interface *intf; /* the usb interface for this device */ struct urb *urbinfo; /* urb for incoming reports */ dma_addr_t buf_dma; /* dma addr of the data buffer*/ unsigned char * buffer; /* databuffer for reports */ char usbpath[PATHLENGTH]; int openCount; /* Information pulled from Report Descriptor */ u32 usage; u32 min_X; u32 max_X; u32 min_Y; u32 max_Y; s8 mintilt_X; s8 maxtilt_X; s8 mintilt_Y; s8 maxtilt_Y; u32 maxpressure; u32 minpressure; }; /* Code for parsing the HID REPORT DESCRIPTOR */ /* From HID1.11 spec */ struct hid_descriptor { struct usb_descriptor_header header; __le16 bcdHID; u8 bCountryCode; u8 bNumDescriptors; u8 bDescriptorType; __le16 wDescriptorLength; } __attribute__ ((packed)); #define HID_DESCRIPTOR_SIZE 9 #define HID_DEVICE_TYPE 33 #define REPORT_DEVICE_TYPE 34 #define PREF_TAG(x) ((x)>>4) #define PREF_TYPE(x) ((x>>2)&0x03) #define PREF_SIZE(x) ((x)&0x03) #define TYPE_MAIN 0 #define TYPE_GLOBAL 1 #define TYPE_LOCAL 2 #define TYPE_RESERVED 3 #define TAG_MAIN_INPUT 0x8 #define TAG_MAIN_OUTPUT 0x9 #define TAG_MAIN_FEATURE 0xB #define TAG_MAIN_COL_START 0xA #define TAG_MAIN_COL_END 0xC #define TAG_GLOB_USAGE 0 #define TAG_GLOB_LOG_MIN 1 #define TAG_GLOB_LOG_MAX 2 #define TAG_GLOB_PHYS_MIN 3 #define TAG_GLOB_PHYS_MAX 4 #define TAG_GLOB_UNIT_EXP 5 #define TAG_GLOB_UNIT 6 #define TAG_GLOB_REPORT_SZ 7 #define TAG_GLOB_REPORT_ID 8 #define TAG_GLOB_REPORT_CNT 9 #define TAG_GLOB_PUSH 10 #define TAG_GLOB_POP 11 #define TAG_GLOB_MAX 12 #define DIGITIZER_USAGE_TIP_PRESSURE 0x30 #define DIGITIZER_USAGE_TILT_X 0x3D #define DIGITIZER_USAGE_TILT_Y 0x3E /* * This is an abbreviated parser for the HID Report Descriptor. We * know what devices we are talking to, so this is by no means meant * to be generic. We can make some safe assumptions: * * - We know there are no LONG tags, all short * - We know that we have no MAIN Feature and MAIN Output items * - We know what the IRQ reports are supposed to look like. * * The main purpose of this is to use the HID report desc to figure * out the mins and maxs of the fields in the IRQ reports. The IRQ * reports for 400/401 change slightly if the max X is bigger than 64K. 
* */ static void parse_hid_report_descriptor(struct gtco *device, char * report, int length) { struct device *ddev = &device->intf->dev; int x, i = 0; /* Tag primitive vars */ __u8 prefix; __u8 size; __u8 tag; __u8 type; __u8 data = 0; __u16 data16 = 0; __u32 data32 = 0; /* For parsing logic */ int inputnum = 0; __u32 usage = 0; /* Global Values, indexed by TAG */ __u32 globalval[TAG_GLOB_MAX]; __u32 oldval[TAG_GLOB_MAX]; /* Debug stuff */ char maintype = 'x'; char globtype[12]; int indent = 0; char indentstr[10] = ""; dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n"); /* Walk this report and pull out the info we need */ while (i < length) { prefix = report[i++]; /* Determine data size and save the data in the proper variable */ size = (1U << PREF_SIZE(prefix)) >> 1; if (i + size > length) { dev_err(ddev, "Not enough data (need %d, have %d)\n", i + size, length); break; } switch (size) { case 1: data = report[i]; break; case 2: data16 = get_unaligned_le16(&report[i]); break; case 4: data32 = get_unaligned_le32(&report[i]); break; } /* Skip size of data */ i += size; /* What we do depends on the tag type */ tag = PREF_TAG(prefix); type = PREF_TYPE(prefix); switch (type) { case TYPE_MAIN: strcpy(globtype, ""); switch (tag) { case TAG_MAIN_INPUT: /* * The INPUT MAIN tag signifies this is * information from a report. We need to * figure out what it is and store the * min/max values */ maintype = 'I'; if (data == 2) strcpy(globtype, "Variable"); else if (data == 3) strcpy(globtype, "Var|Const"); dev_dbg(ddev, "::::: Saving Report: %d input #%d Max: 0x%X(%d) Min:0x%X(%d) of %d bits\n", globalval[TAG_GLOB_REPORT_ID], inputnum, globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_REPORT_SZ] * globalval[TAG_GLOB_REPORT_CNT]); /* We can assume that the first two input items are always the X and Y coordinates. 
After that, we look for everything else by local usage value */ switch (inputnum) { case 0: /* X coord */ dev_dbg(ddev, "GER: X Usage: 0x%x\n", usage); if (device->max_X == 0) { device->max_X = globalval[TAG_GLOB_LOG_MAX]; device->min_X = globalval[TAG_GLOB_LOG_MIN]; } break; case 1: /* Y coord */ dev_dbg(ddev, "GER: Y Usage: 0x%x\n", usage); if (device->max_Y == 0) { device->max_Y = globalval[TAG_GLOB_LOG_MAX]; device->min_Y = globalval[TAG_GLOB_LOG_MIN]; } break; default: /* Tilt X */ if (usage == DIGITIZER_USAGE_TILT_X) { if (device->maxtilt_X == 0) { device->maxtilt_X = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_X = globalval[TAG_GLOB_LOG_MIN]; } } /* Tilt Y */ if (usage == DIGITIZER_USAGE_TILT_Y) { if (device->maxtilt_Y == 0) { device->maxtilt_Y = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_Y = globalval[TAG_GLOB_LOG_MIN]; } } /* Pressure */ if (usage == DIGITIZER_USAGE_TIP_PRESSURE) { if (device->maxpressure == 0) { device->maxpressure = globalval[TAG_GLOB_LOG_MAX]; device->minpressure = globalval[TAG_GLOB_LOG_MIN]; } } break; } inputnum++; break; case TAG_MAIN_OUTPUT: maintype = 'O'; break; case TAG_MAIN_FEATURE: maintype = 'F'; break; case TAG_MAIN_COL_START: maintype = 'S'; if (data == 0) { dev_dbg(ddev, "======>>>>>> Physical\n"); strcpy(globtype, "Physical"); } else dev_dbg(ddev, "======>>>>>>\n"); /* Indent the debug output */ indent++; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Save global tags */ for (x = 0; x < TAG_GLOB_MAX; x++) oldval[x] = globalval[x]; break; case TAG_MAIN_COL_END: dev_dbg(ddev, "<<<<<<======\n"); maintype = 'E'; indent--; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Copy global tags back */ for (x = 0; x < TAG_GLOB_MAX; x++) globalval[x] = oldval[x]; break; } switch (size) { case 1: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data); break; case 2: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data16); break; case 4: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data32); break; } break; case TYPE_GLOBAL: switch (tag) { case TAG_GLOB_USAGE: /* * First time we hit the global usage tag, * it should tell us the type of device */ if (device->usage == 0) device->usage = data; strcpy(globtype, "USAGE"); break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "LOG_MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "LOG_MAX"); break; case TAG_GLOB_PHYS_MIN: strcpy(globtype, "PHYS_MIN"); break; case TAG_GLOB_PHYS_MAX: strcpy(globtype, "PHYS_MAX"); break; case TAG_GLOB_UNIT_EXP: strcpy(globtype, "EXP"); break; case TAG_GLOB_UNIT: strcpy(globtype, "UNIT"); break; case TAG_GLOB_REPORT_SZ: strcpy(globtype, "REPORT_SZ"); break; case TAG_GLOB_REPORT_ID: strcpy(globtype, "REPORT_ID"); /* New report, restart numbering */ inputnum = 0; break; case TAG_GLOB_REPORT_CNT: strcpy(globtype, "REPORT_CNT"); break; case TAG_GLOB_PUSH: strcpy(globtype, "PUSH"); break; case TAG_GLOB_POP: strcpy(globtype, "POP"); break; } /* Check to make sure we have a good tag number so we don't overflow array */ if (tag < TAG_GLOB_MAX) { switch (size) { case 1: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data); globalval[tag] = data; break; case 2: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data16); globalval[tag] = data16; break; case 4: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 
0x%x\n", indentstr, globtype, tag, size, data32); globalval[tag] = data32; break; } } else { dev_dbg(ddev, "%sGLOBALTAG: ILLEGAL TAG:%d SIZE: %d\n", indentstr, tag, size); } break; case TYPE_LOCAL: switch (tag) { case TAG_GLOB_USAGE: strcpy(globtype, "USAGE"); /* Always 1 byte */ usage = data; break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "MAX"); break; default: strcpy(globtype, "UNKNOWN"); break; } switch (size) { case 1: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data); break; case 2: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data16); break; case 4: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data32); break; } break; } } } /* INPUT DRIVER Routines */ /* * Called when opening the input device. This will submit the URB to * the usb system so we start getting reports */ static int gtco_input_open(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); device->urbinfo->dev = interface_to_usbdev(device->intf); if (usb_submit_urb(device->urbinfo, GFP_KERNEL)) return -EIO; return 0; } /* * Called when closing the input device. This will unlink the URB */ static void gtco_input_close(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); usb_kill_urb(device->urbinfo); } /* * Setup input device capabilities. Tell the input system what this * device is capable of generating. * * This information is based on what is read from the HID report and * placed in the struct gtco structure * */ static void gtco_setup_caps(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); /* Which events */ inputdev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) | BIT_MASK(EV_MSC); /* Misc event menu block */ inputdev->mscbit[0] = BIT_MASK(MSC_SCAN) | BIT_MASK(MSC_SERIAL) | BIT_MASK(MSC_RAW); /* Absolute values based on HID report info */ input_set_abs_params(inputdev, ABS_X, device->min_X, device->max_X, 0, 0); input_set_abs_params(inputdev, ABS_Y, device->min_Y, device->max_Y, 0, 0); /* Proximity */ input_set_abs_params(inputdev, ABS_DISTANCE, 0, 1, 0, 0); /* Tilt & pressure */ input_set_abs_params(inputdev, ABS_TILT_X, device->mintilt_X, device->maxtilt_X, 0, 0); input_set_abs_params(inputdev, ABS_TILT_Y, device->mintilt_Y, device->maxtilt_Y, 0, 0); input_set_abs_params(inputdev, ABS_PRESSURE, device->minpressure, device->maxpressure, 0, 0); /* Transducer */ input_set_abs_params(inputdev, ABS_MISC, 0, 0xFF, 0, 0); } /* USB Routines */ /* * URB callback routine. Called when we get IRQ reports from the * digitizer. * * This bridges the USB and input device worlds. It generates events * on the input device based on the USB reports. */ static void gtco_urb_callback(struct urb *urbinfo) { struct gtco *device = urbinfo->context; struct input_dev *inputdev; int rc; u32 val = 0; char le_buffer[2]; inputdev = device->inputdevice; /* Was callback OK? */ if (urbinfo->status == -ECONNRESET || urbinfo->status == -ENOENT || urbinfo->status == -ESHUTDOWN) { /* Shutdown is occurring. Return and don't queue up any more */ return; } if (urbinfo->status != 0) { /* * Some unknown error. Hopefully temporary. 
Just go and * requeue an URB */ goto resubmit; } /* * Good URB, now process */ /* PID dependent when we interpret the report */ if (inputdev->id.product == PID_1000 || inputdev->id.product == PID_1001 || inputdev->id.product == PID_1002) { /* * Switch on the report ID * Conveniently, the reports have more information, the higher * the report number. We can just fall through the case * statements if we start with the highest number report */ switch (device->buffer[0]) { case 5: /* Pressure is 9 bits */ val = ((u16)(device->buffer[8]) << 1); val |= (u16)(device->buffer[7] >> 7); input_report_abs(inputdev, ABS_PRESSURE, device->buffer[8]); /* Mask out the Y tilt value used for pressure */ device->buffer[7] = (u8)((device->buffer[7]) & 0x7F); /* Fall thru */ case 4: /* Tilt */ input_report_abs(inputdev, ABS_TILT_X, sign_extend32(device->buffer[6], 6)); input_report_abs(inputdev, ABS_TILT_Y, sign_extend32(device->buffer[7], 6)); /* Fall thru */ case 2: case 3: /* Convert buttons, only 5 bits possible */ val = (device->buffer[5]) & MASK_BUTTON; /* We don't apply any meaning to the bitmask, just report */ input_event(inputdev, EV_MSC, MSC_SERIAL, val); /* Fall thru */ case 1: /* All reports have X and Y coords in the same place */ val = get_unaligned_le16(&device->buffer[1]); input_report_abs(inputdev, ABS_X, val); val = get_unaligned_le16(&device->buffer[3]); input_report_abs(inputdev, ABS_Y, val); /* Ditto for proximity bit */ val = device->buffer[5] & MASK_INRANGE ? 1 : 0; input_report_abs(inputdev, ABS_DISTANCE, val); /* Report 1 is an exception to how we handle buttons */ /* Buttons are an index, not a bitmask */ if (device->buffer[0] == 1) { /* * Convert buttons, 5 bit index * Report value of index set as one, * the rest as 0 */ val = device->buffer[5] & MASK_BUTTON; dev_dbg(&device->intf->dev, "======>>>>>>REPORT 1: val 0x%X(%d)\n", val, val); /* * We don't apply any meaning to the button * index, just report it */ input_event(inputdev, EV_MSC, MSC_SERIAL, val); } break; case 7: /* Menu blocks */ input_event(inputdev, EV_MSC, MSC_SCAN, device->buffer[1]); break; } } /* Other pid class */ if (inputdev->id.product == PID_400 || inputdev->id.product == PID_401) { /* Report 2 */ if (device->buffer[0] == 2) { /* Menu blocks */ input_event(inputdev, EV_MSC, MSC_SCAN, device->buffer[1]); } /* Report 1 */ if (device->buffer[0] == 1) { char buttonbyte; /* IF X max > 64K, we still a bit from the y report */ if (device->max_X > 0x10000) { val = (u16)(((u16)(device->buffer[2] << 8)) | (u8)device->buffer[1]); val |= (u32)(((u8)device->buffer[3] & 0x1) << 16); input_report_abs(inputdev, ABS_X, val); le_buffer[0] = (u8)((u8)(device->buffer[3]) >> 1); le_buffer[0] |= (u8)((device->buffer[3] & 0x1) << 7); le_buffer[1] = (u8)(device->buffer[4] >> 1); le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7); val = get_unaligned_le16(le_buffer); input_report_abs(inputdev, ABS_Y, val); /* * Shift the button byte right by one to * make it look like the standard report */ buttonbyte = device->buffer[5] >> 1; } else { val = get_unaligned_le16(&device->buffer[1]); input_report_abs(inputdev, ABS_X, val); val = get_unaligned_le16(&device->buffer[3]); input_report_abs(inputdev, ABS_Y, val); buttonbyte = device->buffer[5]; } /* BUTTONS and PROXIMITY */ val = buttonbyte & MASK_INRANGE ? 
1 : 0; input_report_abs(inputdev, ABS_DISTANCE, val); /* Convert buttons, only 4 bits possible */ val = buttonbyte & 0x0F; #ifdef USE_BUTTONS for (i = 0; i < 5; i++) input_report_key(inputdev, BTN_DIGI + i, val & (1 << i)); #else /* We don't apply any meaning to the bitmask, just report */ input_event(inputdev, EV_MSC, MSC_SERIAL, val); #endif /* TRANSDUCER */ input_report_abs(inputdev, ABS_MISC, device->buffer[6]); } } /* Everybody gets report ID's */ input_event(inputdev, EV_MSC, MSC_RAW, device->buffer[0]); /* Sync it up */ input_sync(inputdev); resubmit: rc = usb_submit_urb(urbinfo, GFP_ATOMIC); if (rc != 0) dev_err(&device->intf->dev, "usb_submit_urb failed rc=0x%x\n", rc); } /* * The probe routine. This is called when the kernel find the matching USB * vendor/product. We do the following: * * - Allocate mem for a local structure to manage the device * - Request a HID Report Descriptor from the device and parse it to * find out the device parameters * - Create an input device and assign it attributes * - Allocate an URB so the device can talk to us when the input * queue is open */ static int gtco_probe(struct usb_interface *usbinterface, const struct usb_device_id *id) { struct gtco *gtco; struct input_dev *input_dev; struct hid_descriptor *hid_desc; char *report; int result = 0, retry; int error; struct usb_endpoint_descriptor *endpoint; struct usb_device *udev = interface_to_usbdev(usbinterface); /* Allocate memory for device structure */ gtco = kzalloc(sizeof(struct gtco), GFP_KERNEL); input_dev = input_allocate_device(); if (!gtco || !input_dev) { dev_err(&usbinterface->dev, "No more memory\n"); error = -ENOMEM; goto err_free_devs; } /* Set pointer to the input device */ gtco->inputdevice = input_dev; /* Save interface information */ gtco->intf = usbinterface; /* Allocate some data for incoming reports */ gtco->buffer = usb_alloc_coherent(udev, REPORT_MAX_SIZE, GFP_KERNEL, &gtco->buf_dma); if (!gtco->buffer) { dev_err(&usbinterface->dev, "No more memory for us buffers\n"); error = -ENOMEM; goto err_free_devs; } /* Allocate URB for reports */ gtco->urbinfo = usb_alloc_urb(0, GFP_KERNEL); if (!gtco->urbinfo) { dev_err(&usbinterface->dev, "Failed to allocate URB\n"); error = -ENOMEM; goto err_free_buf; } /* Sanity check that a device has an endpoint */ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) { dev_err(&usbinterface->dev, "Invalid number of endpoints\n"); error = -EINVAL; goto err_free_urb; } /* * The endpoint is always altsetting 0, we know this since we know * this device only has one interrupt endpoint */ endpoint = &usbinterface->altsetting[0].endpoint[0].desc; /* Some debug */ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting); dev_dbg(&usbinterface->dev, "num endpoints: %d\n", usbinterface->cur_altsetting->desc.bNumEndpoints); dev_dbg(&usbinterface->dev, "interface class: %d\n", usbinterface->cur_altsetting->desc.bInterfaceClass); dev_dbg(&usbinterface->dev, "endpoint: attribute:0x%x type:0x%x\n", endpoint->bmAttributes, endpoint->bDescriptorType); if (usb_endpoint_xfer_int(endpoint)) dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n"); dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen); /* * Find the HID descriptor so we can find out the size of the * HID report descriptor */ if (usb_get_extra_descriptor(usbinterface->cur_altsetting, HID_DEVICE_TYPE, &hid_desc) != 0) { dev_err(&usbinterface->dev, "Can't retrieve exta USB descriptor to get hid report descriptor 
length\n"); error = -EIO; goto err_free_urb; } dev_dbg(&usbinterface->dev, "Extra descriptor success: type:%d len:%d\n", hid_desc->bDescriptorType, hid_desc->wDescriptorLength); report = kzalloc(le16_to_cpu(hid_desc->wDescriptorLength), GFP_KERNEL); if (!report) { dev_err(&usbinterface->dev, "No more memory for report\n"); error = -ENOMEM; goto err_free_urb; } /* Couple of tries to get reply */ for (retry = 0; retry < 3; retry++) { result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), USB_REQ_GET_DESCRIPTOR, USB_RECIP_INTERFACE | USB_DIR_IN, REPORT_DEVICE_TYPE << 8, 0, /* interface */ report, le16_to_cpu(hid_desc->wDescriptorLength), 5000); /* 5 secs */ dev_dbg(&usbinterface->dev, "usb_control_msg result: %d\n", result); if (result == le16_to_cpu(hid_desc->wDescriptorLength)) { parse_hid_report_descriptor(gtco, report, result); break; } } kfree(report); /* If we didn't get the report, fail */ if (result != le16_to_cpu(hid_desc->wDescriptorLength)) { dev_err(&usbinterface->dev, "Failed to get HID Report Descriptor of size: %d\n", hid_desc->wDescriptorLength); error = -EIO; goto err_free_urb; } /* Create a device file node */ usb_make_path(udev, gtco->usbpath, sizeof(gtco->usbpath)); strlcat(gtco->usbpath, "/input0", sizeof(gtco->usbpath)); /* Set Input device functions */ input_dev->open = gtco_input_open; input_dev->close = gtco_input_close; /* Set input device information */ input_dev->name = "GTCO_CalComp"; input_dev->phys = gtco->usbpath; input_set_drvdata(input_dev, gtco); /* Now set up all the input device capabilities */ gtco_setup_caps(input_dev); /* Set input device required ID information */ usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &usbinterface->dev; /* Setup the URB, it will be posted later on open of input device */ endpoint = &usbinterface->altsetting[0].endpoint[0].desc; usb_fill_int_urb(gtco->urbinfo, udev, usb_rcvintpipe(udev, endpoint->bEndpointAddress), gtco->buffer, REPORT_MAX_SIZE, gtco_urb_callback, gtco, endpoint->bInterval); gtco->urbinfo->transfer_dma = gtco->buf_dma; gtco->urbinfo->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* Save gtco pointer in USB interface gtco */ usb_set_intfdata(usbinterface, gtco); /* All done, now register the input device */ error = input_register_device(input_dev); if (error) goto err_free_urb; return 0; err_free_urb: usb_free_urb(gtco->urbinfo); err_free_buf: usb_free_coherent(udev, REPORT_MAX_SIZE, gtco->buffer, gtco->buf_dma); err_free_devs: input_free_device(input_dev); kfree(gtco); return error; } /* * This function is a standard USB function called when the USB device * is disconnected. We will get rid of the URV, de-register the input * device, and free up allocated memory */ static void gtco_disconnect(struct usb_interface *interface) { /* Grab private device ptr */ struct gtco *gtco = usb_get_intfdata(interface); struct usb_device *udev = interface_to_usbdev(interface); /* Now reverse all the registration stuff */ if (gtco) { input_unregister_device(gtco->inputdevice); usb_kill_urb(gtco->urbinfo); usb_free_urb(gtco->urbinfo); usb_free_coherent(udev, REPORT_MAX_SIZE, gtco->buffer, gtco->buf_dma); kfree(gtco); } dev_info(&interface->dev, "gtco driver disconnected\n"); } /* STANDARD MODULE LOAD ROUTINES */ static struct usb_driver gtco_driverinfo_table = { .name = "gtco", .id_table = gtco_usbid_table, .probe = gtco_probe, .disconnect = gtco_disconnect, }; module_usb_driver(gtco_driverinfo_table); MODULE_DESCRIPTION("GTCO digitizer USB driver"); MODULE_LICENSE("GPL");
static void parse_hid_report_descriptor(struct gtco *device, char * report, int length) { struct device *ddev = &device->intf->dev; int x, i = 0; /* Tag primitive vars */ __u8 prefix; __u8 size; __u8 tag; __u8 type; __u8 data = 0; __u16 data16 = 0; __u32 data32 = 0; /* For parsing logic */ int inputnum = 0; __u32 usage = 0; /* Global Values, indexed by TAG */ __u32 globalval[TAG_GLOB_MAX]; __u32 oldval[TAG_GLOB_MAX]; /* Debug stuff */ char maintype = 'x'; char globtype[12]; int indent = 0; char indentstr[10] = ""; dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n"); /* Walk this report and pull out the info we need */ while (i < length) { prefix = report[i]; /* Skip over prefix */ i++; /* Determine data size and save the data in the proper variable */ size = PREF_SIZE(prefix); switch (size) { case 1: data = report[i]; break; case 2: data16 = get_unaligned_le16(&report[i]); break; case 3: size = 4; data32 = get_unaligned_le32(&report[i]); break; } /* Skip size of data */ i += size; /* What we do depends on the tag type */ tag = PREF_TAG(prefix); type = PREF_TYPE(prefix); switch (type) { case TYPE_MAIN: strcpy(globtype, ""); switch (tag) { case TAG_MAIN_INPUT: /* * The INPUT MAIN tag signifies this is * information from a report. We need to * figure out what it is and store the * min/max values */ maintype = 'I'; if (data == 2) strcpy(globtype, "Variable"); else if (data == 3) strcpy(globtype, "Var|Const"); dev_dbg(ddev, "::::: Saving Report: %d input #%d Max: 0x%X(%d) Min:0x%X(%d) of %d bits\n", globalval[TAG_GLOB_REPORT_ID], inputnum, globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_REPORT_SZ] * globalval[TAG_GLOB_REPORT_CNT]); /* We can assume that the first two input items are always the X and Y coordinates. 
After that, we look for everything else by local usage value */ switch (inputnum) { case 0: /* X coord */ dev_dbg(ddev, "GER: X Usage: 0x%x\n", usage); if (device->max_X == 0) { device->max_X = globalval[TAG_GLOB_LOG_MAX]; device->min_X = globalval[TAG_GLOB_LOG_MIN]; } break; case 1: /* Y coord */ dev_dbg(ddev, "GER: Y Usage: 0x%x\n", usage); if (device->max_Y == 0) { device->max_Y = globalval[TAG_GLOB_LOG_MAX]; device->min_Y = globalval[TAG_GLOB_LOG_MIN]; } break; default: /* Tilt X */ if (usage == DIGITIZER_USAGE_TILT_X) { if (device->maxtilt_X == 0) { device->maxtilt_X = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_X = globalval[TAG_GLOB_LOG_MIN]; } } /* Tilt Y */ if (usage == DIGITIZER_USAGE_TILT_Y) { if (device->maxtilt_Y == 0) { device->maxtilt_Y = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_Y = globalval[TAG_GLOB_LOG_MIN]; } } /* Pressure */ if (usage == DIGITIZER_USAGE_TIP_PRESSURE) { if (device->maxpressure == 0) { device->maxpressure = globalval[TAG_GLOB_LOG_MAX]; device->minpressure = globalval[TAG_GLOB_LOG_MIN]; } } break; } inputnum++; break; case TAG_MAIN_OUTPUT: maintype = 'O'; break; case TAG_MAIN_FEATURE: maintype = 'F'; break; case TAG_MAIN_COL_START: maintype = 'S'; if (data == 0) { dev_dbg(ddev, "======>>>>>> Physical\n"); strcpy(globtype, "Physical"); } else dev_dbg(ddev, "======>>>>>>\n"); /* Indent the debug output */ indent++; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Save global tags */ for (x = 0; x < TAG_GLOB_MAX; x++) oldval[x] = globalval[x]; break; case TAG_MAIN_COL_END: dev_dbg(ddev, "<<<<<<======\n"); maintype = 'E'; indent--; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Copy global tags back */ for (x = 0; x < TAG_GLOB_MAX; x++) globalval[x] = oldval[x]; break; } switch (size) { case 1: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data); break; case 2: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data16); break; case 4: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data32); break; } break; case TYPE_GLOBAL: switch (tag) { case TAG_GLOB_USAGE: /* * First time we hit the global usage tag, * it should tell us the type of device */ if (device->usage == 0) device->usage = data; strcpy(globtype, "USAGE"); break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "LOG_MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "LOG_MAX"); break; case TAG_GLOB_PHYS_MIN: strcpy(globtype, "PHYS_MIN"); break; case TAG_GLOB_PHYS_MAX: strcpy(globtype, "PHYS_MAX"); break; case TAG_GLOB_UNIT_EXP: strcpy(globtype, "EXP"); break; case TAG_GLOB_UNIT: strcpy(globtype, "UNIT"); break; case TAG_GLOB_REPORT_SZ: strcpy(globtype, "REPORT_SZ"); break; case TAG_GLOB_REPORT_ID: strcpy(globtype, "REPORT_ID"); /* New report, restart numbering */ inputnum = 0; break; case TAG_GLOB_REPORT_CNT: strcpy(globtype, "REPORT_CNT"); break; case TAG_GLOB_PUSH: strcpy(globtype, "PUSH"); break; case TAG_GLOB_POP: strcpy(globtype, "POP"); break; } /* Check to make sure we have a good tag number so we don't overflow array */ if (tag < TAG_GLOB_MAX) { switch (size) { case 1: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data); globalval[tag] = data; break; case 2: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data16); globalval[tag] = data16; break; case 4: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 
0x%x\n", indentstr, globtype, tag, size, data32); globalval[tag] = data32; break; } } else { dev_dbg(ddev, "%sGLOBALTAG: ILLEGAL TAG:%d SIZE: %d\n", indentstr, tag, size); } break; case TYPE_LOCAL: switch (tag) { case TAG_GLOB_USAGE: strcpy(globtype, "USAGE"); /* Always 1 byte */ usage = data; break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "MAX"); break; default: strcpy(globtype, "UNKNOWN"); break; } switch (size) { case 1: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data); break; case 2: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data16); break; case 4: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data32); break; } break; } } }
static void parse_hid_report_descriptor(struct gtco *device, char * report, int length) { struct device *ddev = &device->intf->dev; int x, i = 0; /* Tag primitive vars */ __u8 prefix; __u8 size; __u8 tag; __u8 type; __u8 data = 0; __u16 data16 = 0; __u32 data32 = 0; /* For parsing logic */ int inputnum = 0; __u32 usage = 0; /* Global Values, indexed by TAG */ __u32 globalval[TAG_GLOB_MAX]; __u32 oldval[TAG_GLOB_MAX]; /* Debug stuff */ char maintype = 'x'; char globtype[12]; int indent = 0; char indentstr[10] = ""; dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n"); /* Walk this report and pull out the info we need */ while (i < length) { prefix = report[i++]; /* Determine data size and save the data in the proper variable */ size = (1U << PREF_SIZE(prefix)) >> 1; if (i + size > length) { dev_err(ddev, "Not enough data (need %d, have %d)\n", i + size, length); break; } switch (size) { case 1: data = report[i]; break; case 2: data16 = get_unaligned_le16(&report[i]); break; case 4: data32 = get_unaligned_le32(&report[i]); break; } /* Skip size of data */ i += size; /* What we do depends on the tag type */ tag = PREF_TAG(prefix); type = PREF_TYPE(prefix); switch (type) { case TYPE_MAIN: strcpy(globtype, ""); switch (tag) { case TAG_MAIN_INPUT: /* * The INPUT MAIN tag signifies this is * information from a report. We need to * figure out what it is and store the * min/max values */ maintype = 'I'; if (data == 2) strcpy(globtype, "Variable"); else if (data == 3) strcpy(globtype, "Var|Const"); dev_dbg(ddev, "::::: Saving Report: %d input #%d Max: 0x%X(%d) Min:0x%X(%d) of %d bits\n", globalval[TAG_GLOB_REPORT_ID], inputnum, globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_REPORT_SZ] * globalval[TAG_GLOB_REPORT_CNT]); /* We can assume that the first two input items are always the X and Y coordinates. 
After that, we look for everything else by local usage value */ switch (inputnum) { case 0: /* X coord */ dev_dbg(ddev, "GER: X Usage: 0x%x\n", usage); if (device->max_X == 0) { device->max_X = globalval[TAG_GLOB_LOG_MAX]; device->min_X = globalval[TAG_GLOB_LOG_MIN]; } break; case 1: /* Y coord */ dev_dbg(ddev, "GER: Y Usage: 0x%x\n", usage); if (device->max_Y == 0) { device->max_Y = globalval[TAG_GLOB_LOG_MAX]; device->min_Y = globalval[TAG_GLOB_LOG_MIN]; } break; default: /* Tilt X */ if (usage == DIGITIZER_USAGE_TILT_X) { if (device->maxtilt_X == 0) { device->maxtilt_X = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_X = globalval[TAG_GLOB_LOG_MIN]; } } /* Tilt Y */ if (usage == DIGITIZER_USAGE_TILT_Y) { if (device->maxtilt_Y == 0) { device->maxtilt_Y = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_Y = globalval[TAG_GLOB_LOG_MIN]; } } /* Pressure */ if (usage == DIGITIZER_USAGE_TIP_PRESSURE) { if (device->maxpressure == 0) { device->maxpressure = globalval[TAG_GLOB_LOG_MAX]; device->minpressure = globalval[TAG_GLOB_LOG_MIN]; } } break; } inputnum++; break; case TAG_MAIN_OUTPUT: maintype = 'O'; break; case TAG_MAIN_FEATURE: maintype = 'F'; break; case TAG_MAIN_COL_START: maintype = 'S'; if (data == 0) { dev_dbg(ddev, "======>>>>>> Physical\n"); strcpy(globtype, "Physical"); } else dev_dbg(ddev, "======>>>>>>\n"); /* Indent the debug output */ indent++; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Save global tags */ for (x = 0; x < TAG_GLOB_MAX; x++) oldval[x] = globalval[x]; break; case TAG_MAIN_COL_END: dev_dbg(ddev, "<<<<<<======\n"); maintype = 'E'; indent--; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Copy global tags back */ for (x = 0; x < TAG_GLOB_MAX; x++) globalval[x] = oldval[x]; break; } switch (size) { case 1: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data); break; case 2: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data16); break; case 4: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data32); break; } break; case TYPE_GLOBAL: switch (tag) { case TAG_GLOB_USAGE: /* * First time we hit the global usage tag, * it should tell us the type of device */ if (device->usage == 0) device->usage = data; strcpy(globtype, "USAGE"); break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "LOG_MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "LOG_MAX"); break; case TAG_GLOB_PHYS_MIN: strcpy(globtype, "PHYS_MIN"); break; case TAG_GLOB_PHYS_MAX: strcpy(globtype, "PHYS_MAX"); break; case TAG_GLOB_UNIT_EXP: strcpy(globtype, "EXP"); break; case TAG_GLOB_UNIT: strcpy(globtype, "UNIT"); break; case TAG_GLOB_REPORT_SZ: strcpy(globtype, "REPORT_SZ"); break; case TAG_GLOB_REPORT_ID: strcpy(globtype, "REPORT_ID"); /* New report, restart numbering */ inputnum = 0; break; case TAG_GLOB_REPORT_CNT: strcpy(globtype, "REPORT_CNT"); break; case TAG_GLOB_PUSH: strcpy(globtype, "PUSH"); break; case TAG_GLOB_POP: strcpy(globtype, "POP"); break; } /* Check to make sure we have a good tag number so we don't overflow array */ if (tag < TAG_GLOB_MAX) { switch (size) { case 1: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data); globalval[tag] = data; break; case 2: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data16); globalval[tag] = data16; break; case 4: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 
0x%x\n", indentstr, globtype, tag, size, data32); globalval[tag] = data32; break; } } else { dev_dbg(ddev, "%sGLOBALTAG: ILLEGAL TAG:%d SIZE: %d\n", indentstr, tag, size); } break; case TYPE_LOCAL: switch (tag) { case TAG_GLOB_USAGE: strcpy(globtype, "USAGE"); /* Always 1 byte */ usage = data; break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "MAX"); break; default: strcpy(globtype, "UNKNOWN"); break; } switch (size) { case 1: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data); break; case 2: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data16); break; case 4: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data32); break; } break; } } }
{'added': [(233, '\t\tprefix = report[i++];'), (236, '\t\tsize = (1U << PREF_SIZE(prefix)) >> 1;'), (237, '\t\tif (i + size > length) {'), (238, '\t\t\tdev_err(ddev,'), (239, '\t\t\t\t"Not enough data (need %d, have %d)\\n",'), (240, '\t\t\t\ti + size, length);'), (241, '\t\t\tbreak;'), (242, '\t\t}'), (243, ''), (251, '\t\tcase 4:')], 'deleted': [(233, '\t\tprefix = report[i];'), (234, ''), (235, '\t\t/* Skip over prefix */'), (236, '\t\ti++;'), (239, '\t\tsize = PREF_SIZE(prefix);'), (247, '\t\tcase 3:'), (248, '\t\t\tsize = 4;')]}
10
7
569
3491
https://github.com/torvalds/linux
CVE-2017-16643
['CWE-125']
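The diff for this record (CVE-2017-16643, CWE-125) adds two things to the HID report-descriptor walk: the size code in the low two bits of the item prefix is decoded as 0, 1, 2 or 4 data bytes via (1U << PREF_SIZE(prefix)) >> 1, and the parser checks that those bytes actually exist before reading them. The following is a minimal standalone sketch of that same bounds-check pattern, not the kernel driver itself; the function and variable names are illustrative only.

/*
 * Sketch of the out-of-bounds-read fix pattern from the diff above:
 * decode the item's 2-bit size code, then refuse to read data that
 * would run past the end of the buffer.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int parse_items(const uint8_t *report, size_t length)
{
    size_t i = 0;

    while (i < length) {
        uint8_t prefix = report[i++];

        /* 2-bit size code -> 0, 1, 2 or 4 data bytes, as in the fix */
        size_t size = (1u << (prefix & 0x03)) >> 1;

        /* The added check: reject items whose data runs past the buffer */
        if (i + size > length) {
            fprintf(stderr, "truncated item at offset %zu\n", i - 1);
            return -1;
        }

        /* report[i] .. report[i + size - 1] are now safe to read */
        i += size;
    }
    return 0;
}

int main(void)
{
    /* Last item claims 4 data bytes but only 1 byte remains -> rejected */
    const uint8_t bad[] = { 0x01, 0xaa, 0x03, 0xbb };
    return parse_items(bad, sizeof(bad)) == -1 ? 0 : 1;
}

Without the length check, a descriptor whose final item advertises more data bytes than the buffer holds would be read past its end, which is exactly the CWE-125 condition the patch closes.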
diskstore.c
dsOpen
/* diskstore.c implements a very simple disk backed key-value store used * by Redis for the "disk" backend. This implementation uses the filesystem * to store key/value pairs. Every file represents a given key. * * The key path is calculated using the SHA1 of the key name. For instance * the key "foo" is stored as a file name called: * * /0b/ee/0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 * * The couples of characters from the hex output of SHA1 are also used * to locate two two levels of directories to store the file (as most * filesystems are not able to handle too many files in a single dir). * * In the end there are 65536 final directories (256 directories inside * every 256 top level directories), so that with 1 billion of files every * directory will contain in the average 15258 entires, that is ok with * most filesystems implementation. * * The actaul implementation of this disk store is highly related to the * filesystem implementation. This implementation may be replaced by * a B+TREE implementation in future implementations. * * Data ok every key is serialized using the same format used for .rdb * serialization. Everything is serialized on every entry: key name, * ttl information in case of keys with an associated expire time, and the * serialized value itself. * * Because the format is the same of the .rdb files it is trivial to create * an .rdb file starting from this format just by mean of scanning the * directories and concatenating entries, with the sole addition of an * .rdb header at the start and the end-of-db opcode at the end. * * ------------------------------------------------------------------------- * * Copyright (c) 2010-2011, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "redis.h" #include <fcntl.h> #include <sys/stat.h> int dsOpen(void) { struct stat sb; int retval; char *path = server.diskstore_path; if ((retval = stat(path,&sb) == -1) && errno != ENOENT) { redisLog(REDIS_WARNING, "Error opening disk store at %s: %s", path, strerror(errno)); return REDIS_ERR; } /* Directory already in place. 
Assume everything is ok. */ if (retval == 0 && S_ISDIR(sb.st_mode)) return REDIS_OK; /* File exists but it's not a directory */ if (retval == 0 && !S_ISDIR(sb.st_mode)) { redisLog(REDIS_WARNING,"Disk store at %s is not a directory", path); return REDIS_ERR; } /* New disk store, create the directory structure now, as creating * them in a lazy way is not a good idea, after very few insertions * we'll need most of the 65536 directories anyway. */ if (mkdir(path) == -1) { redisLog(REDIS_WARNING,"Disk store init failed creating dir %s: %s", path, strerror(errno)); return REDIS_ERR; } return REDIS_OK; } int dsClose(void) { return REDIS_OK; } int dsSet(redisDb *db, robj *key, robj *val) { } robj *dsGet(redisDb *db, robj *key) { } int dsExists(redisDb *db, robj *key) { }
/* diskstore.c implements a very simple disk backed key-value store used * by Redis for the "disk" backend. This implementation uses the filesystem * to store key/value pairs. Every file represents a given key. * * The key path is calculated using the SHA1 of the key name. For instance * the key "foo" is stored as a file name called: * * /0b/ee/0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 * * The couples of characters from the hex output of SHA1 are also used * to locate two two levels of directories to store the file (as most * filesystems are not able to handle too many files in a single dir). * * In the end there are 65536 final directories (256 directories inside * every 256 top level directories), so that with 1 billion of files every * directory will contain in the average 15258 entires, that is ok with * most filesystems implementation. * * Note that since Redis supports multiple databases, the actual key name * is: * * /0b/ee/<dbid>_0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 * * so for instance if the key is inside DB 0: * * /0b/ee/0_0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 * * The actaul implementation of this disk store is highly dependant to the * filesystem implementation itself. This implementation may be replaced by * a B+TREE implementation in future implementations. * * Data ok every key is serialized using the same format used for .rdb * serialization. Everything is serialized on every entry: key name, * ttl information in case of keys with an associated expire time, and the * serialized value itself. * * Because the format is the same of the .rdb files it is trivial to create * an .rdb file starting from this format just by mean of scanning the * directories and concatenating entries, with the sole addition of an * .rdb header at the start and the end-of-db opcode at the end. * * ------------------------------------------------------------------------- * * Copyright (c) 2010-2011, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "redis.h" #include <fcntl.h> #include <sys/stat.h> int dsOpen(void) { struct stat sb; int retval; char *path = server.ds_path; if ((retval = stat(path,&sb) == -1) && errno != ENOENT) { redisLog(REDIS_WARNING, "Error opening disk store at %s: %s", path, strerror(errno)); return REDIS_ERR; } /* Directory already in place. Assume everything is ok. */ if (retval == 0 && S_ISDIR(sb.st_mode)) return REDIS_OK; /* File exists but it's not a directory */ if (retval == 0 && !S_ISDIR(sb.st_mode)) { redisLog(REDIS_WARNING,"Disk store at %s is not a directory", path); return REDIS_ERR; } /* New disk store, create the directory structure now, as creating * them in a lazy way is not a good idea, after very few insertions * we'll need most of the 65536 directories anyway. */ if (mkdir(path) == -1) { redisLog(REDIS_WARNING,"Disk store init failed creating dir %s: %s", path, strerror(errno)); return REDIS_ERR; } return REDIS_OK; } int dsClose(void) { return REDIS_OK; } int dsSet(redisDb *db, robj *key, robj *val) { } robj *dsGet(redisDb *db, robj *key) { } int dsExists(redisDb *db, robj *key) { }
int dsOpen(void) { struct stat sb; int retval; char *path = server.diskstore_path; if ((retval = stat(path,&sb) == -1) && errno != ENOENT) { redisLog(REDIS_WARNING, "Error opening disk store at %s: %s", path, strerror(errno)); return REDIS_ERR; } /* Directory already in place. Assume everything is ok. */ if (retval == 0 && S_ISDIR(sb.st_mode)) return REDIS_OK; /* File exists but it's not a directory */ if (retval == 0 && !S_ISDIR(sb.st_mode)) { redisLog(REDIS_WARNING,"Disk store at %s is not a directory", path); return REDIS_ERR; } /* New disk store, create the directory structure now, as creating * them in a lazy way is not a good idea, after very few insertions * we'll need most of the 65536 directories anyway. */ if (mkdir(path) == -1) { redisLog(REDIS_WARNING,"Disk store init failed creating dir %s: %s", path, strerror(errno)); return REDIS_ERR; } return REDIS_OK; }
int dsOpen(void) { struct stat sb; int retval; char *path = server.ds_path; if ((retval = stat(path,&sb) == -1) && errno != ENOENT) { redisLog(REDIS_WARNING, "Error opening disk store at %s: %s", path, strerror(errno)); return REDIS_ERR; } /* Directory already in place. Assume everything is ok. */ if (retval == 0 && S_ISDIR(sb.st_mode)) return REDIS_OK; /* File exists but it's not a directory */ if (retval == 0 && !S_ISDIR(sb.st_mode)) { redisLog(REDIS_WARNING,"Disk store at %s is not a directory", path); return REDIS_ERR; } /* New disk store, create the directory structure now, as creating * them in a lazy way is not a good idea, after very few insertions * we'll need most of the 65536 directories anyway. */ if (mkdir(path) == -1) { redisLog(REDIS_WARNING,"Disk store init failed creating dir %s: %s", path, strerror(errno)); return REDIS_ERR; } return REDIS_OK; }
{'added': [(19, ' * Note that since Redis supports multiple databases, the actual key name'), (20, ' * is:'), (21, ' *'), (22, ' * /0b/ee/<dbid>_0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'), (23, ' *'), (24, ' * so for instance if the key is inside DB 0:'), (25, ' *'), (26, ' * /0b/ee/0_0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'), (27, ' *'), (28, ' * The actaul implementation of this disk store is highly dependant to the'), (29, ' * filesystem implementation itself. This implementation may be replaced by'), (80, ' char *path = server.ds_path;')], 'deleted': [(19, ' * The actaul implementation of this disk store is highly related to the'), (20, ' * filesystem implementation. This implementation may be replaced by'), (71, ' char *path = server.diskstore_path;')]}
12
3
33
198
https://github.com/antirez/redis
CVE-2013-0178
['CWE-20']
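The code_after comments in this record describe the on-disk layout the diskstore backend uses: two directory levels taken from the first two byte-pairs of the key's SHA1, with the file name prefixed by the database id (e.g. /0b/ee/0_0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33). The sketch below only illustrates that path construction; it is not Redis code, the helper name is mine, and it assumes the 40-character SHA1 hex digest has already been computed elsewhere.

/* Illustrative only: build "<root>/aa/bb/<dbid>_<sha1hex>" as described
 * in the comments of the record above. */
#include <stdio.h>
#include <stddef.h>

static int ds_key_path(char *buf, size_t buflen, const char *root,
                       int dbid, const char *sha1hex)
{
    /* First two hex byte-pairs select the two directory levels */
    int n = snprintf(buf, buflen, "%s/%.2s/%.2s/%d_%s",
                     root, sha1hex, sha1hex + 2, dbid, sha1hex);
    return (n > 0 && (size_t)n < buflen) ? 0 : -1;
}

int main(void)
{
    char path[256];
    if (ds_key_path(path, sizeof(path), "/var/redis/ds", 0,
                    "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") == 0)
        puts(path); /* /var/redis/ds/0b/ee/0_0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 */
    return 0;
}

Spreading keys across 256x256 leaf directories, as the record's comments explain, keeps the per-directory file count manageable on common filesystems.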
pkcs15-gemsafeV1.c
gemsafe_get_cert_len
/* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Initially written by David Mattes <david.mattes@boeing.com> */ /* Support for multiple key containers by Lukas Wunner <lukas@wunner.de> */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "internal.h" #include "pkcs15.h" #define MANU_ID "Gemplus" #define APPLET_NAME "GemSAFE V1" #define DRIVER_SERIAL_NUMBER "v0.9" #define GEMSAFE_APP_PATH "3F001600" #define GEMSAFE_PATH "3F0016000004" /* Apparently, the Applet max read "quanta" is 248 bytes * Gemalto ClassicClient reads files in chunks of 238 bytes */ #define GEMSAFE_READ_QUANTUM 248 #define GEMSAFE_MAX_OBJLEN 28672 int sc_pkcs15emu_gemsafeV1_init_ex(sc_pkcs15_card_t *, struct sc_aid *,sc_pkcs15emu_opt_t *); static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags); static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags); static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, int type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags); typedef struct cdata_st { char *label; int authority; const char *path; size_t index; size_t count; const char *id; int obj_flags; } cdata; const unsigned int gemsafe_cert_max = 12; cdata gemsafe_cert[] = { {"DS certificate #1", 0, GEMSAFE_PATH, 0, 0, "45", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #2", 0, GEMSAFE_PATH, 0, 0, "46", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #3", 0, GEMSAFE_PATH, 0, 0, "47", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #4", 0, GEMSAFE_PATH, 0, 0, "48", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #5", 0, GEMSAFE_PATH, 0, 0, "49", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #6", 0, GEMSAFE_PATH, 0, 0, "50", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #7", 0, GEMSAFE_PATH, 0, 0, "51", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #8", 0, GEMSAFE_PATH, 0, 0, "52", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #9", 0, GEMSAFE_PATH, 0, 0, "53", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #10", 0, GEMSAFE_PATH, 0, 0, "54", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #11", 0, GEMSAFE_PATH, 0, 0, "55", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #12", 0, GEMSAFE_PATH, 0, 0, "56", SC_PKCS15_CO_FLAG_MODIFIABLE}, }; typedef struct pdata_st { const u8 atr[SC_MAX_ATR_SIZE]; const size_t atr_len; const char *id; const char *label; const char *path; const int ref; const int type; const unsigned int maxlen; const unsigned int minlen; const int flags; 
const int tries_left; const char pad_char; const int obj_flags; } pindata; const unsigned int gemsafe_pin_max = 2; const pindata gemsafe_pin[] = { /* ATR-specific PIN policies, first match found is used: */ { {0x3B, 0x7D, 0x96, 0x00, 0x00, 0x80, 0x31, 0x80, 0x65, 0xB0, 0x83, 0x11, 0x48, 0xC8, 0x83, 0x00, 0x90, 0x00}, 18, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_ASCII_NUMERIC, 8, 4, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL, 3, 0x00, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE }, /* default PIN policy comes last: */ { { 0 }, 0, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_BCD, 16, 6, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL, 3, 0xFF, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE } }; typedef struct prdata_st { const char *id; char *label; unsigned int modulus_len; int usage; const char *path; int ref; const char *auth_id; int obj_flags; } prdata; #define USAGE_NONREP SC_PKCS15_PRKEY_USAGE_NONREPUDIATION #define USAGE_KE SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP #define USAGE_AUT SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP | \ SC_PKCS15_PRKEY_USAGE_SIGN prdata gemsafe_prkeys[] = { { "45", "DS key #1", 1024, USAGE_AUT, GEMSAFE_PATH, 0x03, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "46", "DS key #2", 1024, USAGE_AUT, GEMSAFE_PATH, 0x04, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "47", "DS key #3", 1024, USAGE_AUT, GEMSAFE_PATH, 0x05, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "48", "DS key #4", 1024, USAGE_AUT, GEMSAFE_PATH, 0x06, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "49", "DS key #5", 1024, USAGE_AUT, GEMSAFE_PATH, 0x07, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "50", "DS key #6", 1024, USAGE_AUT, GEMSAFE_PATH, 0x08, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "51", "DS key #7", 1024, USAGE_AUT, GEMSAFE_PATH, 0x09, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "52", "DS key #8", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0a, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "53", "DS key #9", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0b, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "54", "DS key #10", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0c, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "55", "DS key #11", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0d, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "56", "DS key #12", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0e, "01", SC_PKCS15_CO_FLAG_PRIVATE}, }; static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. 
Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. */ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. 
*/ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; } static int gemsafe_detect_card( sc_pkcs15_card_t *p15card) { if (strcmp(p15card->card->name, "GemSAFE V1")) return SC_ERROR_WRONG_CARD; return SC_SUCCESS; } static int sc_pkcs15emu_gemsafeV1_init( sc_pkcs15_card_t *p15card) { int r; unsigned int i; struct sc_path path; struct sc_file *file = NULL; struct sc_card *card = p15card->card; struct sc_apdu apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_log(p15card->card->ctx, "Setting pkcs15 parameters"); if (p15card->tokeninfo->label) free(p15card->tokeninfo->label); p15card->tokeninfo->label = malloc(strlen(APPLET_NAME) + 1); if (!p15card->tokeninfo->label) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->label, APPLET_NAME); if (p15card->tokeninfo->serial_number) free(p15card->tokeninfo->serial_number); p15card->tokeninfo->serial_number = malloc(strlen(DRIVER_SERIAL_NUMBER) + 1); if (!p15card->tokeninfo->serial_number) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->serial_number, DRIVER_SERIAL_NUMBER); /* the GemSAFE applet version number */ sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xca, 0xdf, 0x03); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); /* Manual says Le=0x05, but should be 0x08 to return full version number */ apdu.le = 0x08; apdu.lc = 0; apdu.datalen = 0; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 != 0x90 || apdu.sw2 != 0x00) return SC_ERROR_INTERNAL; if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* the manufacturer ID, in this case GemPlus */ if (p15card->tokeninfo->manufacturer_id) free(p15card->tokeninfo->manufacturer_id); p15card->tokeninfo->manufacturer_id = malloc(strlen(MANU_ID) + 1); if (!p15card->tokeninfo->manufacturer_id) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->manufacturer_id, MANU_ID); /* determine allocated key containers and length of certificates */ r = gemsafe_get_cert_len(card); if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* set certs */ sc_log(p15card->card->ctx, "Setting certificates"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; if (gemsafe_cert[i].label == NULL) continue; sc_format_path(gemsafe_cert[i].path, &path); sc_pkcs15_format_id(gemsafe_cert[i].id, &p15Id); path.index = gemsafe_cert[i].index; path.count = gemsafe_cert[i].count; sc_pkcs15emu_add_cert(p15card, SC_PKCS15_TYPE_CERT_X509, gemsafe_cert[i].authority, &path, &p15Id, gemsafe_cert[i].label, gemsafe_cert[i].obj_flags); } /* set gemsafe_pin */ sc_log(p15card->card->ctx, "Setting PIN"); for (i=0; i < gemsafe_pin_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; sc_pkcs15_format_id(gemsafe_pin[i].id, &p15Id); sc_format_path(gemsafe_pin[i].path, &path); if (gemsafe_pin[i].atr_len == 0 || (gemsafe_pin[i].atr_len == p15card->card->atr.len && memcmp(p15card->card->atr.value, gemsafe_pin[i].atr, p15card->card->atr.len) == 0)) { sc_pkcs15emu_add_pin(p15card, &p15Id, gemsafe_pin[i].label, &path, gemsafe_pin[i].ref, gemsafe_pin[i].type, gemsafe_pin[i].minlen, gemsafe_pin[i].maxlen, gemsafe_pin[i].flags, gemsafe_pin[i].tries_left, gemsafe_pin[i].pad_char, gemsafe_pin[i].obj_flags); break; } }; /* set private keys */ sc_log(p15card->card->ctx, "Setting private keys"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id, authId, *pauthId; struct sc_path path; 
int key_ref = 0x03; if (gemsafe_prkeys[i].label == NULL) continue; sc_pkcs15_format_id(gemsafe_prkeys[i].id, &p15Id); if (gemsafe_prkeys[i].auth_id) { sc_pkcs15_format_id(gemsafe_prkeys[i].auth_id, &authId); pauthId = &authId; } else pauthId = NULL; sc_format_path(gemsafe_prkeys[i].path, &path); /* * The key ref may be different for different sites; * by adding flags=n where the low order 4 bits can be * the key ref we can force it. */ if ( p15card->card->flags & 0x0F) { key_ref = p15card->card->flags & 0x0F; sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL, "Overriding key_ref %d with %d\n", gemsafe_prkeys[i].ref, key_ref); } else key_ref = gemsafe_prkeys[i].ref; sc_pkcs15emu_add_prkey(p15card, &p15Id, gemsafe_prkeys[i].label, SC_PKCS15_TYPE_PRKEY_RSA, gemsafe_prkeys[i].modulus_len, gemsafe_prkeys[i].usage, &path, key_ref, pauthId, gemsafe_prkeys[i].obj_flags); } /* select the application DF */ sc_log(p15card->card->ctx, "Selecting application DF"); sc_format_path(GEMSAFE_APP_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* set the application DF */ if (p15card->file_app) free(p15card->file_app); p15card->file_app = file; return SC_SUCCESS; } int sc_pkcs15emu_gemsafeV1_init_ex( sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK) return sc_pkcs15emu_gemsafeV1_init(p15card); else { int r = gemsafe_detect_card(p15card); if (r) return SC_ERROR_WRONG_CARD; return sc_pkcs15emu_gemsafeV1_init(p15card); } } static sc_pkcs15_df_t * sc_pkcs15emu_get_df(sc_pkcs15_card_t *p15card, unsigned int type) { sc_pkcs15_df_t *df; sc_file_t *file; int created = 0; while (1) { for (df = p15card->df_list; df; df = df->next) { if (df->type == type) { if (created) df->enumerated = 1; return df; } } assert(created == 0); file = sc_file_new(); if (!file) return NULL; sc_format_path("11001101", &file->path); sc_pkcs15_add_df(p15card, type, &file->path); sc_file_free(file); created++; } } static int sc_pkcs15emu_add_object(sc_pkcs15_card_t *p15card, int type, const char *label, void *data, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_object_t *obj; int df_type; obj = calloc(1, sizeof(*obj)); obj->type = type; obj->data = data; if (label) strncpy(obj->label, label, sizeof(obj->label)-1); obj->flags = obj_flags; if (auth_id) obj->auth_id = *auth_id; switch (type & SC_PKCS15_TYPE_CLASS_MASK) { case SC_PKCS15_TYPE_AUTH: df_type = SC_PKCS15_AODF; break; case SC_PKCS15_TYPE_PRKEY: df_type = SC_PKCS15_PRKDF; break; case SC_PKCS15_TYPE_PUBKEY: df_type = SC_PKCS15_PUKDF; break; case SC_PKCS15_TYPE_CERT: df_type = SC_PKCS15_CDF; break; default: sc_log(p15card->card->ctx, "Unknown PKCS15 object type %d", type); free(obj); return SC_ERROR_INVALID_ARGUMENTS; } obj->df = sc_pkcs15emu_get_df(p15card, df_type); sc_pkcs15_add_object(p15card, obj); return 0; } static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags) { sc_pkcs15_auth_info_t *info; info = calloc(1, sizeof(*info)); if (!info) LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); info->auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; info->auth_method = SC_AC_CHV; info->auth_id = *id; info->attrs.pin.min_length = min_length; info->attrs.pin.max_length = max_length; info->attrs.pin.stored_length = max_length; 
info->attrs.pin.type = type; info->attrs.pin.reference = ref; info->attrs.pin.flags = flags; info->attrs.pin.pad_char = pad_char; info->tries_left = tries_left; info->logged_in = SC_PIN_STATE_UNKNOWN; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, SC_PKCS15_TYPE_AUTH_PIN, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags) { sc_pkcs15_cert_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->authority = authority; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, int type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_prkey_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->modulus_length = modulus_length; info->usage = usage; info->native = 1; info->access_flags = SC_PKCS15_PRKEY_ACCESS_SENSITIVE | SC_PKCS15_PRKEY_ACCESS_ALWAYSSENSITIVE | SC_PKCS15_PRKEY_ACCESS_NEVEREXTRACTABLE | SC_PKCS15_PRKEY_ACCESS_LOCAL; info->key_reference = ref; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, auth_id, obj_flags); } /* SC_IMPLEMENT_DRIVER_VERSION("0.9.4") */
/* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Initially written by David Mattes <david.mattes@boeing.com> */ /* Support for multiple key containers by Lukas Wunner <lukas@wunner.de> */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "internal.h" #include "pkcs15.h" #define MANU_ID "Gemplus" #define APPLET_NAME "GemSAFE V1" #define DRIVER_SERIAL_NUMBER "v0.9" #define GEMSAFE_APP_PATH "3F001600" #define GEMSAFE_PATH "3F0016000004" /* Apparently, the Applet max read "quanta" is 248 bytes * Gemalto ClassicClient reads files in chunks of 238 bytes */ #define GEMSAFE_READ_QUANTUM 248 #define GEMSAFE_MAX_OBJLEN 28672 int sc_pkcs15emu_gemsafeV1_init_ex(sc_pkcs15_card_t *, struct sc_aid *,sc_pkcs15emu_opt_t *); static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags); static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags); static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, int type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags); typedef struct cdata_st { char *label; int authority; const char *path; size_t index; size_t count; const char *id; int obj_flags; } cdata; const unsigned int gemsafe_cert_max = 12; cdata gemsafe_cert[] = { {"DS certificate #1", 0, GEMSAFE_PATH, 0, 0, "45", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #2", 0, GEMSAFE_PATH, 0, 0, "46", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #3", 0, GEMSAFE_PATH, 0, 0, "47", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #4", 0, GEMSAFE_PATH, 0, 0, "48", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #5", 0, GEMSAFE_PATH, 0, 0, "49", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #6", 0, GEMSAFE_PATH, 0, 0, "50", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #7", 0, GEMSAFE_PATH, 0, 0, "51", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #8", 0, GEMSAFE_PATH, 0, 0, "52", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #9", 0, GEMSAFE_PATH, 0, 0, "53", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #10", 0, GEMSAFE_PATH, 0, 0, "54", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #11", 0, GEMSAFE_PATH, 0, 0, "55", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #12", 0, GEMSAFE_PATH, 0, 0, "56", SC_PKCS15_CO_FLAG_MODIFIABLE}, }; typedef struct pdata_st { const u8 atr[SC_MAX_ATR_SIZE]; const size_t atr_len; const char *id; const char *label; const char *path; const int ref; const int type; const unsigned int maxlen; const unsigned int minlen; const int flags; 
const int tries_left; const char pad_char; const int obj_flags; } pindata; const unsigned int gemsafe_pin_max = 2; const pindata gemsafe_pin[] = { /* ATR-specific PIN policies, first match found is used: */ { {0x3B, 0x7D, 0x96, 0x00, 0x00, 0x80, 0x31, 0x80, 0x65, 0xB0, 0x83, 0x11, 0x48, 0xC8, 0x83, 0x00, 0x90, 0x00}, 18, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_ASCII_NUMERIC, 8, 4, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL, 3, 0x00, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE }, /* default PIN policy comes last: */ { { 0 }, 0, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_BCD, 16, 6, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL, 3, 0xFF, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE } }; typedef struct prdata_st { const char *id; char *label; unsigned int modulus_len; int usage; const char *path; int ref; const char *auth_id; int obj_flags; } prdata; #define USAGE_NONREP SC_PKCS15_PRKEY_USAGE_NONREPUDIATION #define USAGE_KE SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP #define USAGE_AUT SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP | \ SC_PKCS15_PRKEY_USAGE_SIGN prdata gemsafe_prkeys[] = { { "45", "DS key #1", 1024, USAGE_AUT, GEMSAFE_PATH, 0x03, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "46", "DS key #2", 1024, USAGE_AUT, GEMSAFE_PATH, 0x04, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "47", "DS key #3", 1024, USAGE_AUT, GEMSAFE_PATH, 0x05, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "48", "DS key #4", 1024, USAGE_AUT, GEMSAFE_PATH, 0x06, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "49", "DS key #5", 1024, USAGE_AUT, GEMSAFE_PATH, 0x07, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "50", "DS key #6", 1024, USAGE_AUT, GEMSAFE_PATH, 0x08, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "51", "DS key #7", 1024, USAGE_AUT, GEMSAFE_PATH, 0x09, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "52", "DS key #8", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0a, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "53", "DS key #9", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0b, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "54", "DS key #10", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0c, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "55", "DS key #11", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0d, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "56", "DS key #12", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0e, "01", SC_PKCS15_CO_FLAG_PRIVATE}, }; static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. 
Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01 && i < gemsafe_cert_max) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. */ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. 
*/ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; } static int gemsafe_detect_card( sc_pkcs15_card_t *p15card) { if (strcmp(p15card->card->name, "GemSAFE V1")) return SC_ERROR_WRONG_CARD; return SC_SUCCESS; } static int sc_pkcs15emu_gemsafeV1_init( sc_pkcs15_card_t *p15card) { int r; unsigned int i; struct sc_path path; struct sc_file *file = NULL; struct sc_card *card = p15card->card; struct sc_apdu apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_log(p15card->card->ctx, "Setting pkcs15 parameters"); if (p15card->tokeninfo->label) free(p15card->tokeninfo->label); p15card->tokeninfo->label = malloc(strlen(APPLET_NAME) + 1); if (!p15card->tokeninfo->label) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->label, APPLET_NAME); if (p15card->tokeninfo->serial_number) free(p15card->tokeninfo->serial_number); p15card->tokeninfo->serial_number = malloc(strlen(DRIVER_SERIAL_NUMBER) + 1); if (!p15card->tokeninfo->serial_number) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->serial_number, DRIVER_SERIAL_NUMBER); /* the GemSAFE applet version number */ sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xca, 0xdf, 0x03); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); /* Manual says Le=0x05, but should be 0x08 to return full version number */ apdu.le = 0x08; apdu.lc = 0; apdu.datalen = 0; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 != 0x90 || apdu.sw2 != 0x00) return SC_ERROR_INTERNAL; if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* the manufacturer ID, in this case GemPlus */ if (p15card->tokeninfo->manufacturer_id) free(p15card->tokeninfo->manufacturer_id); p15card->tokeninfo->manufacturer_id = malloc(strlen(MANU_ID) + 1); if (!p15card->tokeninfo->manufacturer_id) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->manufacturer_id, MANU_ID); /* determine allocated key containers and length of certificates */ r = gemsafe_get_cert_len(card); if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* set certs */ sc_log(p15card->card->ctx, "Setting certificates"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; if (gemsafe_cert[i].label == NULL) continue; sc_format_path(gemsafe_cert[i].path, &path); sc_pkcs15_format_id(gemsafe_cert[i].id, &p15Id); path.index = gemsafe_cert[i].index; path.count = gemsafe_cert[i].count; sc_pkcs15emu_add_cert(p15card, SC_PKCS15_TYPE_CERT_X509, gemsafe_cert[i].authority, &path, &p15Id, gemsafe_cert[i].label, gemsafe_cert[i].obj_flags); } /* set gemsafe_pin */ sc_log(p15card->card->ctx, "Setting PIN"); for (i=0; i < gemsafe_pin_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; sc_pkcs15_format_id(gemsafe_pin[i].id, &p15Id); sc_format_path(gemsafe_pin[i].path, &path); if (gemsafe_pin[i].atr_len == 0 || (gemsafe_pin[i].atr_len == p15card->card->atr.len && memcmp(p15card->card->atr.value, gemsafe_pin[i].atr, p15card->card->atr.len) == 0)) { sc_pkcs15emu_add_pin(p15card, &p15Id, gemsafe_pin[i].label, &path, gemsafe_pin[i].ref, gemsafe_pin[i].type, gemsafe_pin[i].minlen, gemsafe_pin[i].maxlen, gemsafe_pin[i].flags, gemsafe_pin[i].tries_left, gemsafe_pin[i].pad_char, gemsafe_pin[i].obj_flags); break; } }; /* set private keys */ sc_log(p15card->card->ctx, "Setting private keys"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id, authId, *pauthId; struct sc_path path; 
int key_ref = 0x03; if (gemsafe_prkeys[i].label == NULL) continue; sc_pkcs15_format_id(gemsafe_prkeys[i].id, &p15Id); if (gemsafe_prkeys[i].auth_id) { sc_pkcs15_format_id(gemsafe_prkeys[i].auth_id, &authId); pauthId = &authId; } else pauthId = NULL; sc_format_path(gemsafe_prkeys[i].path, &path); /* * The key ref may be different for different sites; * by adding flags=n where the low order 4 bits can be * the key ref we can force it. */ if ( p15card->card->flags & 0x0F) { key_ref = p15card->card->flags & 0x0F; sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL, "Overriding key_ref %d with %d\n", gemsafe_prkeys[i].ref, key_ref); } else key_ref = gemsafe_prkeys[i].ref; sc_pkcs15emu_add_prkey(p15card, &p15Id, gemsafe_prkeys[i].label, SC_PKCS15_TYPE_PRKEY_RSA, gemsafe_prkeys[i].modulus_len, gemsafe_prkeys[i].usage, &path, key_ref, pauthId, gemsafe_prkeys[i].obj_flags); } /* select the application DF */ sc_log(p15card->card->ctx, "Selecting application DF"); sc_format_path(GEMSAFE_APP_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* set the application DF */ if (p15card->file_app) free(p15card->file_app); p15card->file_app = file; return SC_SUCCESS; } int sc_pkcs15emu_gemsafeV1_init_ex( sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK) return sc_pkcs15emu_gemsafeV1_init(p15card); else { int r = gemsafe_detect_card(p15card); if (r) return SC_ERROR_WRONG_CARD; return sc_pkcs15emu_gemsafeV1_init(p15card); } } static sc_pkcs15_df_t * sc_pkcs15emu_get_df(sc_pkcs15_card_t *p15card, unsigned int type) { sc_pkcs15_df_t *df; sc_file_t *file; int created = 0; while (1) { for (df = p15card->df_list; df; df = df->next) { if (df->type == type) { if (created) df->enumerated = 1; return df; } } assert(created == 0); file = sc_file_new(); if (!file) return NULL; sc_format_path("11001101", &file->path); sc_pkcs15_add_df(p15card, type, &file->path); sc_file_free(file); created++; } } static int sc_pkcs15emu_add_object(sc_pkcs15_card_t *p15card, int type, const char *label, void *data, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_object_t *obj; int df_type; obj = calloc(1, sizeof(*obj)); obj->type = type; obj->data = data; if (label) strncpy(obj->label, label, sizeof(obj->label)-1); obj->flags = obj_flags; if (auth_id) obj->auth_id = *auth_id; switch (type & SC_PKCS15_TYPE_CLASS_MASK) { case SC_PKCS15_TYPE_AUTH: df_type = SC_PKCS15_AODF; break; case SC_PKCS15_TYPE_PRKEY: df_type = SC_PKCS15_PRKDF; break; case SC_PKCS15_TYPE_PUBKEY: df_type = SC_PKCS15_PUKDF; break; case SC_PKCS15_TYPE_CERT: df_type = SC_PKCS15_CDF; break; default: sc_log(p15card->card->ctx, "Unknown PKCS15 object type %d", type); free(obj); return SC_ERROR_INVALID_ARGUMENTS; } obj->df = sc_pkcs15emu_get_df(p15card, df_type); sc_pkcs15_add_object(p15card, obj); return 0; } static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags) { sc_pkcs15_auth_info_t *info; info = calloc(1, sizeof(*info)); if (!info) LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); info->auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; info->auth_method = SC_AC_CHV; info->auth_id = *id; info->attrs.pin.min_length = min_length; info->attrs.pin.max_length = max_length; info->attrs.pin.stored_length = max_length; 
info->attrs.pin.type = type; info->attrs.pin.reference = ref; info->attrs.pin.flags = flags; info->attrs.pin.pad_char = pad_char; info->tries_left = tries_left; info->logged_in = SC_PIN_STATE_UNKNOWN; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, SC_PKCS15_TYPE_AUTH_PIN, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags) { sc_pkcs15_cert_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->authority = authority; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, int type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_prkey_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->modulus_length = modulus_length; info->usage = usage; info->native = 1; info->access_flags = SC_PKCS15_PRKEY_ACCESS_SENSITIVE | SC_PKCS15_PRKEY_ACCESS_ALWAYSSENSITIVE | SC_PKCS15_PRKEY_ACCESS_NEVEREXTRACTABLE | SC_PKCS15_PRKEY_ACCESS_LOCAL; info->key_reference = ref; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, auth_id, obj_flags); } /* SC_IMPLEMENT_DRIVER_VERSION("0.9.4") */
static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. */ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. */ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; }
static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01 && i < gemsafe_cert_max) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. */ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. */ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; }
{'added': [(211, '\twhile (ibuf[ind] == 0x01 && i < gemsafe_cert_max) {')], 'deleted': [(211, '\twhile (ibuf[ind] == 0x01) {')]}
1
1
455
3267
https://github.com/OpenSC/OpenSC
CVE-2018-16391
['CWE-119']
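The one-line diff recorded for this row adds an index guard (`i < gemsafe_cert_max`) so the table-parsing loop in gemsafe_get_cert_len can no longer write past the fixed-size gemsafe_prkeys/gemsafe_cert arrays, which is the CWE-119 issue tracked as CVE-2018-16391. Below is a minimal, self-contained C sketch of that guard pattern; it is not OpenSC code, and the names TABLE_MAX, table and parse_alloc_table are purely illustrative.

/* Sketch (assumptions noted above): a parser loop over attacker-influenced
 * input must stop both when the input stops matching AND when the index
 * reaches the fixed table size, exactly as the patched while-condition does. */
#include <stdio.h>
#include <stddef.h>

#define TABLE_MAX 12            /* fixed-size destination, like gemsafe_cert_max */

static int table[TABLE_MAX];

/* Walks 'buf' the way gemsafe_get_cert_len walks its key-allocation table:
 * each record starts with 0x01; without the 'i < TABLE_MAX' guard, input that
 * encodes more than TABLE_MAX records would write past 'table'. */
static size_t parse_alloc_table(const unsigned char *buf, size_t len)
{
	size_t ind = 0, i = 0;

	while (ind + 1 < len && buf[ind] == 0x01 && i < TABLE_MAX) {
		table[i] = buf[ind + 1];   /* record payload */
		ind += 2;                  /* fixed-size records in this sketch */
		i++;
	}
	return i;                          /* number of records actually stored */
}

int main(void)
{
	/* 14 records: more than TABLE_MAX, so the guard must stop the loop. */
	unsigned char buf[28];
	size_t k, n;

	for (k = 0; k < sizeof buf; k += 2) {
		buf[k] = 0x01;
		buf[k + 1] = (unsigned char)(k / 2);
	}
	n = parse_alloc_table(buf, sizeof buf);
	printf("stored %zu of %zu records\n", n, sizeof buf / 2);
	return 0;
}

Run as-is, this prints "stored 12 of 14 records": the extra records are ignored instead of overflowing the array, which is the behavior the patched loop condition in func_after guarantees.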
bin.c
string_scan_range
/* radare - LGPL - Copyright 2009-2017 - pancake, nibble, dso */ // TODO: dlopen library and show address #include <r_bin.h> #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_io.h> #include <config.h> R_LIB_VERSION (r_bin); #define bprintf if(binfile->rbin->verbose)eprintf #define DB a->sdb; #define RBINLISTFREE(x)\ if (x) { \ r_list_free (x);\ x = NULL;\ } #define REBASE_PADDR(o, l, type_t)\ do { \ RListIter *_it;\ type_t *_el;\ r_list_foreach ((l), _it, _el) { \ _el->paddr += (o)->loadaddr;\ }\ } while (0) #define ARCHS_KEY "archs" #if !defined(R_BIN_STATIC_PLUGINS) #define R_BIN_STATIC_PLUGINS 0 #endif #if !defined(R_BIN_XTR_STATIC_PLUGINS) #define R_BIN_XTR_STATIC_PLUGINS 0 #endif static RBinPlugin *bin_static_plugins[] = { R_BIN_STATIC_PLUGINS, NULL }; static RBinXtrPlugin *bin_xtr_static_plugins[] = { R_BIN_XTR_STATIC_PLUGINS, NULL }; static int is_data_section(RBinFile *a, RBinSection *s); static RList *get_strings(RBinFile *a, int min, int dump); static void r_bin_object_delete_items(RBinObject *o); static void r_bin_object_free(void /*RBinObject*/ *o_); // static int r_bin_object_set_items(RBinFile *binfile, RBinObject *o); static int r_bin_file_set_bytes(RBinFile *binfile, const ut8 *bytes, ut64 sz, bool steal_ptr); //static int remove_bin_file_by_binfile (RBin *bin, RBinFile * binfile); //static void r_bin_free_bin_files (RBin *bin); static void r_bin_file_free(void /*RBinFile*/ *bf_); static RBinFile *r_bin_file_create_append(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, int fd, const char *xtrname, bool steal_ptr); static RBinFile *r_bin_file_xtr_load_bytes(RBin *bin, RBinXtrPlugin *xtr, const char *filename, const ut8 *bytes, ut64 sz, ut64 file_sz, ut64 baseaddr, ut64 loadaddr, int idx, int fd, int rawstr); int r_bin_load_io_at_offset_as_sz(RBin *bin, int fd, ut64 baseaddr, ut64 loadaddr, int xtr_idx, ut64 offset, const char *name, ut64 sz); static RBinPlugin *r_bin_get_binplugin_by_name(RBin *bin, const char *name); static RBinXtrPlugin *r_bin_get_xtrplugin_by_name(RBin *bin, const char *name); static RBinPlugin *r_bin_get_binplugin_any(RBin *bin); static RBinObject *r_bin_object_new(RBinFile *binfile, RBinPlugin *plugin, ut64 baseaddr, ut64 loadaddr, ut64 offset, ut64 sz); static RBinFile *r_bin_file_new(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, int fd, const char *xtrname, Sdb *sdb, bool steal_ptr); static RBinFile *r_bin_file_new_from_bytes(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, ut64 baseaddr, ut64 loadaddr, int fd, const char *pluginname, const char *xtrname, ut64 offset, bool steal_ptr); static int getoffset(RBin *bin, int type, int idx) { RBinFile *a = r_bin_cur (bin); RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (plugin && plugin->get_offset) { return plugin->get_offset (a, type, idx); } return -1; } static const char *getname(RBin *bin, int type, int idx) { RBinFile *a = r_bin_cur (bin); RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (plugin && plugin->get_name) { return plugin->get_name (a, type, idx); } return NULL; } static int r_bin_file_object_add(RBinFile *binfile, RBinObject *o) { if (!o) { return false; } r_list_append (binfile->objs, o); r_bin_file_set_cur_binfile_obj (binfile->rbin, binfile, o); return true; } static void binobj_set_baddr(RBinObject *o, ut64 baddr) { if (!o || baddr == UT64_MAX) { return; } o->baddr_shift = baddr - o->baddr; } static ut64 binobj_a2b(RBinObject *o, ut64 addr) { return addr + 
(o? o->baddr_shift: 0); } static void filterStrings (RBin *bin, RList *strings) { RBinString *ptr; RListIter *iter; r_list_foreach (strings, iter, ptr) { char *dec = (char *)r_base64_decode_dyn (ptr->string, -1); if (dec) { char *s = ptr->string; do { char *dec2 = (char *)r_base64_decode_dyn (s, -1); if (!dec2) { break; } if (!r_str_is_printable (dec2)) { free (dec2); break; } free (dec); s = dec = dec2; } while (true); if (r_str_is_printable (dec) && strlen (dec) > 3) { free (ptr->string); ptr->string = dec; ptr->type = R_STRING_TYPE_BASE64; } else { free (dec); } } } } R_API void r_bin_iobind(RBin *bin, RIO *io) { r_io_bind (io, &bin->iob); } // TODO: move these two function do a different file R_API RBinXtrData *r_bin_xtrdata_new(RBuffer *buf, ut64 offset, ut64 size, ut32 file_count, RBinXtrMetadata *metadata) { RBinXtrData *data = R_NEW0 (RBinXtrData); if (!data) { return NULL; } data->offset = offset; data->size = size; data->file_count = file_count; data->metadata = metadata; data->loaded = 0; data->buffer = malloc (size + 1); // data->laddr = 0; /// XXX if (!data->buffer) { free (data); return NULL; } memcpy (data->buffer, r_buf_buffer (buf), size); data->buffer[size] = 0; return data; } R_API const char *r_bin_string_type (int type) { switch (type) { case 'a': return "ascii"; case 'u': return "utf8"; case 'w': return "utf16le"; case 'W': return "utf32le"; case 'b': return "base64"; } return "ascii"; // XXX } R_API void r_bin_xtrdata_free(void /*RBinXtrData*/ *data_) { RBinXtrData *data = data_; if (data) { if (data->metadata) { free (data->metadata->libname); free (data->metadata->arch); free (data->metadata->machine); free (data->metadata); } free (data->file); free (data->buffer); free (data); } } R_API RBinObject *r_bin_file_object_get_cur(RBinFile *binfile) { return binfile? binfile->o: NULL; } R_API RBinObject *r_bin_object_get_cur(RBin *bin) { return bin ? r_bin_file_object_get_cur (r_bin_cur (bin)) : NULL; } R_API RBinPlugin *r_bin_file_cur_plugin(RBinFile *binfile) { return binfile && binfile->o? binfile->o->plugin: NULL; } R_API int r_bin_file_cur_set_plugin(RBinFile *binfile, RBinPlugin *plugin) { if (binfile && binfile->o) { binfile->o->plugin = plugin; return true; } return false; } // maybe too big sometimes? 2KB of stack eaten here.. #define R_STRING_SCAN_BUFFER_SIZE 2048 static int string_scan_range(RList *list, const ut8 *buf, int min, const ut64 from, const ut64 to, int type) { ut8 tmp[R_STRING_SCAN_BUFFER_SIZE]; ut64 str_start, needle = from; int count = 0, i, rc, runes; int str_type = R_STRING_TYPE_DETECT; if (type == -1) { type = R_STRING_TYPE_DETECT; } if (!buf || !min) { return -1; } while (needle < to) { rc = r_utf8_decode (buf + needle, to - needle, NULL); if (!rc) { needle++; continue; } if (type == R_STRING_TYPE_DETECT) { char *w = (char *)buf + needle + rc; if ((to - needle) > 4) { bool is_wide32 = needle + rc + 2 < to && !w[0] && !w[1] && !w[2] && w[3] && !w[4]; if (is_wide32) { str_type = R_STRING_TYPE_WIDE32; } else { bool is_wide = needle + rc + 2 < to && !w[0] && w[1] && !w[2]; str_type = is_wide? 
R_STRING_TYPE_WIDE: R_STRING_TYPE_ASCII; } } else { str_type = R_STRING_TYPE_ASCII; } } else { str_type = type; } runes = 0; str_start = needle; /* Eat a whole C string */ for (rc = i = 0; i < sizeof (tmp) - 3 && needle < to; i += rc) { RRune r = {0}; if (str_type == R_STRING_TYPE_WIDE32) { rc = r_utf32le_decode (buf + needle, to - needle, &r); if (rc) { rc = 4; } } else if (str_type == R_STRING_TYPE_WIDE) { rc = r_utf16le_decode (buf + needle, to - needle, &r); if (rc == 1) { rc = 2; } } else { rc = r_utf8_decode (buf + needle, to - needle, &r); if (rc > 1) { str_type = R_STRING_TYPE_UTF8; } } /* Invalid sequence detected */ if (!rc) { needle++; break; } needle += rc; if (r_isprint (r)) { if (str_type == R_STRING_TYPE_WIDE32) { if (r == 0xff) { r = 0; } } rc = r_utf8_encode (&tmp[i], r); runes++; /* Print the escape code */ } else if (r && r < 0x100 && strchr ("\b\v\f\n\r\t\a\e", (char)r)) { if ((i + 32) < sizeof (tmp) && r < 28) { tmp[i + 0] = '\\'; tmp[i + 1] = " abtnvfr e"[r]; } else { // string too long break; } rc = 2; runes++; } else { /* \0 marks the end of C-strings */ break; } } tmp[i++] = '\0'; if (runes >= min) { if (str_type == R_STRING_TYPE_ASCII) { // reduce false positives int j; for (j = 0; j < i; j++) { char ch = tmp[j]; if (ch != '\n' && ch != '\r' && ch != '\t') { if (!IS_PRINTABLE (tmp[j])) { continue; } } } } if (list) { RBinString *new = R_NEW0 (RBinString); if (!new) { break; } new->type = str_type; new->length = runes; new->size = needle - str_start; new->ordinal = count++; // TODO: move into adjust_offset switch (str_type) { case R_STRING_TYPE_WIDE: { const ut8 *p = buf + str_start - 2; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 2; // \xff\xfe } } break; case R_STRING_TYPE_WIDE32: { const ut8 *p = buf + str_start - 4; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 4; // \xff\xfe\x00\x00 } } break; } new->paddr = new->vaddr = str_start; new->string = r_str_ndup ((const char *)tmp, i); r_list_append (list, new); } else { // DUMP TO STDOUT. raw dumping for rabin2 -zzz printf ("0x%08" PFMT64x " %s\n", str_start, tmp); } } } return count; } static void get_strings_range(RBinFile *arch, RList *list, int min, ut64 from, ut64 to) { RBinPlugin *plugin = r_bin_file_cur_plugin (arch); RBinString *ptr; RListIter *it; if (!arch || !arch->buf || !arch->buf->buf) { return; } if (!arch->rawstr) { if (!plugin || !plugin->info) { return; } } if (!min) { min = plugin? plugin->minstrlen: 4; } /* Some plugins return zero, fix it up */ if (!min) { min = 4; } if (min < 0) { return; } if (!to || to > arch->buf->length) { to = arch->buf->length; } if (arch->rawstr != 2) { ut64 size = to - from; // in case of dump ignore here if (arch->rbin->maxstrbuf && size && size > arch->rbin->maxstrbuf) { if (arch->rbin->verbose) { eprintf ("WARNING: bin_strings buffer is too big " "(0x%08" PFMT64x ")." " Use -zzz or set bin.maxstrbuf " "(RABIN2_MAXSTRBUF) in r2 (rabin2)\n", size); } return; } } if (string_scan_range (list, arch->buf->buf, min, from, to, -1) < 0) { return; } r_list_foreach (list, it, ptr) { RBinSection *s = r_bin_get_section_at (arch->o, ptr->paddr, false); if (s) { ptr->vaddr = s->vaddr + (ptr->paddr - s->paddr); } } } static int is_data_section(RBinFile *a, RBinSection *s) { if (s->has_strings || s->is_data) { return true; } if (s->is_data) { return true; } // Rust return (strstr (s->name, "_const") != NULL); } static RList *get_strings(RBinFile *a, int min, int dump) { RListIter *iter; RBinSection *section; RBinObject *o = a? 
a->o: NULL; RList *ret; if (!o) { return NULL; } if (dump) { /* dump to stdout, not stored in list */ ret = NULL; } else { ret = r_list_newf (r_bin_string_free); if (!ret) { return NULL; } } if (o->sections && !r_list_empty (o->sections) && !a->rawstr) { r_list_foreach (o->sections, iter, section) { if (is_data_section (a, section)) { get_strings_range (a, ret, min, section->paddr, section->paddr + section->size); } } r_list_foreach (o->sections, iter, section) { RBinString *s; RListIter *iter2; /* load objc/swift strings */ const int bits = (a->o && a->o->info) ? a->o->info->bits : 32; const int cfstr_size = (bits == 64) ? 32 : 16; const int cfstr_offs = (bits == 64) ? 16 : 8; if (strstr (section->name, "__cfstring")) { int i; // XXX do not walk if bin.strings == 0 ut8 *p; for (i = 0; i < section->size; i += cfstr_size) { ut8 buf[32]; if (!r_buf_read_at ( a->buf, section->paddr + i + cfstr_offs, buf, sizeof (buf))) { break; } p = buf; ut64 cfstr_vaddr = section->vaddr + i; ut64 cstr_vaddr = (bits == 64) ? r_read_le64 (p) : r_read_le32 (p); r_list_foreach (ret, iter2, s) { if (s->vaddr == cstr_vaddr) { RBinString *new = R_NEW0 (RBinString); new->type = s->type; new->length = s->length; new->size = s->size; new->ordinal = s->ordinal; new->paddr = new->vaddr = cfstr_vaddr; new->string = r_str_newf ("cstr.%s", s->string); r_list_append (ret, new); break; } } } } } } else { get_strings_range (a, ret, min, 0, a->size); } return ret; } R_API RList* r_bin_raw_strings(RBinFile *a, int min) { RList *l = NULL; if (a) { int tmp = a->rawstr; a->rawstr = 2; l = get_strings (a, min, 0); a->rawstr = tmp; } return l; } R_API int r_bin_dump_strings(RBinFile *a, int min) { get_strings (a, min, 1); return 0; } /* This is very slow if there are lot of symbols */ R_API int r_bin_load_languages(RBinFile *binfile) { if (r_bin_lang_rust (binfile)) { return R_BIN_NM_RUST; } if (r_bin_lang_swift (binfile)) { return R_BIN_NM_SWIFT; } if (r_bin_lang_objc (binfile)) { return R_BIN_NM_OBJC; } if (r_bin_lang_cxx (binfile)) { return R_BIN_NM_CXX; } if (r_bin_lang_dlang (binfile)) { return R_BIN_NM_DLANG; } if (r_bin_lang_msvc (binfile)) { return R_BIN_NM_MSVC; } return R_BIN_NM_NONE; } static void mem_free(void *data) { RBinMem *mem = (RBinMem *)data; if (mem && mem->mirrors) { mem->mirrors->free = mem_free; r_list_free (mem->mirrors); mem->mirrors = NULL; } free (mem); } static void r_bin_object_delete_items(RBinObject *o) { ut32 i = 0; if (!o) { return; } r_list_free (o->entries); r_list_free (o->fields); r_list_free (o->imports); r_list_free (o->libs); r_list_free (o->relocs); r_list_free (o->sections); r_list_free (o->strings); r_list_free (o->symbols); r_list_free (o->classes); r_list_free (o->lines); sdb_free (o->kv); if (o->mem) { o->mem->free = mem_free; } r_list_free (o->mem); o->mem = NULL; o->entries = NULL; o->fields = NULL; o->imports = NULL; o->libs = NULL; o->relocs = NULL; o->sections = NULL; o->strings = NULL; o->symbols = NULL; o->classes = NULL; o->lines = NULL; o->info = NULL; o->kv = NULL; for (i = 0; i < R_BIN_SYM_LAST; i++) { free (o->binsym[i]); o->binsym[i] = NULL; } } R_API void r_bin_info_free(RBinInfo *rb) { if (!rb) { return; } free (rb->intrp); free (rb->file); free (rb->type); free (rb->bclass); free (rb->rclass); free (rb->arch); free (rb->cpu); free (rb->machine); free (rb->os); free (rb->subsystem); free (rb->rpath); free (rb->guid); free (rb->debug_file_name); free (rb); } R_API void r_bin_import_free(void *_imp) { RBinImport *imp = (RBinImport *)_imp; if (imp) { R_FREE (imp->name); 
R_FREE (imp->classname); R_FREE (imp->descriptor); free (imp); } } R_API void r_bin_symbol_free(void *_sym) { RBinSymbol *sym = (RBinSymbol *)_sym; free (sym->name); free (sym->classname); free (sym); } R_API void r_bin_string_free(void *_str) { RBinString *str = (RBinString *)_str; free (str->string); free (str); } static void r_bin_object_free(void /*RBinObject*/ *o_) { RBinObject *o = o_; if (!o) { return; } r_bin_info_free (o->info); r_bin_object_delete_items (o); R_FREE (o); } static char *swiftField(const char *dn, const char *cn) { char *p = strstr (dn, ".getter_"); if (!p) { p = strstr (dn, ".setter_"); if (!p) { p = strstr (dn, ".method_"); } } if (p) { char *q = strstr (dn, cn); if (q && q[strlen (cn)] == '.') { q = strdup (q + strlen (cn) + 1); char *r = strchr (q, '.'); if (r) { *r = 0; } return q; } } return NULL; } R_API RList *r_bin_classes_from_symbols (RBinFile *bf, RBinObject *o) { RBinSymbol *sym; RListIter *iter; RList *symbols = o->symbols; RList *classes = o->classes; if (!classes) { classes = r_list_newf ((RListFree)r_bin_class_free); } r_list_foreach (symbols, iter, sym) { if (sym->name[0] != '_') { continue; } const char *cn = sym->classname; if (cn) { RBinClass *c = r_bin_class_new (bf, sym->classname, NULL, 0); if (!c) { continue; } // swift specific char *dn = sym->dname; char *fn = swiftField (dn, cn); if (fn) { // eprintf ("FIELD %s %s\n", cn, fn); RBinField *f = r_bin_field_new (sym->paddr, sym->vaddr, sym->size, fn, NULL, NULL); r_list_append (c->fields, f); free (fn); } else { char *mn = strstr (dn, ".."); if (mn) { // eprintf ("META %s %s\n", sym->classname, mn); } else { char *mn = strstr (dn, cn); if (mn && mn[strlen(cn)] == '.') { mn += strlen (cn) + 1; // eprintf ("METHOD %s %s\n", sym->classname, mn); r_list_append (c->methods, sym); } } } } } if (r_list_empty (classes)) { r_list_free (classes); return NULL; } return classes; } // XXX - change this to RBinObject instead of RBinFile // makes no sense to pass in a binfile and set the RBinObject // kinda a clunky functions R_API int r_bin_object_set_items(RBinFile *binfile, RBinObject *o) { RBinObject *old_o; RBinPlugin *cp; int i, minlen; RBin *bin; if (!binfile || !o || !o->plugin) { return false; } bin = binfile->rbin; old_o = binfile->o; cp = o->plugin; if (binfile->rbin->minstrlen > 0) { minlen = binfile->rbin->minstrlen; } else { minlen = cp->minstrlen; } binfile->o = o; if (cp->baddr) { ut64 old_baddr = o->baddr; o->baddr = cp->baddr (binfile); binobj_set_baddr (o, old_baddr); } if (cp->boffset) { o->boffset = cp->boffset (binfile); } // XXX: no way to get info from xtr pluginz? 
// Note, object size can not be set from here due to potential // inconsistencies if (cp->size) { o->size = cp->size (binfile); } if (cp->binsym) { for (i = 0; i < R_BIN_SYM_LAST; i++) { o->binsym[i] = cp->binsym (binfile, i); if (o->binsym[i]) { o->binsym[i]->paddr += o->loadaddr; } } } if (cp->entries) { o->entries = cp->entries (binfile); REBASE_PADDR (o, o->entries, RBinAddr); } if (cp->fields) { o->fields = cp->fields (binfile); if (o->fields) { o->fields->free = r_bin_field_free; REBASE_PADDR (o, o->fields, RBinField); } } if (cp->imports) { r_list_free (o->imports); o->imports = cp->imports (binfile); if (o->imports) { o->imports->free = r_bin_import_free; } } //if (bin->filter_rules & (R_BIN_REQ_SYMBOLS | R_BIN_REQ_IMPORTS)) { if (true) { if (cp->symbols) { o->symbols = cp->symbols (binfile); if (o->symbols) { o->symbols->free = r_bin_symbol_free; REBASE_PADDR (o, o->symbols, RBinSymbol); if (bin->filter) { r_bin_filter_symbols (o->symbols); } } } } //} o->info = cp->info? cp->info (binfile): NULL; if (cp->libs) { o->libs = cp->libs (binfile); } if (cp->sections) { // XXX sections are populated by call to size if (!o->sections) { o->sections = cp->sections (binfile); } REBASE_PADDR (o, o->sections, RBinSection); if (bin->filter) { r_bin_filter_sections (o->sections); } } if (bin->filter_rules & (R_BIN_REQ_RELOCS | R_BIN_REQ_IMPORTS)) { if (cp->relocs) { o->relocs = cp->relocs (binfile); REBASE_PADDR (o, o->relocs, RBinReloc); } } if (bin->filter_rules & R_BIN_REQ_STRINGS) { if (cp->strings) { o->strings = cp->strings (binfile); } else { o->strings = get_strings (binfile, minlen, 0); } if (bin->debase64) { filterStrings (bin, o->strings); } REBASE_PADDR (o, o->strings, RBinString); } if (bin->filter_rules & R_BIN_REQ_CLASSES) { if (cp->classes) { o->classes = cp->classes (binfile); if (r_bin_lang_swift (binfile)) { o->classes = r_bin_classes_from_symbols (binfile, o); } } else { o->classes = r_bin_classes_from_symbols (binfile, o); } if (bin->filter) { r_bin_filter_classes (o->classes); } } if (cp->lines) { o->lines = cp->lines (binfile); } if (cp->get_sdb) { Sdb* new_kv = cp->get_sdb (binfile); if (new_kv != o->kv) { sdb_free (o->kv); } o->kv = new_kv; } if (cp->mem) { o->mem = cp->mem (binfile); } if (bin->filter_rules & (R_BIN_REQ_SYMBOLS | R_BIN_REQ_IMPORTS)) { o->lang = r_bin_load_languages (binfile); } binfile->o = old_o; return true; } // XXX - this is a rather hacky way to do things, there may need to be a better // way. R_API int r_bin_load(RBin *bin, const char *file, ut64 baseaddr, ut64 loadaddr, int xtr_idx, int fd, int rawstr) { if (!bin) { return false; } // ALIAS? return r_bin_load_as (bin, file, baseaddr, loadaddr, // xtr_idx, fd, rawstr, 0, file); RIOBind *iob = &(bin->iob); if (!iob) { return false; } if (!iob->io) { iob->io = r_io_new (); //wtf if (!iob->io) { return false; } bin->io_owned = true; r_io_bind (iob->io, &bin->iob); //memleak? 
iob = &bin->iob; } if (!iob->desc_get (iob->io, fd)) { fd = iob->fd_open (iob->io, file, R_IO_READ, 0644); } bin->rawstr = rawstr; // Use the current RIODesc otherwise r_io_map_select can swap them later on if (fd < 0) { r_io_free (iob->io); memset (&bin->iob, 0, sizeof (bin->iob)); bin->io_owned = false; return false; } //Use the current RIODesc otherwise r_io_map_select can swap them later on return r_bin_load_io (bin, fd, baseaddr, loadaddr, xtr_idx); } R_API int r_bin_load_as(RBin *bin, const char *file, ut64 baseaddr, ut64 loadaddr, int xtr_idx, int fd, int rawstr, int fileoffset, const char *name) { RIOBind *iob = &(bin->iob); if (!iob || !iob->io) { return false; } if (fd < 0) { fd = iob->fd_open (iob->io, file, R_IO_READ, 0644); } if (fd < 0) { return false; } return r_bin_load_io_at_offset_as (bin, fd, baseaddr, loadaddr, xtr_idx, fileoffset, name); } R_API int r_bin_reload(RBin *bin, int fd, ut64 baseaddr) { RIOBind *iob = &(bin->iob); RList *the_obj_list = NULL; int res = false; RBinFile *bf = NULL; ut8 *buf_bytes = NULL; ut64 sz = UT64_MAX; if (!iob || !iob->io) { res = false; goto error; } const char *name = iob->fd_get_name (iob->io, fd); bf = r_bin_file_find_by_name (bin, name); if (!bf) { res = false; goto error; } the_obj_list = bf->objs; bf->objs = r_list_newf ((RListFree)r_bin_object_free); // invalidate current object reference bf->o = NULL; sz = iob->fd_size (iob->io, fd); if (sz == UT64_MAX || sz > (64 * 1024 * 1024)) { // too big, probably wrong eprintf ("Too big\n"); res = false; goto error; } if (sz == UT64_MAX && iob->fd_is_dbg (iob->io, fd)) { // attempt a local open and read // This happens when a plugin like debugger does not have a // fixed size. // if there is no fixed size or its MAXED, there is no way to // definitively // load the bin-properly. Many of the plugins require all // content and are not // stream based loaders int tfd = iob->fd_open (iob->io, name, R_IO_READ, 0); if (tfd < 0) { res = false; goto error; } sz = iob->fd_size (iob->io, tfd); if (sz == UT64_MAX) { iob->fd_close (iob->io, tfd); res = false; goto error; } buf_bytes = calloc (1, sz + 1); if (!buf_bytes) { iob->fd_close (iob->io, tfd); res = false; goto error; } if (!iob->read_at (iob->io, 0LL, buf_bytes, sz)) { free (buf_bytes); iob->fd_close (iob->io, tfd); res = false; goto error; } iob->fd_close (iob->io, tfd); } else { buf_bytes = calloc (1, sz + 1); if (!buf_bytes) { res = false; goto error; } if (!iob->fd_read_at (iob->io, fd, 0LL, buf_bytes, sz)) { free (buf_bytes); res = false; goto error; } } bool yes_plz_steal_ptr = true; r_bin_file_set_bytes (bf, buf_bytes, sz, yes_plz_steal_ptr); if (r_list_length (the_obj_list) == 1) { RBinObject *old_o = (RBinObject *)r_list_get_n (the_obj_list, 0); res = r_bin_load_io_at_offset_as (bin, fd, baseaddr, old_o->loadaddr, 0, old_o->boffset, NULL); } else { RListIter *iter = NULL; RBinObject *old_o; r_list_foreach (the_obj_list, iter, old_o) { // XXX - naive. do we need a way to prevent multiple "anys" from being opened? 
res = r_bin_load_io_at_offset_as (bin, fd, baseaddr, old_o->loadaddr, 0, old_o->boffset, old_o->plugin->name); } } bf->o = r_list_get_n (bf->objs, 0); error: r_list_free (the_obj_list); return res; } R_API int r_bin_load_io(RBin *bin, int fd, ut64 baseaddr, ut64 loadaddr, int xtr_idx) { return r_bin_load_io_at_offset_as (bin, fd, baseaddr, loadaddr, xtr_idx, 0, NULL); } R_API int r_bin_load_io_at_offset_as_sz(RBin *bin, int fd, ut64 baseaddr, ut64 loadaddr, int xtr_idx, ut64 offset, const char *name, ut64 sz) { RIOBind *iob = &(bin->iob); RIO *io = iob? iob->io: NULL; RListIter *it; ut8 *buf_bytes = NULL; RBinXtrPlugin *xtr; ut64 file_sz = UT64_MAX; RBinFile *binfile = NULL; int tfd = -1; if (!io || (fd < 0) || (st64)sz < 0) { return false; } bool is_debugger = iob->fd_is_dbg (io, fd); const char *fname = iob->fd_get_name (io, fd); if (loadaddr == UT64_MAX) { loadaddr = 0; } file_sz = iob->fd_size (io, fd); // file_sz = UT64_MAX happens when attaching to frida:// and other non-debugger io plugins which results in double opening if (is_debugger && file_sz == UT64_MAX) { tfd = iob->fd_open (io, fname, R_IO_READ, 0644); if (tfd >= 1) { file_sz = iob->fd_size (io, tfd); } } if (!sz) { sz = file_sz; } bin->file = fname; sz = R_MIN (file_sz, sz); if (!r_list_length (bin->binfiles)) { if (is_debugger) { //use the temporal RIODesc to read the content of the file instead //from the memory if (tfd >= 0) { buf_bytes = calloc (1, sz + 1); iob->fd_read_at (io, tfd, 0, buf_bytes, sz); // iob->fd_close (io, tfd); } } } if (!buf_bytes) { buf_bytes = calloc (1, sz + 1); if (!buf_bytes) { return false; } ut64 seekaddr = is_debugger? baseaddr: loadaddr; if (!iob->fd_read_at (io, fd, seekaddr, buf_bytes, sz)) { sz = 0LL; } } if (!name && (st64)sz > 0) { // XXX - for the time being this is fine, but we may want to // change the name to something like // <xtr_name>:<bin_type_name> r_list_foreach (bin->binxtrs, it, xtr) { if (xtr && xtr->check_bytes (buf_bytes, sz)) { if (xtr && (xtr->extract_from_bytes || xtr->extractall_from_bytes)) { if (is_debugger && sz != file_sz) { R_FREE (buf_bytes); if (tfd < 0) { tfd = iob->fd_open (io, fname, R_IO_READ, 0); } sz = iob->fd_size (io, tfd); if (sz != UT64_MAX) { buf_bytes = calloc (1, sz + 1); if (buf_bytes) { (void) iob->fd_read_at (io, tfd, 0, buf_bytes, sz); } } //DOUBLECLOSE UAF : iob->fd_close (io, tfd); tfd = -1; // marking it closed } else if (sz != file_sz) { (void) iob->read_at (io, 0LL, buf_bytes, sz); } binfile = r_bin_file_xtr_load_bytes (bin, xtr, fname, buf_bytes, sz, file_sz, baseaddr, loadaddr, xtr_idx, fd, bin->rawstr); } xtr = NULL; } } } if (!binfile) { bool steal_ptr = true; // transfer buf_bytes ownership to binfile binfile = r_bin_file_new_from_bytes ( bin, fname, buf_bytes, sz, file_sz, bin->rawstr, baseaddr, loadaddr, fd, name, NULL, offset, steal_ptr); } return binfile? r_bin_file_set_cur_binfile (bin, binfile): false; } R_API bool r_bin_load_io_at_offset_as(RBin *bin, int fd, ut64 baseaddr, ut64 loadaddr, int xtr_idx, ut64 offset, const char *name) { // adding file_sz to help reduce the performance impact on the system // in this case the number of bytes read will be limited to 2MB // (MIN_LOAD_SIZE) // if it fails, the whole file is loaded. 
const ut64 MAX_LOAD_SIZE = 0; // 0xfffff; //128 * (1 << 10 << 10); int res = r_bin_load_io_at_offset_as_sz (bin, fd, baseaddr, loadaddr, xtr_idx, offset, name, MAX_LOAD_SIZE); if (!res) { res = r_bin_load_io_at_offset_as_sz (bin, fd, baseaddr, loadaddr, xtr_idx, offset, name, UT64_MAX); } return res; } R_API int r_bin_file_deref_by_bind(RBinBind *binb) { RBin *bin = binb? binb->bin: NULL; RBinFile *a = r_bin_cur (bin); return r_bin_file_deref (bin, a); } R_API int r_bin_file_deref(RBin *bin, RBinFile *a) { RBinObject *o = r_bin_cur_object (bin); int res = false; if (a && !o) { //r_list_delete_data (bin->binfiles, a); res = true; } else if (a && o->referenced - 1 < 1) { //r_list_delete_data (bin->binfiles, a); res = true; // not thread safe } else if (o) { o->referenced--; } // it is possible for a file not // to be bound to RBin and RBinFiles // XXX - is this an ok assumption? if (bin) bin->cur = NULL; return res; } R_API int r_bin_file_ref_by_bind(RBinBind *binb) { RBin *bin = binb? binb->bin: NULL; RBinFile *a = r_bin_cur (bin); return r_bin_file_ref (bin, a); } R_API int r_bin_file_ref(RBin *bin, RBinFile *a) { RBinObject *o = r_bin_cur_object (bin); if (a && o) { o->referenced--; return true; } return false; } static void r_bin_file_free(void /*RBinFile*/ *bf_) { RBinFile *a = bf_; RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (!a) { return; } // Binary format objects are connected to the // RBinObject, so the plugin must destroy the // format data first if (plugin && plugin->destroy) { plugin->destroy (a); } if (a->curxtr && a->curxtr->destroy && a->xtr_obj) { a->curxtr->free_xtr ((void *)(a->xtr_obj)); } r_buf_free (a->buf); // TODO: unset related sdb namespaces if (a && a->sdb_addrinfo) { sdb_free (a->sdb_addrinfo); a->sdb_addrinfo = NULL; } free (a->file); a->o = NULL; r_list_free (a->objs); r_list_free (a->xtr_data); r_id_pool_kick_id (a->rbin->file_ids, a->id); memset (a, 0, sizeof (RBinFile)); free (a); } static RBinFile *r_bin_file_create_append(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, int fd, const char *xtrname, bool steal_ptr) { RBinFile *bf = r_bin_file_new (bin, file, bytes, sz, file_sz, rawstr, fd, xtrname, bin->sdb, steal_ptr); if (bf) { r_list_append (bin->binfiles, bf); } return bf; } // This function populate RBinFile->xtr_data, that information is enough to // create RBinObject when needed using r_bin_file_object_new_from_xtr_data static RBinFile *r_bin_file_xtr_load_bytes(RBin *bin, RBinXtrPlugin *xtr, const char *filename, const ut8 *bytes, ut64 sz, ut64 file_sz, ut64 baseaddr, ut64 loadaddr, int idx, int fd, int rawstr) { if (!bin || !bytes) { return NULL; } RBinFile *bf = r_bin_file_find_by_name (bin, filename); if (!bf) { bf = r_bin_file_create_append (bin, filename, bytes, sz, file_sz, rawstr, fd, xtr->name, false); if (!bf) { return NULL; } if (!bin->cur) { bin->cur = bf; } } if (bf->xtr_data) { r_list_free (bf->xtr_data); } if (xtr && bytes) { RList *xtr_data_list = xtr->extractall_from_bytes (bin, bytes, sz); RListIter *iter; RBinXtrData *xtr; //populate xtr_data with baddr and laddr that will be used later on //r_bin_file_object_new_from_xtr_data r_list_foreach (xtr_data_list, iter, xtr) { xtr->baddr = baseaddr? baseaddr : UT64_MAX; xtr->laddr = loadaddr? loadaddr : UT64_MAX; } bf->loadaddr = loadaddr; bf->xtr_data = xtr_data_list ? 
xtr_data_list : NULL; } return bf; } static RBinPlugin *r_bin_get_binplugin_by_name(RBin *bin, const char *name) { RBinPlugin *plugin; RListIter *it; if (bin && name) { r_list_foreach (bin->plugins, it, plugin) { if (!strcmp (plugin->name, name)) { return plugin; } } } return NULL; } R_API RBinPlugin *r_bin_get_binplugin_by_bytes(RBin *bin, const ut8 *bytes, ut64 sz) { RBinPlugin *plugin; RListIter *it; if (!bin || !bytes) { return NULL; } r_list_foreach (bin->plugins, it, plugin) { if (plugin->check_bytes && plugin->check_bytes (bytes, sz)) { return plugin; } } return NULL; } static RBinXtrPlugin *r_bin_get_xtrplugin_by_name(RBin *bin, const char *name) { RBinXtrPlugin *xtr; RListIter *it; if (!bin || !name) return NULL; r_list_foreach (bin->binxtrs, it, xtr) { if (!strcmp (xtr->name, name)) { return xtr; } // must be set to null xtr = NULL; } return NULL; } static RBinPlugin *r_bin_get_binplugin_any(RBin *bin) { return r_bin_get_binplugin_by_name (bin, "any"); } static RBinObject *r_bin_object_new(RBinFile *binfile, RBinPlugin *plugin, ut64 baseaddr, ut64 loadaddr, ut64 offset, ut64 sz) { const ut8 *bytes = binfile? r_buf_buffer (binfile->buf): NULL; ut64 bytes_sz = binfile? r_buf_size (binfile->buf): 0; Sdb *sdb = binfile? binfile->sdb: NULL; RBinObject *o = R_NEW0 (RBinObject); if (!o) { return NULL; } o->obj_size = bytes && (bytes_sz >= sz + offset)? sz: 0; o->boffset = offset; o->id = r_num_rand (0xfffff000); o->kv = sdb_new0 (); o->baddr = baseaddr; o->baddr_shift = 0; o->plugin = plugin; o->loadaddr = loadaddr != UT64_MAX ? loadaddr : 0; // XXX more checking will be needed here // only use LoadBytes if buffer offset != 0 // if (offset != 0 && bytes && plugin && plugin->load_bytes && (bytes_sz // >= sz + offset) ) { if (bytes && plugin && plugin->load_bytes && (bytes_sz >= sz + offset)) { ut64 bsz = bytes_sz - offset; if (sz < bsz) { bsz = sz; } o->bin_obj = plugin->load_bytes (binfile, bytes + offset, sz, loadaddr, sdb); if (!o->bin_obj) { bprintf ( "Error in r_bin_object_new: load_bytes failed " "for %s plugin\n", plugin->name); sdb_free (o->kv); free (o); return NULL; } } else if (binfile && plugin && plugin->load) { // XXX - haha, this is a hack. // switching out the current object for the new // one to be processed RBinObject *old_o = binfile->o; binfile->o = o; if (plugin->load (binfile)) { binfile->sdb_info = o->kv; // mark as do not walk sdb_ns_set (binfile->sdb, "info", o->kv); } else { binfile->o = old_o; } o->obj_size = sz; } else { sdb_free (o->kv); free (o); return NULL; } // XXX - binfile could be null here meaning an improper load // XXX - object size cant be set here and needs to be set where // where the object is created from. 
The reason for this is to prevent // mis-reporting when the file is loaded from impartial bytes or is // extracted // from a set of bytes in the file r_bin_object_set_items (binfile, o); r_bin_file_object_add (binfile, o); // XXX this is a very hacky alternative to rewriting the // RIO stuff, as discussed here: return o; } #define LIMIT_SIZE 0 static int r_bin_file_set_bytes(RBinFile *binfile, const ut8 *bytes, ut64 sz, bool steal_ptr) { if (!bytes) { return false; } r_buf_free (binfile->buf); binfile->buf = r_buf_new (); #if LIMIT_SIZE if (sz > 1024 * 1024) { eprintf ("Too big\n"); // TODO: use r_buf_io instead of setbytes all the time to save memory return NULL; } #else if (steal_ptr) { r_buf_set_bytes_steal (binfile->buf, bytes, sz); } else { r_buf_set_bytes (binfile->buf, bytes, sz); } #endif return binfile->buf != NULL; } static RBinFile *r_bin_file_new(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, int fd, const char *xtrname, Sdb *sdb, bool steal_ptr) { RBinFile *binfile = R_NEW0 (RBinFile); if (!binfile) { return NULL; } if (!r_id_pool_grab_id (bin->file_ids, &binfile->id)) { if (steal_ptr) { // we own the ptr, free on error free ((void*) bytes); } free (binfile); //no id means no binfile return NULL; } int res = r_bin_file_set_bytes (binfile, bytes, sz, steal_ptr); if (!res && steal_ptr) { // we own the ptr, free on error free((void*) bytes); } binfile->rbin = bin; binfile->file = file? strdup (file): NULL; binfile->rawstr = rawstr; binfile->fd = fd; binfile->curxtr = r_bin_get_xtrplugin_by_name (bin, xtrname); binfile->sdb = sdb; binfile->size = file_sz; binfile->xtr_data = r_list_newf ((RListFree)r_bin_xtrdata_free); binfile->objs = r_list_newf ((RListFree)r_bin_object_free); binfile->xtr_obj = NULL; if (!binfile->buf) { //r_bin_file_free (binfile); binfile->buf = r_buf_new (); // return NULL; } if (sdb) { binfile->sdb = sdb_ns (sdb, sdb_fmt (0, "fd.%d", fd), 1); sdb_set (binfile->sdb, "archs", "0:0:x86:32", 0); // x86?? /* NOTE */ /* Those refs++ are necessary because sdb_ns() doesnt rerefs all * sub-namespaces */ /* And if any namespace is referenced backwards it gets * double-freed */ binfile->sdb_addrinfo = sdb_ns (binfile->sdb, "addrinfo", 1); binfile->sdb_addrinfo->refs++; sdb_ns_set (sdb, "cur", binfile->sdb); binfile->sdb->refs++; } return binfile; } R_API bool r_bin_file_object_new_from_xtr_data(RBin *bin, RBinFile *bf, ut64 baseaddr, ut64 loadaddr, RBinXtrData *data) { RBinObject *o = NULL; RBinPlugin *plugin = NULL; ut8* bytes; ut64 offset = data? data->offset: 0; ut64 sz = data ? data->size : 0; if (!data || !bf) { return false; } // for right now the bytes used will just be the offest into the binfile // buffer // if the extraction requires some sort of transformation then this will // need to be fixed // here. 
bytes = data->buffer; if (!bytes) { return false; } plugin = r_bin_get_binplugin_by_bytes (bin, (const ut8*)bytes, sz); if (!plugin) { plugin = r_bin_get_binplugin_any (bin); } r_buf_free (bf->buf); bf->buf = r_buf_new_with_bytes ((const ut8*)bytes, data->size); //r_bin_object_new append the new object into binfile o = r_bin_object_new (bf, plugin, baseaddr, loadaddr, offset, sz); // size is set here because the reported size of the object depends on // if loaded from xtr plugin or partially read if (!o) { return false; } if (o && !o->size) { o->size = sz; } bf->narch = data->file_count; if (!o->info) { o->info = R_NEW0 (RBinInfo); } free (o->info->file); free (o->info->arch); free (o->info->machine); free (o->info->type); o->info->file = strdup (bf->file); o->info->arch = strdup (data->metadata->arch); o->info->machine = strdup (data->metadata->machine); o->info->type = strdup (data->metadata->type); o->info->bits = data->metadata->bits; o->info->has_crypto = bf->o->info->has_crypto; data->loaded = true; return true; } static RBinFile *r_bin_file_new_from_bytes(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, ut64 baseaddr, ut64 loadaddr, int fd, const char *pluginname, const char *xtrname, ut64 offset, bool steal_ptr) { ut8 binfile_created = false; RBinPlugin *plugin = NULL; RBinXtrPlugin *xtr = NULL; RBinObject *o = NULL; RBinFile *bf = NULL; if (sz == UT64_MAX) { return NULL; } if (xtrname) { xtr = r_bin_get_xtrplugin_by_name (bin, xtrname); } if (xtr && xtr->check_bytes (bytes, sz)) { return r_bin_file_xtr_load_bytes (bin, xtr, file, bytes, sz, file_sz, baseaddr, loadaddr, 0, fd, rawstr); } if (!bf) { bf = r_bin_file_create_append (bin, file, bytes, sz, file_sz, rawstr, fd, xtrname, steal_ptr); if (!bf) { if (!steal_ptr) { // we own the ptr, free on error free ((void*) bytes); } return NULL; } binfile_created = true; } if (bin->force) { plugin = r_bin_get_binplugin_by_name (bin, bin->force); } if (!plugin) { if (pluginname) { plugin = r_bin_get_binplugin_by_name (bin, pluginname); } if (!plugin) { plugin = r_bin_get_binplugin_by_bytes (bin, bytes, sz); if (!plugin) { plugin = r_bin_get_binplugin_any (bin); } } } o = r_bin_object_new (bf, plugin, baseaddr, loadaddr, 0, r_buf_size (bf->buf)); // size is set here because the reported size of the object depends on // if loaded from xtr plugin or partially read if (o && !o->size) { o->size = file_sz; } if (!o) { if (bf && binfile_created) { r_list_delete_data (bin->binfiles, bf); } return NULL; } /* WTF */ if (strcmp (plugin->name, "any")) { bf->narch = 1; } /* free unnecessary rbuffer (???) 
*/ return bf; } static void plugin_free(RBinPlugin *p) { if (p && p->fini) { p->fini (NULL); } R_FREE (p); } // rename to r_bin_plugin_add like the rest R_API int r_bin_add(RBin *bin, RBinPlugin *foo) { RListIter *it; RBinPlugin *plugin; if (foo->init) { foo->init (bin->user); } r_list_foreach (bin->plugins, it, plugin) { if (!strcmp (plugin->name, foo->name)) { return false; } } plugin = R_NEW0 (RBinPlugin); memcpy (plugin, foo, sizeof (RBinPlugin)); r_list_append (bin->plugins, plugin); return true; } R_API int r_bin_xtr_add(RBin *bin, RBinXtrPlugin *foo) { RListIter *it; RBinXtrPlugin *xtr; if (foo->init) { foo->init (bin->user); } // avoid duplicates r_list_foreach (bin->binxtrs, it, xtr) { if (!strcmp (xtr->name, foo->name)) { return false; } } r_list_append (bin->binxtrs, foo); return true; } R_API void *r_bin_free(RBin *bin) { if (!bin) { return NULL; } if (bin->io_owned) { r_io_free (bin->iob.io); } bin->file = NULL; free (bin->force); free (bin->srcdir); //r_bin_free_bin_files (bin); r_list_free (bin->binfiles); r_list_free (bin->binxtrs); r_list_free (bin->plugins); sdb_free (bin->sdb); r_id_pool_free (bin->file_ids); memset (bin, 0, sizeof (RBin)); free (bin); return NULL; } static int r_bin_print_plugin_details(RBin *bin, RBinPlugin *bp, int json) { if (json == 'q') { bin->cb_printf ("%s\n", bp->name); } else if (json) { bin->cb_printf ( "{\"name\":\"%s\",\"description\":\"%s\"," "\"license\":\"%s\"}\n", bp->name, bp->desc, bp->license? bp->license: "???"); } else { bin->cb_printf ("Name: %s\n", bp->name); bin->cb_printf ("Description: %s\n", bp->desc); if (bp->license) { bin->cb_printf ("License: %s\n", bp->license); } if (bp->version) { bin->cb_printf ("Version: %s\n", bp->version); } if (bp->author) { bin->cb_printf ("Author: %s\n", bp->author); } } return true; } static int r_bin_print_xtrplugin_details(RBin *bin, RBinXtrPlugin *bx, int json) { if (json == 'q') { bin->cb_printf ("%s\n", bx->name); } else if (json) { bin->cb_printf ( "{\"name\":\"%s\",\"description\":\"%s\"," "\"license\":\"%s\"}\n", bx->name, bx->desc, bx->license? bx->license: "???"); } else { bin->cb_printf ("Name: %s\n", bx->name); bin->cb_printf ("Description: %s\n", bx->desc); if (bx->license) { bin->cb_printf ("License: %s\n", bx->license); } } return true; } R_API int r_bin_list(RBin *bin, int json) { RListIter *it; RBinPlugin *bp; RBinXtrPlugin *bx; if (json == 'q') { r_list_foreach (bin->plugins, it, bp) { bin->cb_printf ("%s\n", bp->name); } r_list_foreach (bin->binxtrs, it, bx) { bin->cb_printf ("%s\n", bx->name); } } else if (json) { int i; i = 0; bin->cb_printf ("{\"bin\":["); r_list_foreach (bin->plugins, it, bp) { bin->cb_printf ( "%s{\"name\":\"%s\",\"description\":\"%s\"," "\"license\":\"%s\"}", i? ",": "", bp->name, bp->desc, bp->license? bp->license: "???"); i++; } i = 0; bin->cb_printf ("],\"xtr\":["); r_list_foreach (bin->binxtrs, it, bx) { bin->cb_printf ( "%s{\"name\":\"%s\",\"description\":\"%s\"," "\"license\":\"%s\"}", i? ",": "", bx->name, bx->desc, bx->license? bx->license: "???"); i++; } bin->cb_printf ("]}\n"); } else { r_list_foreach (bin->plugins, it, bp) { bin->cb_printf ("bin %-11s %s (%s) %s %s\n", bp->name, bp->desc, bp->license? bp->license: "???", bp->version? bp->version: "", bp->author? bp->author: ""); } r_list_foreach (bin->binxtrs, it, bx) { bin->cb_printf ("xtr %-11s %s (%s)\n", bx->name, bx->desc, bx->license? 
bx->license: "???"); } } return false; } R_API int r_bin_list_plugin(RBin *bin, const char* name, int json) { RListIter *it; RBinPlugin *bp; RBinXtrPlugin *bx; r_list_foreach (bin->plugins, it, bp) { if (!r_str_cmp (name, bp->name, strlen (name))) { continue; } return r_bin_print_plugin_details (bin, bp, json); } r_list_foreach (bin->binxtrs, it, bx) { if (!r_str_cmp (name, bx->name, strlen (name))) { continue; } return r_bin_print_xtrplugin_details (bin, bx, json); } eprintf ("cannot find plugin %s\n", name); return false; } static ut64 binobj_get_baddr(RBinObject *o) { return o? o->baddr + o->baddr_shift: UT64_MAX; } R_API ut64 r_binfile_get_baddr(RBinFile *binfile) { return binfile? binobj_get_baddr (binfile->o): UT64_MAX; } /* returns the base address of bin or UT64_MAX in case of errors */ R_API ut64 r_bin_get_baddr(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return binobj_get_baddr (o); } /* returns the load address of bin or UT64_MAX in case of errors */ R_API ut64 r_bin_get_laddr(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->loadaddr: UT64_MAX; } R_API void r_bin_set_baddr(RBin *bin, ut64 baddr) { RBinObject *o = r_bin_cur_object (bin); binobj_set_baddr (o, baddr); // XXX - update all the infos? } R_API ut64 r_bin_get_boffset(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->boffset: UT64_MAX; } R_API RBinAddr *r_bin_get_sym(RBin *bin, int sym) { RBinObject *o = r_bin_cur_object (bin); if (sym < 0 || sym >= R_BIN_SYM_LAST) { return NULL; } return o? o->binsym[sym]: NULL; } // XXX: those accessors are redundant R_API RList *r_bin_get_entries(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->entries: NULL; } R_API RList *r_bin_get_fields(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->fields: NULL; } R_API RList *r_bin_get_imports(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->imports: NULL; } R_API RBinInfo *r_bin_get_info(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->info: NULL; } R_API RList *r_bin_get_libs(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->libs: NULL; } R_API RList * r_bin_patch_relocs(RBin *bin) { static bool first = true; RBinObject *o = r_bin_cur_object (bin); if (!o) { return NULL; } // r_bin_object_set_items set o->relocs but there we don't have access // to io // so we need to be run from bin_relocs, free the previous reloc and get // the patched ones if (first && o->plugin && o->plugin->patch_relocs) { RList *tmp = o->plugin->patch_relocs (bin); first = false; if (!tmp) { return o->relocs; } r_list_free (o->relocs); o->relocs = tmp; REBASE_PADDR (o, o->relocs, RBinReloc); first = false; return o->relocs; } return o->relocs; } R_API RList *r_bin_get_relocs(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->relocs: NULL; } R_API RList *r_bin_get_sections(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->sections: NULL; } // TODO: Move into section.c and rename it to r_io_section_get_at () R_API RBinSection *r_bin_get_section_at(RBinObject *o, ut64 off, int va) { RBinSection *section; RListIter *iter; ut64 from, to; if (o) { // TODO: must be O(1) .. use sdb here r_list_foreach (o->sections, iter, section) { from = va? binobj_a2b (o, section->vaddr): section->paddr; to = va? 
(binobj_a2b (o, section->vaddr) + section->vsize) : (section->paddr + section->size); if (off >= from && off < to) { return section; } } } return NULL; } R_API RList *r_bin_reset_strings(RBin *bin) { RBinFile *a = r_bin_cur (bin); RBinObject *o = r_bin_cur_object (bin); RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (!a || !o) { return NULL; } if (o->strings) { r_list_free (o->strings); o->strings = NULL; } if (bin->minstrlen <= 0) { return NULL; } a->rawstr = bin->rawstr; if (plugin && plugin->strings) { o->strings = plugin->strings (a); } else { o->strings = get_strings (a, bin->minstrlen, 0); } if (bin->debase64) { filterStrings (bin, o->strings); } return o->strings; } R_API RList *r_bin_get_strings(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->strings: NULL; } R_API int r_bin_is_string(RBin *bin, ut64 va) { RBinString *string; RListIter *iter; RList *list; if (!(list = r_bin_get_strings (bin))) { return false; } r_list_foreach (list, iter, string) { if (string->vaddr == va) { return true; } if (string->vaddr > va) { return false; } } return false; } //callee must not free the symbol R_API RBinSymbol *r_bin_get_symbol_at_vaddr(RBin *bin, ut64 addr) { //use skiplist here RList *symbols = r_bin_get_symbols (bin); RListIter *iter; RBinSymbol *symbol; r_list_foreach (symbols, iter, symbol) { if (symbol->vaddr == addr) { return symbol; } } return NULL; } //callee must not free the symbol R_API RBinSymbol *r_bin_get_symbol_at_paddr(RBin *bin, ut64 addr) { //use skiplist here RList *symbols = r_bin_get_symbols (bin); RListIter *iter; RBinSymbol *symbol; r_list_foreach (symbols, iter, symbol) { if (symbol->paddr == addr) { return symbol; } } return NULL; } R_API RList *r_bin_get_symbols(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->symbols: NULL; } R_API RList *r_bin_get_mem(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->mem: NULL; } R_API int r_bin_is_big_endian(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return (o && o->info)? o->info->big_endian: -1; } R_API int r_bin_is_stripped(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? (R_BIN_DBG_STRIPPED & o->info->dbg_info): 1; } R_API int r_bin_is_static(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); if (o && r_list_length (o->libs) > 0) return R_BIN_DBG_STATIC & o->info->dbg_info; return true; } // TODO: Integrate with r_bin_dbg */ R_API int r_bin_has_dbg_linenums(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? (R_BIN_DBG_LINENUMS & o->info->dbg_info): false; } R_API int r_bin_has_dbg_syms(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? (R_BIN_DBG_SYMS & o->info->dbg_info): false; } R_API int r_bin_has_dbg_relocs(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? 
(R_BIN_DBG_RELOCS & o->info->dbg_info): false; } R_API RBin *r_bin_new() { int i; RBinXtrPlugin *static_xtr_plugin; RBin *bin = R_NEW0 (RBin); if (!bin) { return NULL; } bin->force = NULL; bin->filter_rules = UT64_MAX; bin->sdb = sdb_new0 (); bin->cb_printf = (PrintfCallback)printf; bin->plugins = r_list_newf ((RListFree)plugin_free); bin->minstrlen = 0; bin->want_dbginfo = true; bin->cur = NULL; bin->io_owned = false; bin->binfiles = r_list_newf ((RListFree)r_bin_file_free); for (i = 0; bin_static_plugins[i]; i++) { r_bin_add (bin, bin_static_plugins[i]); } bin->binxtrs = r_list_new (); bin->binxtrs->free = free; for (i = 0; bin_xtr_static_plugins[i]; i++) { static_xtr_plugin = R_NEW0 (RBinXtrPlugin); if (!static_xtr_plugin) { free (bin); return NULL; } *static_xtr_plugin = *bin_xtr_static_plugins[i]; r_bin_xtr_add (bin, static_xtr_plugin); } bin->file_ids = r_id_pool_new (0, 0xffffffff); return bin; } R_API int r_bin_use_arch(RBin *bin, const char *arch, int bits, const char *name) { RBinFile *binfile = r_bin_file_find_by_arch_bits (bin, arch, bits, name); RBinObject *obj = NULL; if (binfile) { obj = r_bin_object_find_by_arch_bits (binfile, arch, bits, name); if (!obj) { if (binfile->xtr_data) { RBinXtrData *xtr_data = r_list_get_n (binfile->xtr_data, 0); if (!r_bin_file_object_new_from_xtr_data (bin, binfile, UT64_MAX, r_bin_get_laddr (bin), xtr_data)) { return false; } obj = r_list_get_n (binfile->objs, 0); } } } else { void *plugin = r_bin_get_binplugin_by_name (bin, name); if (plugin) { if (bin->cur) { bin->cur->curplugin = plugin; } binfile = r_bin_file_new (bin, "-", NULL, 0, 0, 0, 999, NULL, NULL, false); // create object and set arch/bits obj = r_bin_object_new (binfile, plugin, 0, 0, 0, 1024); binfile->o = obj; obj->info = R_NEW0 (RBinInfo); obj->info->arch = strdup (arch); obj->info->bits = bits; } } return (binfile && r_bin_file_set_cur_binfile_obj (bin, binfile, obj)); } R_API RBinObject *r_bin_object_find_by_arch_bits(RBinFile *binfile, const char *arch, int bits, const char *name) { RBinObject *obj = NULL; RListIter *iter = NULL; RBinInfo *info = NULL; r_list_foreach (binfile->objs, iter, obj) { info = obj->info; if (info && info->arch && info->file && (bits == info->bits) && !strcmp (info->arch, arch) && !strcmp (info->file, name)) { break; } obj = NULL; } return obj; } R_API RBinFile *r_bin_file_find_by_arch_bits(RBin *bin, const char *arch, int bits, const char *name) { RListIter *iter; RBinFile *binfile = NULL; RBinXtrData *xtr_data; if (!name || !arch) { return NULL; } r_list_foreach (bin->binfiles, iter, binfile) { RListIter *iter_xtr; if (!binfile->xtr_data) { continue; } // look for sub-bins in Xtr Data and Load if we need to r_list_foreach (binfile->xtr_data, iter_xtr, xtr_data) { if (xtr_data->metadata && xtr_data->metadata->arch) { char *iter_arch = xtr_data->metadata->arch; int iter_bits = xtr_data->metadata->bits; if (bits == iter_bits && !strcmp (iter_arch, arch)) { if (!xtr_data->loaded) { if (!r_bin_file_object_new_from_xtr_data ( bin, binfile, xtr_data->baddr, xtr_data->laddr, xtr_data)) { return NULL; } return binfile; } } } } } return binfile; } R_API int r_bin_select(RBin *bin, const char *arch, int bits, const char *name) { RBinFile *cur = r_bin_cur (bin), *binfile = NULL; RBinObject *obj = NULL; name = !name && cur? 
cur->file: name; binfile = r_bin_file_find_by_arch_bits (bin, arch, bits, name); if (binfile && name) { obj = r_bin_object_find_by_arch_bits (binfile, arch, bits, name); } return binfile && r_bin_file_set_cur_binfile_obj (bin, binfile, obj); } R_API int r_bin_select_object(RBinFile *binfile, const char *arch, int bits, const char *name) { RBinObject *obj = binfile ? r_bin_object_find_by_arch_bits ( binfile, arch, bits, name) : NULL; return obj && r_bin_file_set_cur_binfile_obj (binfile->rbin, binfile, obj); } static RBinObject *r_bin_file_object_find_by_id(RBinFile *binfile, ut32 binobj_id) { RBinObject *obj; RListIter *iter; if (binfile) { r_list_foreach (binfile->objs, iter, obj) { if (obj->id == binobj_id) { return obj; } } } return NULL; } static RBinFile *r_bin_file_find_by_object_id(RBin *bin, ut32 binobj_id) { RListIter *iter; RBinFile *binfile; r_list_foreach (bin->binfiles, iter, binfile) { if (r_bin_file_object_find_by_id (binfile, binobj_id)) { return binfile; } } return NULL; } static RBinFile *r_bin_file_find_by_id(RBin *bin, ut32 binfile_id) { RBinFile *binfile = NULL; RListIter *iter = NULL; r_list_foreach (bin->binfiles, iter, binfile) { if (binfile->id == binfile_id) { break; } binfile = NULL; } return binfile; } R_API int r_bin_object_delete(RBin *bin, ut32 binfile_id, ut32 binobj_id) { RBinFile *binfile = NULL; //, *cbinfile = r_bin_cur (bin); RBinObject *obj = NULL; int res = false; #if 0 if (binfile_id == UT32_MAX && binobj_id == UT32_MAX) { return false; } #endif if (binfile_id == -1) { binfile = r_bin_file_find_by_object_id (bin, binobj_id); obj = binfile? r_bin_file_object_find_by_id (binfile, binobj_id): NULL; } else if (binobj_id == -1) { binfile = r_bin_file_find_by_id (bin, binfile_id); obj = binfile? binfile->o: NULL; } else { binfile = r_bin_file_find_by_id (bin, binfile_id); obj = binfile? r_bin_file_object_find_by_id (binfile, binobj_id): NULL; } // lazy way out, always leaving at least 1 bin object loaded if (binfile && (r_list_length (binfile->objs) > 1)) { binfile->o = NULL; r_list_delete_data (binfile->objs, obj); obj = (RBinObject *)r_list_get_n (binfile->objs, 0); res = obj && binfile && r_bin_file_set_cur_binfile_obj (bin, binfile, obj); } return res; } R_API int r_bin_select_by_ids(RBin *bin, ut32 binfile_id, ut32 binobj_id) { RBinFile *binfile = NULL; RBinObject *obj = NULL; if (binfile_id == UT32_MAX && binobj_id == UT32_MAX) { return false; } if (binfile_id == -1) { binfile = r_bin_file_find_by_object_id (bin, binobj_id); obj = binfile? r_bin_file_object_find_by_id (binfile, binobj_id): NULL; } else if (binobj_id == -1) { binfile = r_bin_file_find_by_id (bin, binfile_id); obj = binfile? binfile->o: NULL; } else { binfile = r_bin_file_find_by_id (bin, binfile_id); obj = binfile? r_bin_file_object_find_by_id (binfile, binobj_id): NULL; } if (!binfile || !obj) { return false; } return obj && binfile && r_bin_file_set_cur_binfile_obj (bin, binfile, obj); } R_API int r_bin_select_idx(RBin *bin, const char *name, int idx) { RBinFile *nbinfile = NULL, *binfile = r_bin_cur (bin); RBinObject *obj = NULL; const char *tname = !name && binfile? binfile->file: name; int res = false; if (!tname || !bin) { return res; } nbinfile = r_bin_file_find_by_name_n (bin, tname, idx); obj = nbinfile? 
r_list_get_n (nbinfile->objs, idx): NULL; return obj && nbinfile && r_bin_file_set_cur_binfile_obj (bin, nbinfile, obj); } static void list_xtr_archs(RBin *bin, int mode) { RBinFile *binfile = r_bin_cur (bin); if (binfile->xtr_data) { RListIter *iter_xtr; RBinXtrData *xtr_data; int bits, i = 0; char *arch, *machine; r_list_foreach (binfile->xtr_data, iter_xtr, xtr_data) { if (!xtr_data || !xtr_data->metadata || !xtr_data->metadata->arch) { continue; } arch = xtr_data->metadata->arch; machine = xtr_data->metadata->machine; bits = xtr_data->metadata->bits; switch (mode) { case 'q': bin->cb_printf ("%s\n", arch); break; case 'j': bin->cb_printf ( "%s{\"arch\":\"%s\",\"bits\":%d," "\"offset\":%" PFMT64d ",\"size\":\"%" PFMT64d ",\"machine\":\"%s\"}", i++ ? "," : "", arch, bits, xtr_data->offset, xtr_data->size, machine); break; default: bin->cb_printf ("%03i 0x%08" PFMT64x " %" PFMT64d " %s_%i %s\n", i++, xtr_data->offset, xtr_data->size, arch, bits, machine); break; } } } } R_API void r_bin_list_archs(RBin *bin, int mode) { RListIter *iter; int i = 0; char unk[128]; char archline[128]; RBinFile *binfile = r_bin_cur (bin); RBinObject *obj = NULL; const char *name = binfile? binfile->file: NULL; int narch = binfile? binfile->narch: 0; //are we with xtr format? if (binfile && binfile->curxtr) { list_xtr_archs (bin, mode); return; } Sdb *binfile_sdb = binfile? binfile->sdb: NULL; if (!binfile_sdb) { eprintf ("Cannot find SDB!\n"); return; } else if (!binfile) { eprintf ("Binary format not currently loaded!\n"); return; } sdb_unset (binfile_sdb, ARCHS_KEY, 0); if (mode == 'j') { bin->cb_printf ("\"bins\":["); } RBinFile *nbinfile = r_bin_file_find_by_name_n (bin, name, i); if (!nbinfile) { return; } i = -1; r_list_foreach (nbinfile->objs, iter, obj) { RBinInfo *info = obj->info; char bits = info? info->bits: 0; ut64 boffset = obj->boffset; ut32 obj_size = obj->obj_size; const char *arch = info? info->arch: NULL; const char *machine = info? info->machine: "unknown_machine"; i++; if (!arch) { snprintf (unk, sizeof (unk), "unk_%d", i); arch = unk; } if (info && narch > 1) { switch (mode) { case 'q': bin->cb_printf ("%s\n", arch); break; case 'j': bin->cb_printf ("%s{\"arch\":\"%s\",\"bits\":%d," "\"offset\":%" PFMT64d ",\"size\":%d," "\"machine\":\"%s\"}", i? ",": "", arch, bits, boffset, obj_size, machine); break; default: bin->cb_printf ("%03i 0x%08" PFMT64x " %d %s_%i %s\n", i, boffset, obj_size, arch, bits, machine); } snprintf (archline, sizeof (archline) - 1, "0x%08" PFMT64x ":%d:%s:%d:%s", boffset, obj_size, arch, bits, machine); /// xxx machine not exported? //sdb_array_push (binfile_sdb, ARCHS_KEY, archline, 0); } else { if (info) { switch (mode) { case 'q': bin->cb_printf ("%s\n", arch); break; case 'j': bin->cb_printf ("%s{\"arch\":\"%s\",\"bits\":%d," "\"offset\":%" PFMT64d ",\"size\":%d," "\"machine\":\"%s\"}", i? ",": "", arch, bits, boffset, obj_size, machine); break; default: bin->cb_printf ("%03i 0x%08" PFMT64x " %d %s_%d\n", i, boffset, obj_size, arch, bits); } snprintf (archline, sizeof (archline), "0x%08" PFMT64x ":%d:%s:%d", boffset, obj_size, arch, bits); } else if (nbinfile && mode) { switch (mode) { case 'q': bin->cb_printf ("%s\n", arch); break; case 'j': bin->cb_printf ("%s{\"arch\":\"unk_%d\",\"bits\":%d," "\"offset\":%" PFMT64d ",\"size\":%d," "\"machine\":\"%s\"}", i? 
",": "", i, bits, boffset, obj_size, machine); break; default: bin->cb_printf ("%03i 0x%08" PFMT64x " %d unk_0\n", i, boffset, obj_size); } snprintf (archline, sizeof (archline), "0x%08" PFMT64x ":%d:%s:%d", boffset, obj_size, "unk", 0); } else { eprintf ("Error: Invalid RBinFile.\n"); } //sdb_array_push (binfile_sdb, ARCHS_KEY, archline, 0); } } if (mode == 'j') { bin->cb_printf ("]"); } } R_API void r_bin_set_user_ptr(RBin *bin, void *user) { bin->user = user; } static RBinSection* _get_vsection_at(RBin *bin, ut64 vaddr) { RBinObject *cur = r_bin_object_get_cur (bin); return r_bin_get_section_at (cur, vaddr, true); } R_API void r_bin_bind(RBin *bin, RBinBind *b) { if (b) { b->bin = bin; b->get_offset = getoffset; b->get_name = getname; b->get_sections = r_bin_get_sections; b->get_vsect_at = _get_vsection_at; } } R_API RBuffer *r_bin_create(RBin *bin, const ut8 *code, int codelen, const ut8 *data, int datalen) { RBinFile *a = r_bin_cur (bin); RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (codelen < 0) { codelen = 0; } if (datalen < 0) { datalen = 0; } if (plugin && plugin->create) { return plugin->create (bin, code, codelen, data, datalen); } return NULL; } R_API RBuffer *r_bin_package(RBin *bin, const char *type, const char *file, RList *files) { if (!strcmp (type, "zip")) { #if 0 int zep = 0; struct zip * z = zip_open (file, 8 | 1, &zep); if (z) { RListIter *iter; const char *f; eprintf ("zip file created\n"); r_list_foreach (files, iter, f) { struct zip_source *zs = NULL; zs = zip_source_file (z, f, 0, 1024); if (zs) { eprintf ("ADD %s\n", f); zip_add (z, f, zs); zip_source_free (zs); } else { eprintf ("Cannot find file %s\n", f); } eprintf ("zS %p\n", zs); } zip_close (z); } else { eprintf ("Cannot create zip file\n"); } #endif } else if (!strcmp (type, "fat")) { const char *f; RListIter *iter; ut32 num; ut8 *num8 = (ut8*)&num; RBuffer *buf = r_buf_new_file (file, true); r_buf_write_at (buf, 0, (const ut8*)"\xca\xfe\xba\xbe", 4); int count = r_list_length (files); num = r_read_be32 (&count); ut64 from = 0x1000; r_buf_write_at (buf, 4, num8, 4); int off = 12; int item = 0; r_list_foreach (files, iter, f) { int f_len = 0; ut8 *f_buf = (ut8 *)r_file_slurp (f, &f_len); if (f_buf && f_len >= 0) { eprintf ("ADD %s %d\n", f, f_len); } else { eprintf ("Cannot open %s\n", f); free (f_buf); continue; } item++; /* CPU */ num8[0] = f_buf[7]; num8[1] = f_buf[6]; num8[2] = f_buf[5]; num8[3] = f_buf[4]; r_buf_write_at (buf, off - 4, num8, 4); /* SUBTYPE */ num8[0] = f_buf[11]; num8[1] = f_buf[10]; num8[2] = f_buf[9]; num8[3] = f_buf[8]; r_buf_write_at (buf, off, num8, 4); ut32 from32 = from; /* FROM */ num = r_read_be32 (&from32); r_buf_write_at (buf, off + 4, num8, 4); r_buf_write_at (buf, from, f_buf, f_len); /* SIZE */ num = r_read_be32 (&f_len); r_buf_write_at (buf, off + 8, num8, 4); off += 20; from += f_len + (f_len % 0x1000); free (f_buf); } r_buf_free (buf); return NULL; } else { eprintf ("Usage: rabin2 -X [fat|zip] [filename] [files ...]\n"); } return NULL; } R_API RBinObject *r_bin_get_object(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); if (o) { o->referenced++; } return o; } R_API RList * /*<RBinClass>*/ r_bin_get_classes(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->classes: NULL; } R_API void r_bin_class_free(RBinClass *c) { free (c->name); free (c->super); r_list_free (c->methods); r_list_free (c->fields); free (c); } R_API RBinClass *r_bin_class_new(RBinFile *binfile, const char *name, const char *super, int view) { RBinObject *o = binfile? 
binfile->o: NULL; RList *list = NULL; RBinClass *c; if (!o) { return NULL; } list = o->classes; if (!name) { return NULL; } c = r_bin_class_get (binfile, name); if (c) { if (super) { free (c->super); c->super = strdup (super); } return c; } c = R_NEW0 (RBinClass); if (!c) { return NULL; } c->name = strdup (name); c->super = super? strdup (super): NULL; c->index = r_list_length (list); c->methods = r_list_new (); c->fields = r_list_new (); c->visibility = view; if (!list) { list = o->classes = r_list_new (); } r_list_append (list, c); return c; } R_API RBinClass *r_bin_class_get(RBinFile *binfile, const char *name) { if (!binfile || !binfile->o || !name) { return NULL; } RBinClass *c; RListIter *iter; RList *list = binfile->o->classes; r_list_foreach (list, iter, c) { if (!strcmp (c->name, name)) { return c; } } return NULL; } R_API RBinSymbol *r_bin_class_add_method(RBinFile *binfile, const char *classname, const char *name, int nargs) { RBinClass *c = r_bin_class_get (binfile, classname); if (!c) { c = r_bin_class_new (binfile, classname, NULL, 0); if (!c) { eprintf ("Cannot allocate class %s\n", classname); return NULL; } } RBinSymbol *m; RListIter *iter; r_list_foreach (c->methods, iter, m) { if (!strcmp (m->name, name)) { return NULL; } } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { return NULL; } sym->name = strdup (name); r_list_append (c->methods, sym); return sym; } R_API void r_bin_class_add_field(RBinFile *binfile, const char *classname, const char *name) { //TODO: add_field into class //eprintf ("TODO add field: %s \n", name); } /* returns vaddr, rebased with the baseaddr of binfile, if va is enabled for * bin, paddr otherwise */ R_API ut64 r_binfile_get_vaddr(RBinFile *binfile, ut64 paddr, ut64 vaddr) { int use_va = 0; if (binfile && binfile->o && binfile->o->info) { use_va = binfile->o->info->has_va; } return use_va? binobj_a2b (binfile->o, vaddr): paddr; } /* returns vaddr, rebased with the baseaddr of bin, if va is enabled for bin, * paddr otherwise */ R_API ut64 r_bin_get_vaddr(RBin *bin, ut64 paddr, ut64 vaddr) { if (!bin || !bin->cur) { return UT64_MAX; } if (paddr == UT64_MAX) { return UT64_MAX; } /* hack to realign thumb symbols */ if (bin->cur->o && bin->cur->o->info && bin->cur->o->info->arch) { if (bin->cur->o->info->bits == 16) { RBinSection *s = r_bin_get_section_at (bin->cur->o, paddr, false); // autodetect thumb if (s && s->srwx & 1 && strstr (s->name, "text")) { if (!strcmp (bin->cur->o->info->arch, "arm") && (vaddr & 1)) { vaddr = (vaddr >> 1) << 1; } } } } return r_binfile_get_vaddr (bin->cur, paddr, vaddr); } R_API ut64 r_bin_a2b(RBin *bin, ut64 addr) { RBinObject *o = r_bin_cur_object (bin); return o? o->baddr_shift + addr: addr; } R_API ut64 r_bin_get_size(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o ? 
o->size : 0; } R_API int r_bin_file_delete_all(RBin *bin) { int counter = 0; if (bin) { counter = r_list_length (bin->binfiles); r_list_purge (bin->binfiles); bin->cur = NULL; } return counter; } R_API int r_bin_file_delete(RBin *bin, ut32 bin_fd) { RListIter *iter; RBinFile *bf; RBinFile *cur = r_bin_cur (bin); if (bin && cur) { r_list_foreach (bin->binfiles, iter, bf) { if (bf && bf->fd == bin_fd) { if (cur->fd == bin_fd) { //avoiding UaF due to dead reference bin->cur = NULL; } r_list_delete (bin->binfiles, iter); return 1; } } } return 0; } R_API RBinFile *r_bin_file_find_by_fd(RBin *bin, ut32 bin_fd) { RListIter *iter; RBinFile *bf; if (bin) { r_list_foreach (bin->binfiles, iter, bf) { if (bf && bf->fd == bin_fd) { return bf; } } } return NULL; } R_API RBinFile *r_bin_file_find_by_name(RBin *bin, const char *name) { RListIter *iter; RBinFile *bf = NULL; if (!bin || !name) { return NULL; } r_list_foreach (bin->binfiles, iter, bf) { if (bf && bf->file && !strcmp (bf->file, name)) { break; } bf = NULL; } return bf; } R_API RBinFile *r_bin_file_find_by_name_n(RBin *bin, const char *name, int idx) { RListIter *iter; RBinFile *bf = NULL; int i = 0; if (!bin) { return bf; } r_list_foreach (bin->binfiles, iter, bf) { if (bf && bf->file && !strcmp (bf->file, name)) { if (i == idx) { break; } i++; } bf = NULL; } return bf; } R_API int r_bin_file_set_cur_by_fd(RBin *bin, ut32 bin_fd) { RBinFile *bf = r_bin_file_find_by_fd (bin, bin_fd); return r_bin_file_set_cur_binfile (bin, bf); } R_API int r_bin_file_set_cur_binfile_obj(RBin *bin, RBinFile *bf, RBinObject *obj) { RBinPlugin *plugin = NULL; if (!bin || !bf || !obj) { return false; } bin->file = bf->file; bin->cur = bf; bin->narch = bf->narch; bf->o = obj; plugin = r_bin_file_cur_plugin (bf); if (bin->minstrlen < 1) { bin->minstrlen = plugin? plugin->minstrlen: bin->minstrlen; } return true; } R_API int r_bin_file_set_cur_binfile(RBin *bin, RBinFile *bf) { RBinObject *obj = bf? bf->o: NULL; return obj? r_bin_file_set_cur_binfile_obj (bin, bf, obj): false; } R_API int r_bin_file_set_cur_by_name(RBin *bin, const char *name) { RBinFile *bf = r_bin_file_find_by_name (bin, name); return r_bin_file_set_cur_binfile (bin, bf); } R_API RBinFile *r_bin_cur(RBin *bin) { return bin? bin->cur: NULL; } R_API RBinObject *r_bin_cur_object(RBin *bin) { RBinFile *binfile = r_bin_cur (bin); return binfile? binfile->o: NULL; } R_API void r_bin_force_plugin(RBin *bin, const char *name) { free (bin->force); bin->force = (name && *name)? 
strdup (name): NULL; } R_API int r_bin_read_at(RBin *bin, ut64 addr, ut8 *buf, int size) { RIOBind *iob; if (!bin || !(iob = &(bin->iob))) { return false; } return iob->read_at (iob->io, addr, buf, size); } R_API int r_bin_write_at(RBin *bin, ut64 addr, const ut8 *buf, int size) { RIOBind *iob; if (!bin || !(iob = &(bin->iob))) { return false; } return iob->write_at (iob->io, addr, buf, size); } R_API const char *r_bin_entry_type_string(int etype) { switch (etype) { case R_BIN_ENTRY_TYPE_PROGRAM: return "program"; case R_BIN_ENTRY_TYPE_MAIN: return "main"; case R_BIN_ENTRY_TYPE_INIT: return "init"; case R_BIN_ENTRY_TYPE_FINI: return "fini"; case R_BIN_ENTRY_TYPE_TLS: return "tls"; } return NULL; } R_API void r_bin_load_filter(RBin *bin, ut64 rules) { bin->filter_rules = rules; } /* RBinField */ R_API RBinField *r_bin_field_new(ut64 paddr, ut64 vaddr, int size, const char *name, const char *comment, const char *format) { RBinField *ptr; if (!(ptr = R_NEW0 (RBinField))) { return NULL; } ptr->name = strdup (name); ptr->comment = (comment && *comment)? strdup (comment): NULL; ptr->format = (format && *format)? strdup (format): NULL; ptr->paddr = paddr; ptr->size = size; // ptr->visibility = ??? ptr->vaddr = vaddr; return ptr; } // use void* to honor the RListFree signature R_API void r_bin_field_free(void *_field) { RBinField *field = (RBinField*) _field; free (field->name); free (field->comment); free (field->format); free (field); } R_API const char *r_bin_get_meth_flag_string(ut64 flag, bool compact) { switch (flag) { case R_BIN_METH_CLASS: return compact ? "c" : "class"; case R_BIN_METH_STATIC: return compact ? "s" : "static"; case R_BIN_METH_PUBLIC: return compact ? "p" : "public"; case R_BIN_METH_PRIVATE: return compact ? "P" : "private"; case R_BIN_METH_PROTECTED: return compact ? "r" : "protected"; case R_BIN_METH_INTERNAL: return compact ? "i" : "internal"; case R_BIN_METH_OPEN: return compact ? "o" : "open"; case R_BIN_METH_FILEPRIVATE: return compact ? "e" : "fileprivate"; case R_BIN_METH_FINAL: return compact ? "f" : "final"; case R_BIN_METH_VIRTUAL: return compact ? "v" : "virtual"; case R_BIN_METH_CONST: return compact ? "k" : "const"; case R_BIN_METH_MUTATING: return compact ? "m" : "mutating"; case R_BIN_METH_ABSTRACT: return compact ? "a" : "abstract"; case R_BIN_METH_SYNCHRONIZED: return compact ? "y" : "synchronized"; case R_BIN_METH_NATIVE: return compact ? "n" : "native"; case R_BIN_METH_BRIDGE: return compact ? "b" : "bridge"; case R_BIN_METH_VARARGS: return compact ? "g" : "varargs"; case R_BIN_METH_SYNTHETIC: return compact ? "h" : "synthetic"; case R_BIN_METH_STRICT: return compact ? "t" : "strict"; case R_BIN_METH_MIRANDA: return compact ? "A" : "miranda"; case R_BIN_METH_CONSTRUCTOR: return compact ? "C" : "constructor"; case R_BIN_METH_DECLARED_SYNCHRONIZED: return compact ? "Y" : "declared_synchronized"; default: return NULL; } }
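Illustrative usage sketch (not part of bin.c): a minimal standalone client that drives the RBin API listed above: create an instance, load a target binary, walk its sections, and free everything. The "/bin/ls" path is just an example target; error handling is kept to a minimum, and the exact header set may differ between radare2 versions.

#include <r_bin.h>
#include <r_util.h>

int main(void) {
	RBin *bin = r_bin_new ();
	if (!bin) {
		return 1;
	}
	/* fd == -1 lets r_bin_load open the file through its own RIO instance;
	 * baseaddr == UT64_MAX keeps whatever base address the plugin reports */
	if (!r_bin_load (bin, "/bin/ls", UT64_MAX, 0, 0, -1, false)) {
		eprintf ("Cannot load binary\n");
		r_bin_free (bin);
		return 1;
	}
	/* walk the sections of the currently selected object */
	RList *sections = r_bin_get_sections (bin);
	RListIter *iter;
	RBinSection *section;
	r_list_foreach (sections, iter, section) {
		printf ("0x%08" PFMT64x " %s\n", section->vaddr, section->name);
	}
	r_bin_free (bin);
	return 0;
}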
/* radare - LGPL - Copyright 2009-2017 - pancake, nibble, dso */ // TODO: dlopen library and show address #include <r_bin.h> #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_io.h> #include <config.h> R_LIB_VERSION (r_bin); #define bprintf if(binfile->rbin->verbose)eprintf #define DB a->sdb; #define RBINLISTFREE(x)\ if (x) { \ r_list_free (x);\ x = NULL;\ } #define REBASE_PADDR(o, l, type_t)\ do { \ RListIter *_it;\ type_t *_el;\ r_list_foreach ((l), _it, _el) { \ _el->paddr += (o)->loadaddr;\ }\ } while (0) #define ARCHS_KEY "archs" #if !defined(R_BIN_STATIC_PLUGINS) #define R_BIN_STATIC_PLUGINS 0 #endif #if !defined(R_BIN_XTR_STATIC_PLUGINS) #define R_BIN_XTR_STATIC_PLUGINS 0 #endif static RBinPlugin *bin_static_plugins[] = { R_BIN_STATIC_PLUGINS, NULL }; static RBinXtrPlugin *bin_xtr_static_plugins[] = { R_BIN_XTR_STATIC_PLUGINS, NULL }; static int is_data_section(RBinFile *a, RBinSection *s); static RList *get_strings(RBinFile *a, int min, int dump); static void r_bin_object_delete_items(RBinObject *o); static void r_bin_object_free(void /*RBinObject*/ *o_); // static int r_bin_object_set_items(RBinFile *binfile, RBinObject *o); static int r_bin_file_set_bytes(RBinFile *binfile, const ut8 *bytes, ut64 sz, bool steal_ptr); //static int remove_bin_file_by_binfile (RBin *bin, RBinFile * binfile); //static void r_bin_free_bin_files (RBin *bin); static void r_bin_file_free(void /*RBinFile*/ *bf_); static RBinFile *r_bin_file_create_append(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, int fd, const char *xtrname, bool steal_ptr); static RBinFile *r_bin_file_xtr_load_bytes(RBin *bin, RBinXtrPlugin *xtr, const char *filename, const ut8 *bytes, ut64 sz, ut64 file_sz, ut64 baseaddr, ut64 loadaddr, int idx, int fd, int rawstr); int r_bin_load_io_at_offset_as_sz(RBin *bin, int fd, ut64 baseaddr, ut64 loadaddr, int xtr_idx, ut64 offset, const char *name, ut64 sz); static RBinPlugin *r_bin_get_binplugin_by_name(RBin *bin, const char *name); static RBinXtrPlugin *r_bin_get_xtrplugin_by_name(RBin *bin, const char *name); static RBinPlugin *r_bin_get_binplugin_any(RBin *bin); static RBinObject *r_bin_object_new(RBinFile *binfile, RBinPlugin *plugin, ut64 baseaddr, ut64 loadaddr, ut64 offset, ut64 sz); static RBinFile *r_bin_file_new(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, int fd, const char *xtrname, Sdb *sdb, bool steal_ptr); static RBinFile *r_bin_file_new_from_bytes(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, ut64 baseaddr, ut64 loadaddr, int fd, const char *pluginname, const char *xtrname, ut64 offset, bool steal_ptr); static int getoffset(RBin *bin, int type, int idx) { RBinFile *a = r_bin_cur (bin); RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (plugin && plugin->get_offset) { return plugin->get_offset (a, type, idx); } return -1; } static const char *getname(RBin *bin, int type, int idx) { RBinFile *a = r_bin_cur (bin); RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (plugin && plugin->get_name) { return plugin->get_name (a, type, idx); } return NULL; } static int r_bin_file_object_add(RBinFile *binfile, RBinObject *o) { if (!o) { return false; } r_list_append (binfile->objs, o); r_bin_file_set_cur_binfile_obj (binfile->rbin, binfile, o); return true; } static void binobj_set_baddr(RBinObject *o, ut64 baddr) { if (!o || baddr == UT64_MAX) { return; } o->baddr_shift = baddr - o->baddr; } static ut64 binobj_a2b(RBinObject *o, ut64 addr) { return addr + 
(o? o->baddr_shift: 0); } static void filterStrings (RBin *bin, RList *strings) { RBinString *ptr; RListIter *iter; r_list_foreach (strings, iter, ptr) { char *dec = (char *)r_base64_decode_dyn (ptr->string, -1); if (dec) { char *s = ptr->string; do { char *dec2 = (char *)r_base64_decode_dyn (s, -1); if (!dec2) { break; } if (!r_str_is_printable (dec2)) { free (dec2); break; } free (dec); s = dec = dec2; } while (true); if (r_str_is_printable (dec) && strlen (dec) > 3) { free (ptr->string); ptr->string = dec; ptr->type = R_STRING_TYPE_BASE64; } else { free (dec); } } } } R_API void r_bin_iobind(RBin *bin, RIO *io) { r_io_bind (io, &bin->iob); } // TODO: move these two function do a different file R_API RBinXtrData *r_bin_xtrdata_new(RBuffer *buf, ut64 offset, ut64 size, ut32 file_count, RBinXtrMetadata *metadata) { RBinXtrData *data = R_NEW0 (RBinXtrData); if (!data) { return NULL; } data->offset = offset; data->size = size; data->file_count = file_count; data->metadata = metadata; data->loaded = 0; data->buffer = malloc (size + 1); // data->laddr = 0; /// XXX if (!data->buffer) { free (data); return NULL; } memcpy (data->buffer, r_buf_buffer (buf), size); data->buffer[size] = 0; return data; } R_API const char *r_bin_string_type (int type) { switch (type) { case 'a': return "ascii"; case 'u': return "utf8"; case 'w': return "utf16le"; case 'W': return "utf32le"; case 'b': return "base64"; } return "ascii"; // XXX } R_API void r_bin_xtrdata_free(void /*RBinXtrData*/ *data_) { RBinXtrData *data = data_; if (data) { if (data->metadata) { free (data->metadata->libname); free (data->metadata->arch); free (data->metadata->machine); free (data->metadata); } free (data->file); free (data->buffer); free (data); } } R_API RBinObject *r_bin_file_object_get_cur(RBinFile *binfile) { return binfile? binfile->o: NULL; } R_API RBinObject *r_bin_object_get_cur(RBin *bin) { return bin ? r_bin_file_object_get_cur (r_bin_cur (bin)) : NULL; } R_API RBinPlugin *r_bin_file_cur_plugin(RBinFile *binfile) { return binfile && binfile->o? binfile->o->plugin: NULL; } R_API int r_bin_file_cur_set_plugin(RBinFile *binfile, RBinPlugin *plugin) { if (binfile && binfile->o) { binfile->o->plugin = plugin; return true; } return false; } // maybe too big sometimes? 2KB of stack eaten here.. #define R_STRING_SCAN_BUFFER_SIZE 2048 static int string_scan_range(RList *list, const ut8 *buf, int min, const ut64 from, const ut64 to, int type) { ut8 tmp[R_STRING_SCAN_BUFFER_SIZE]; ut64 str_start, needle = from; int count = 0, i, rc, runes; int str_type = R_STRING_TYPE_DETECT; if (type == -1) { type = R_STRING_TYPE_DETECT; } if (!buf || !min) { return -1; } while (needle < to) { rc = r_utf8_decode (buf + needle, to - needle, NULL); if (!rc) { needle++; continue; } if (type == R_STRING_TYPE_DETECT) { char *w = (char *)buf + needle + rc; if ((to - needle) > 4) { bool is_wide32 = needle + rc + 2 < to && !w[0] && !w[1] && !w[2] && w[3] && !w[4]; if (is_wide32) { str_type = R_STRING_TYPE_WIDE32; } else { bool is_wide = needle + rc + 2 < to && !w[0] && w[1] && !w[2]; str_type = is_wide? 
R_STRING_TYPE_WIDE: R_STRING_TYPE_ASCII; } } else { str_type = R_STRING_TYPE_ASCII; } } else { str_type = type; } runes = 0; str_start = needle; /* Eat a whole C string */ for (rc = i = 0; i < sizeof (tmp) - 3 && needle < to; i += rc) { RRune r = {0}; if (str_type == R_STRING_TYPE_WIDE32) { rc = r_utf32le_decode (buf + needle, to - needle, &r); if (rc) { rc = 4; } } else if (str_type == R_STRING_TYPE_WIDE) { rc = r_utf16le_decode (buf + needle, to - needle, &r); if (rc == 1) { rc = 2; } } else { rc = r_utf8_decode (buf + needle, to - needle, &r); if (rc > 1) { str_type = R_STRING_TYPE_UTF8; } } /* Invalid sequence detected */ if (!rc) { needle++; break; } needle += rc; if (r_isprint (r)) { if (str_type == R_STRING_TYPE_WIDE32) { if (r == 0xff) { r = 0; } } rc = r_utf8_encode (&tmp[i], r); runes++; /* Print the escape code */ } else if (r && r < 0x100 && strchr ("\b\v\f\n\r\t\a\e", (char)r)) { if ((i + 32) < sizeof (tmp) && r < 28) { tmp[i + 0] = '\\'; tmp[i + 1] = " abtnvfr e"[r]; } else { // string too long break; } rc = 2; runes++; } else { /* \0 marks the end of C-strings */ break; } } tmp[i++] = '\0'; if (runes >= min) { if (str_type == R_STRING_TYPE_ASCII) { // reduce false positives int j; for (j = 0; j < i; j++) { char ch = tmp[j]; if (ch != '\n' && ch != '\r' && ch != '\t') { if (!IS_PRINTABLE (tmp[j])) { continue; } } } } if (list) { RBinString *new = R_NEW0 (RBinString); if (!new) { break; } new->type = str_type; new->length = runes; new->size = needle - str_start; new->ordinal = count++; // TODO: move into adjust_offset switch (str_type) { case R_STRING_TYPE_WIDE: if (str_start > 1) { const ut8 *p = buf + str_start - 2; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 2; // \xff\xfe } } break; case R_STRING_TYPE_WIDE32: if (str_start > 3) { const ut8 *p = buf + str_start - 4; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 4; // \xff\xfe\x00\x00 } } break; } new->paddr = new->vaddr = str_start; new->string = r_str_ndup ((const char *)tmp, i); r_list_append (list, new); } else { // DUMP TO STDOUT. raw dumping for rabin2 -zzz printf ("0x%08" PFMT64x " %s\n", str_start, tmp); } } } return count; } static void get_strings_range(RBinFile *arch, RList *list, int min, ut64 from, ut64 to) { RBinPlugin *plugin = r_bin_file_cur_plugin (arch); RBinString *ptr; RListIter *it; if (!arch || !arch->buf || !arch->buf->buf) { return; } if (!arch->rawstr) { if (!plugin || !plugin->info) { return; } } if (!min) { min = plugin? plugin->minstrlen: 4; } /* Some plugins return zero, fix it up */ if (!min) { min = 4; } if (min < 0) { return; } if (!to || to > arch->buf->length) { to = arch->buf->length; } if (arch->rawstr != 2) { ut64 size = to - from; // in case of dump ignore here if (arch->rbin->maxstrbuf && size && size > arch->rbin->maxstrbuf) { if (arch->rbin->verbose) { eprintf ("WARNING: bin_strings buffer is too big " "(0x%08" PFMT64x ")." " Use -zzz or set bin.maxstrbuf " "(RABIN2_MAXSTRBUF) in r2 (rabin2)\n", size); } return; } } if (string_scan_range (list, arch->buf->buf, min, from, to, -1) < 0) { return; } r_list_foreach (list, it, ptr) { RBinSection *s = r_bin_get_section_at (arch->o, ptr->paddr, false); if (s) { ptr->vaddr = s->vaddr + (ptr->paddr - s->paddr); } } } static int is_data_section(RBinFile *a, RBinSection *s) { if (s->has_strings || s->is_data) { return true; } if (s->is_data) { return true; } // Rust return (strstr (s->name, "_const") != NULL); } static RList *get_strings(RBinFile *a, int min, int dump) { RListIter *iter; RBinSection *section; RBinObject *o = a? 
a->o: NULL; RList *ret; if (!o) { return NULL; } if (dump) { /* dump to stdout, not stored in list */ ret = NULL; } else { ret = r_list_newf (r_bin_string_free); if (!ret) { return NULL; } } if (o->sections && !r_list_empty (o->sections) && !a->rawstr) { r_list_foreach (o->sections, iter, section) { if (is_data_section (a, section)) { get_strings_range (a, ret, min, section->paddr, section->paddr + section->size); } } r_list_foreach (o->sections, iter, section) { RBinString *s; RListIter *iter2; /* load objc/swift strings */ const int bits = (a->o && a->o->info) ? a->o->info->bits : 32; const int cfstr_size = (bits == 64) ? 32 : 16; const int cfstr_offs = (bits == 64) ? 16 : 8; if (strstr (section->name, "__cfstring")) { int i; // XXX do not walk if bin.strings == 0 ut8 *p; for (i = 0; i < section->size; i += cfstr_size) { ut8 buf[32]; if (!r_buf_read_at ( a->buf, section->paddr + i + cfstr_offs, buf, sizeof (buf))) { break; } p = buf; ut64 cfstr_vaddr = section->vaddr + i; ut64 cstr_vaddr = (bits == 64) ? r_read_le64 (p) : r_read_le32 (p); r_list_foreach (ret, iter2, s) { if (s->vaddr == cstr_vaddr) { RBinString *new = R_NEW0 (RBinString); new->type = s->type; new->length = s->length; new->size = s->size; new->ordinal = s->ordinal; new->paddr = new->vaddr = cfstr_vaddr; new->string = r_str_newf ("cstr.%s", s->string); r_list_append (ret, new); break; } } } } } } else { get_strings_range (a, ret, min, 0, a->size); } return ret; } R_API RList* r_bin_raw_strings(RBinFile *a, int min) { RList *l = NULL; if (a) { int tmp = a->rawstr; a->rawstr = 2; l = get_strings (a, min, 0); a->rawstr = tmp; } return l; } R_API int r_bin_dump_strings(RBinFile *a, int min) { get_strings (a, min, 1); return 0; } /* This is very slow if there are lot of symbols */ R_API int r_bin_load_languages(RBinFile *binfile) { if (r_bin_lang_rust (binfile)) { return R_BIN_NM_RUST; } if (r_bin_lang_swift (binfile)) { return R_BIN_NM_SWIFT; } if (r_bin_lang_objc (binfile)) { return R_BIN_NM_OBJC; } if (r_bin_lang_cxx (binfile)) { return R_BIN_NM_CXX; } if (r_bin_lang_dlang (binfile)) { return R_BIN_NM_DLANG; } if (r_bin_lang_msvc (binfile)) { return R_BIN_NM_MSVC; } return R_BIN_NM_NONE; } static void mem_free(void *data) { RBinMem *mem = (RBinMem *)data; if (mem && mem->mirrors) { mem->mirrors->free = mem_free; r_list_free (mem->mirrors); mem->mirrors = NULL; } free (mem); } static void r_bin_object_delete_items(RBinObject *o) { ut32 i = 0; if (!o) { return; } r_list_free (o->entries); r_list_free (o->fields); r_list_free (o->imports); r_list_free (o->libs); r_list_free (o->relocs); r_list_free (o->sections); r_list_free (o->strings); r_list_free (o->symbols); r_list_free (o->classes); r_list_free (o->lines); sdb_free (o->kv); if (o->mem) { o->mem->free = mem_free; } r_list_free (o->mem); o->mem = NULL; o->entries = NULL; o->fields = NULL; o->imports = NULL; o->libs = NULL; o->relocs = NULL; o->sections = NULL; o->strings = NULL; o->symbols = NULL; o->classes = NULL; o->lines = NULL; o->info = NULL; o->kv = NULL; for (i = 0; i < R_BIN_SYM_LAST; i++) { free (o->binsym[i]); o->binsym[i] = NULL; } } R_API void r_bin_info_free(RBinInfo *rb) { if (!rb) { return; } free (rb->intrp); free (rb->file); free (rb->type); free (rb->bclass); free (rb->rclass); free (rb->arch); free (rb->cpu); free (rb->machine); free (rb->os); free (rb->subsystem); free (rb->rpath); free (rb->guid); free (rb->debug_file_name); free (rb); } R_API void r_bin_import_free(void *_imp) { RBinImport *imp = (RBinImport *)_imp; if (imp) { R_FREE (imp->name); 
R_FREE (imp->classname); R_FREE (imp->descriptor); free (imp); } } R_API void r_bin_symbol_free(void *_sym) { RBinSymbol *sym = (RBinSymbol *)_sym; free (sym->name); free (sym->classname); free (sym); } R_API void r_bin_string_free(void *_str) { RBinString *str = (RBinString *)_str; free (str->string); free (str); } static void r_bin_object_free(void /*RBinObject*/ *o_) { RBinObject *o = o_; if (!o) { return; } r_bin_info_free (o->info); r_bin_object_delete_items (o); R_FREE (o); } static char *swiftField(const char *dn, const char *cn) { char *p = strstr (dn, ".getter_"); if (!p) { p = strstr (dn, ".setter_"); if (!p) { p = strstr (dn, ".method_"); } } if (p) { char *q = strstr (dn, cn); if (q && q[strlen (cn)] == '.') { q = strdup (q + strlen (cn) + 1); char *r = strchr (q, '.'); if (r) { *r = 0; } return q; } } return NULL; } R_API RList *r_bin_classes_from_symbols (RBinFile *bf, RBinObject *o) { RBinSymbol *sym; RListIter *iter; RList *symbols = o->symbols; RList *classes = o->classes; if (!classes) { classes = r_list_newf ((RListFree)r_bin_class_free); } r_list_foreach (symbols, iter, sym) { if (sym->name[0] != '_') { continue; } const char *cn = sym->classname; if (cn) { RBinClass *c = r_bin_class_new (bf, sym->classname, NULL, 0); if (!c) { continue; } // swift specific char *dn = sym->dname; char *fn = swiftField (dn, cn); if (fn) { // eprintf ("FIELD %s %s\n", cn, fn); RBinField *f = r_bin_field_new (sym->paddr, sym->vaddr, sym->size, fn, NULL, NULL); r_list_append (c->fields, f); free (fn); } else { char *mn = strstr (dn, ".."); if (mn) { // eprintf ("META %s %s\n", sym->classname, mn); } else { char *mn = strstr (dn, cn); if (mn && mn[strlen(cn)] == '.') { mn += strlen (cn) + 1; // eprintf ("METHOD %s %s\n", sym->classname, mn); r_list_append (c->methods, sym); } } } } } if (r_list_empty (classes)) { r_list_free (classes); return NULL; } return classes; } // XXX - change this to RBinObject instead of RBinFile // makes no sense to pass in a binfile and set the RBinObject // kinda a clunky functions R_API int r_bin_object_set_items(RBinFile *binfile, RBinObject *o) { RBinObject *old_o; RBinPlugin *cp; int i, minlen; RBin *bin; if (!binfile || !o || !o->plugin) { return false; } bin = binfile->rbin; old_o = binfile->o; cp = o->plugin; if (binfile->rbin->minstrlen > 0) { minlen = binfile->rbin->minstrlen; } else { minlen = cp->minstrlen; } binfile->o = o; if (cp->baddr) { ut64 old_baddr = o->baddr; o->baddr = cp->baddr (binfile); binobj_set_baddr (o, old_baddr); } if (cp->boffset) { o->boffset = cp->boffset (binfile); } // XXX: no way to get info from xtr pluginz? 
// Note, object size can not be set from here due to potential // inconsistencies if (cp->size) { o->size = cp->size (binfile); } if (cp->binsym) { for (i = 0; i < R_BIN_SYM_LAST; i++) { o->binsym[i] = cp->binsym (binfile, i); if (o->binsym[i]) { o->binsym[i]->paddr += o->loadaddr; } } } if (cp->entries) { o->entries = cp->entries (binfile); REBASE_PADDR (o, o->entries, RBinAddr); } if (cp->fields) { o->fields = cp->fields (binfile); if (o->fields) { o->fields->free = r_bin_field_free; REBASE_PADDR (o, o->fields, RBinField); } } if (cp->imports) { r_list_free (o->imports); o->imports = cp->imports (binfile); if (o->imports) { o->imports->free = r_bin_import_free; } } //if (bin->filter_rules & (R_BIN_REQ_SYMBOLS | R_BIN_REQ_IMPORTS)) { if (true) { if (cp->symbols) { o->symbols = cp->symbols (binfile); if (o->symbols) { o->symbols->free = r_bin_symbol_free; REBASE_PADDR (o, o->symbols, RBinSymbol); if (bin->filter) { r_bin_filter_symbols (o->symbols); } } } } //} o->info = cp->info? cp->info (binfile): NULL; if (cp->libs) { o->libs = cp->libs (binfile); } if (cp->sections) { // XXX sections are populated by call to size if (!o->sections) { o->sections = cp->sections (binfile); } REBASE_PADDR (o, o->sections, RBinSection); if (bin->filter) { r_bin_filter_sections (o->sections); } } if (bin->filter_rules & (R_BIN_REQ_RELOCS | R_BIN_REQ_IMPORTS)) { if (cp->relocs) { o->relocs = cp->relocs (binfile); REBASE_PADDR (o, o->relocs, RBinReloc); } } if (bin->filter_rules & R_BIN_REQ_STRINGS) { if (cp->strings) { o->strings = cp->strings (binfile); } else { o->strings = get_strings (binfile, minlen, 0); } if (bin->debase64) { filterStrings (bin, o->strings); } REBASE_PADDR (o, o->strings, RBinString); } if (bin->filter_rules & R_BIN_REQ_CLASSES) { if (cp->classes) { o->classes = cp->classes (binfile); if (r_bin_lang_swift (binfile)) { o->classes = r_bin_classes_from_symbols (binfile, o); } } else { o->classes = r_bin_classes_from_symbols (binfile, o); } if (bin->filter) { r_bin_filter_classes (o->classes); } } if (cp->lines) { o->lines = cp->lines (binfile); } if (cp->get_sdb) { Sdb* new_kv = cp->get_sdb (binfile); if (new_kv != o->kv) { sdb_free (o->kv); } o->kv = new_kv; } if (cp->mem) { o->mem = cp->mem (binfile); } if (bin->filter_rules & (R_BIN_REQ_SYMBOLS | R_BIN_REQ_IMPORTS)) { o->lang = r_bin_load_languages (binfile); } binfile->o = old_o; return true; } // XXX - this is a rather hacky way to do things, there may need to be a better // way. R_API int r_bin_load(RBin *bin, const char *file, ut64 baseaddr, ut64 loadaddr, int xtr_idx, int fd, int rawstr) { if (!bin) { return false; } // ALIAS? return r_bin_load_as (bin, file, baseaddr, loadaddr, // xtr_idx, fd, rawstr, 0, file); RIOBind *iob = &(bin->iob); if (!iob) { return false; } if (!iob->io) { iob->io = r_io_new (); //wtf if (!iob->io) { return false; } bin->io_owned = true; r_io_bind (iob->io, &bin->iob); //memleak? 
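/* descriptive note (added): RBin created this RIO instance itself, so
 * io_owned (set above) lets r_bin_free () and the fd < 0 error path below
 * release it again */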
iob = &bin->iob; } if (!iob->desc_get (iob->io, fd)) { fd = iob->fd_open (iob->io, file, R_IO_READ, 0644); } bin->rawstr = rawstr; // Use the current RIODesc otherwise r_io_map_select can swap them later on if (fd < 0) { r_io_free (iob->io); memset (&bin->iob, 0, sizeof (bin->iob)); bin->io_owned = false; return false; } //Use the current RIODesc otherwise r_io_map_select can swap them later on return r_bin_load_io (bin, fd, baseaddr, loadaddr, xtr_idx); } R_API int r_bin_load_as(RBin *bin, const char *file, ut64 baseaddr, ut64 loadaddr, int xtr_idx, int fd, int rawstr, int fileoffset, const char *name) { RIOBind *iob = &(bin->iob); if (!iob || !iob->io) { return false; } if (fd < 0) { fd = iob->fd_open (iob->io, file, R_IO_READ, 0644); } if (fd < 0) { return false; } return r_bin_load_io_at_offset_as (bin, fd, baseaddr, loadaddr, xtr_idx, fileoffset, name); } R_API int r_bin_reload(RBin *bin, int fd, ut64 baseaddr) { RIOBind *iob = &(bin->iob); RList *the_obj_list = NULL; int res = false; RBinFile *bf = NULL; ut8 *buf_bytes = NULL; ut64 sz = UT64_MAX; if (!iob || !iob->io) { res = false; goto error; } const char *name = iob->fd_get_name (iob->io, fd); bf = r_bin_file_find_by_name (bin, name); if (!bf) { res = false; goto error; } the_obj_list = bf->objs; bf->objs = r_list_newf ((RListFree)r_bin_object_free); // invalidate current object reference bf->o = NULL; sz = iob->fd_size (iob->io, fd); if (sz == UT64_MAX || sz > (64 * 1024 * 1024)) { // too big, probably wrong eprintf ("Too big\n"); res = false; goto error; } if (sz == UT64_MAX && iob->fd_is_dbg (iob->io, fd)) { // attempt a local open and read // This happens when a plugin like debugger does not have a // fixed size. // if there is no fixed size or its MAXED, there is no way to // definitively // load the bin-properly. Many of the plugins require all // content and are not // stream based loaders int tfd = iob->fd_open (iob->io, name, R_IO_READ, 0); if (tfd < 0) { res = false; goto error; } sz = iob->fd_size (iob->io, tfd); if (sz == UT64_MAX) { iob->fd_close (iob->io, tfd); res = false; goto error; } buf_bytes = calloc (1, sz + 1); if (!buf_bytes) { iob->fd_close (iob->io, tfd); res = false; goto error; } if (!iob->read_at (iob->io, 0LL, buf_bytes, sz)) { free (buf_bytes); iob->fd_close (iob->io, tfd); res = false; goto error; } iob->fd_close (iob->io, tfd); } else { buf_bytes = calloc (1, sz + 1); if (!buf_bytes) { res = false; goto error; } if (!iob->fd_read_at (iob->io, fd, 0LL, buf_bytes, sz)) { free (buf_bytes); res = false; goto error; } } bool yes_plz_steal_ptr = true; r_bin_file_set_bytes (bf, buf_bytes, sz, yes_plz_steal_ptr); if (r_list_length (the_obj_list) == 1) { RBinObject *old_o = (RBinObject *)r_list_get_n (the_obj_list, 0); res = r_bin_load_io_at_offset_as (bin, fd, baseaddr, old_o->loadaddr, 0, old_o->boffset, NULL); } else { RListIter *iter = NULL; RBinObject *old_o; r_list_foreach (the_obj_list, iter, old_o) { // XXX - naive. do we need a way to prevent multiple "anys" from being opened? 
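/* descriptive note (added): reload each previously loaded object at its
 * original loadaddr/boffset, forcing the same plugin by name so the
 * reloaded objects match the old ones */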
res = r_bin_load_io_at_offset_as (bin, fd, baseaddr, old_o->loadaddr, 0, old_o->boffset, old_o->plugin->name); } } bf->o = r_list_get_n (bf->objs, 0); error: r_list_free (the_obj_list); return res; } R_API int r_bin_load_io(RBin *bin, int fd, ut64 baseaddr, ut64 loadaddr, int xtr_idx) { return r_bin_load_io_at_offset_as (bin, fd, baseaddr, loadaddr, xtr_idx, 0, NULL); } R_API int r_bin_load_io_at_offset_as_sz(RBin *bin, int fd, ut64 baseaddr, ut64 loadaddr, int xtr_idx, ut64 offset, const char *name, ut64 sz) { RIOBind *iob = &(bin->iob); RIO *io = iob? iob->io: NULL; RListIter *it; ut8 *buf_bytes = NULL; RBinXtrPlugin *xtr; ut64 file_sz = UT64_MAX; RBinFile *binfile = NULL; int tfd = -1; if (!io || (fd < 0) || (st64)sz < 0) { return false; } bool is_debugger = iob->fd_is_dbg (io, fd); const char *fname = iob->fd_get_name (io, fd); if (loadaddr == UT64_MAX) { loadaddr = 0; } file_sz = iob->fd_size (io, fd); // file_sz = UT64_MAX happens when attaching to frida:// and other non-debugger io plugins which results in double opening if (is_debugger && file_sz == UT64_MAX) { tfd = iob->fd_open (io, fname, R_IO_READ, 0644); if (tfd >= 1) { file_sz = iob->fd_size (io, tfd); } } if (!sz) { sz = file_sz; } bin->file = fname; sz = R_MIN (file_sz, sz); if (!r_list_length (bin->binfiles)) { if (is_debugger) { //use the temporal RIODesc to read the content of the file instead //from the memory if (tfd >= 0) { buf_bytes = calloc (1, sz + 1); iob->fd_read_at (io, tfd, 0, buf_bytes, sz); // iob->fd_close (io, tfd); } } } if (!buf_bytes) { buf_bytes = calloc (1, sz + 1); if (!buf_bytes) { return false; } ut64 seekaddr = is_debugger? baseaddr: loadaddr; if (!iob->fd_read_at (io, fd, seekaddr, buf_bytes, sz)) { sz = 0LL; } } if (!name && (st64)sz > 0) { // XXX - for the time being this is fine, but we may want to // change the name to something like // <xtr_name>:<bin_type_name> r_list_foreach (bin->binxtrs, it, xtr) { if (xtr && xtr->check_bytes (buf_bytes, sz)) { if (xtr && (xtr->extract_from_bytes || xtr->extractall_from_bytes)) { if (is_debugger && sz != file_sz) { R_FREE (buf_bytes); if (tfd < 0) { tfd = iob->fd_open (io, fname, R_IO_READ, 0); } sz = iob->fd_size (io, tfd); if (sz != UT64_MAX) { buf_bytes = calloc (1, sz + 1); if (buf_bytes) { (void) iob->fd_read_at (io, tfd, 0, buf_bytes, sz); } } //DOUBLECLOSE UAF : iob->fd_close (io, tfd); tfd = -1; // marking it closed } else if (sz != file_sz) { (void) iob->read_at (io, 0LL, buf_bytes, sz); } binfile = r_bin_file_xtr_load_bytes (bin, xtr, fname, buf_bytes, sz, file_sz, baseaddr, loadaddr, xtr_idx, fd, bin->rawstr); } xtr = NULL; } } } if (!binfile) { bool steal_ptr = true; // transfer buf_bytes ownership to binfile binfile = r_bin_file_new_from_bytes ( bin, fname, buf_bytes, sz, file_sz, bin->rawstr, baseaddr, loadaddr, fd, name, NULL, offset, steal_ptr); } return binfile? r_bin_file_set_cur_binfile (bin, binfile): false; } R_API bool r_bin_load_io_at_offset_as(RBin *bin, int fd, ut64 baseaddr, ut64 loadaddr, int xtr_idx, ut64 offset, const char *name) { // adding file_sz to help reduce the performance impact on the system // in this case the number of bytes read will be limited to 2MB // (MIN_LOAD_SIZE) // if it fails, the whole file is loaded. 
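// NOTE (added): MAX_LOAD_SIZE below is currently 0, and
// r_bin_load_io_at_offset_as_sz treats sz == 0 as "use the whole file size",
// so in practice the first attempt already reads the full file; the UT64_MAX
// retry only matters if that first call fails for some other reason.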
const ut64 MAX_LOAD_SIZE = 0; // 0xfffff; //128 * (1 << 10 << 10); int res = r_bin_load_io_at_offset_as_sz (bin, fd, baseaddr, loadaddr, xtr_idx, offset, name, MAX_LOAD_SIZE); if (!res) { res = r_bin_load_io_at_offset_as_sz (bin, fd, baseaddr, loadaddr, xtr_idx, offset, name, UT64_MAX); } return res; } R_API int r_bin_file_deref_by_bind(RBinBind *binb) { RBin *bin = binb? binb->bin: NULL; RBinFile *a = r_bin_cur (bin); return r_bin_file_deref (bin, a); } R_API int r_bin_file_deref(RBin *bin, RBinFile *a) { RBinObject *o = r_bin_cur_object (bin); int res = false; if (a && !o) { //r_list_delete_data (bin->binfiles, a); res = true; } else if (a && o->referenced - 1 < 1) { //r_list_delete_data (bin->binfiles, a); res = true; // not thread safe } else if (o) { o->referenced--; } // it is possible for a file not // to be bound to RBin and RBinFiles // XXX - is this an ok assumption? if (bin) bin->cur = NULL; return res; } R_API int r_bin_file_ref_by_bind(RBinBind *binb) { RBin *bin = binb? binb->bin: NULL; RBinFile *a = r_bin_cur (bin); return r_bin_file_ref (bin, a); } R_API int r_bin_file_ref(RBin *bin, RBinFile *a) { RBinObject *o = r_bin_cur_object (bin); if (a && o) { o->referenced--; return true; } return false; } static void r_bin_file_free(void /*RBinFile*/ *bf_) { RBinFile *a = bf_; RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (!a) { return; } // Binary format objects are connected to the // RBinObject, so the plugin must destroy the // format data first if (plugin && plugin->destroy) { plugin->destroy (a); } if (a->curxtr && a->curxtr->destroy && a->xtr_obj) { a->curxtr->free_xtr ((void *)(a->xtr_obj)); } r_buf_free (a->buf); // TODO: unset related sdb namespaces if (a && a->sdb_addrinfo) { sdb_free (a->sdb_addrinfo); a->sdb_addrinfo = NULL; } free (a->file); a->o = NULL; r_list_free (a->objs); r_list_free (a->xtr_data); r_id_pool_kick_id (a->rbin->file_ids, a->id); memset (a, 0, sizeof (RBinFile)); free (a); } static RBinFile *r_bin_file_create_append(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, int fd, const char *xtrname, bool steal_ptr) { RBinFile *bf = r_bin_file_new (bin, file, bytes, sz, file_sz, rawstr, fd, xtrname, bin->sdb, steal_ptr); if (bf) { r_list_append (bin->binfiles, bf); } return bf; } // This function populate RBinFile->xtr_data, that information is enough to // create RBinObject when needed using r_bin_file_object_new_from_xtr_data static RBinFile *r_bin_file_xtr_load_bytes(RBin *bin, RBinXtrPlugin *xtr, const char *filename, const ut8 *bytes, ut64 sz, ut64 file_sz, ut64 baseaddr, ut64 loadaddr, int idx, int fd, int rawstr) { if (!bin || !bytes) { return NULL; } RBinFile *bf = r_bin_file_find_by_name (bin, filename); if (!bf) { bf = r_bin_file_create_append (bin, filename, bytes, sz, file_sz, rawstr, fd, xtr->name, false); if (!bf) { return NULL; } if (!bin->cur) { bin->cur = bf; } } if (bf->xtr_data) { r_list_free (bf->xtr_data); } if (xtr && bytes) { RList *xtr_data_list = xtr->extractall_from_bytes (bin, bytes, sz); RListIter *iter; RBinXtrData *xtr; //populate xtr_data with baddr and laddr that will be used later on //r_bin_file_object_new_from_xtr_data r_list_foreach (xtr_data_list, iter, xtr) { xtr->baddr = baseaddr? baseaddr : UT64_MAX; xtr->laddr = loadaddr? loadaddr : UT64_MAX; } bf->loadaddr = loadaddr; bf->xtr_data = xtr_data_list ? 
xtr_data_list : NULL; } return bf; } static RBinPlugin *r_bin_get_binplugin_by_name(RBin *bin, const char *name) { RBinPlugin *plugin; RListIter *it; if (bin && name) { r_list_foreach (bin->plugins, it, plugin) { if (!strcmp (plugin->name, name)) { return plugin; } } } return NULL; } R_API RBinPlugin *r_bin_get_binplugin_by_bytes(RBin *bin, const ut8 *bytes, ut64 sz) { RBinPlugin *plugin; RListIter *it; if (!bin || !bytes) { return NULL; } r_list_foreach (bin->plugins, it, plugin) { if (plugin->check_bytes && plugin->check_bytes (bytes, sz)) { return plugin; } } return NULL; } static RBinXtrPlugin *r_bin_get_xtrplugin_by_name(RBin *bin, const char *name) { RBinXtrPlugin *xtr; RListIter *it; if (!bin || !name) return NULL; r_list_foreach (bin->binxtrs, it, xtr) { if (!strcmp (xtr->name, name)) { return xtr; } // must be set to null xtr = NULL; } return NULL; } static RBinPlugin *r_bin_get_binplugin_any(RBin *bin) { return r_bin_get_binplugin_by_name (bin, "any"); } static RBinObject *r_bin_object_new(RBinFile *binfile, RBinPlugin *plugin, ut64 baseaddr, ut64 loadaddr, ut64 offset, ut64 sz) { const ut8 *bytes = binfile? r_buf_buffer (binfile->buf): NULL; ut64 bytes_sz = binfile? r_buf_size (binfile->buf): 0; Sdb *sdb = binfile? binfile->sdb: NULL; RBinObject *o = R_NEW0 (RBinObject); if (!o) { return NULL; } o->obj_size = bytes && (bytes_sz >= sz + offset)? sz: 0; o->boffset = offset; o->id = r_num_rand (0xfffff000); o->kv = sdb_new0 (); o->baddr = baseaddr; o->baddr_shift = 0; o->plugin = plugin; o->loadaddr = loadaddr != UT64_MAX ? loadaddr : 0; // XXX more checking will be needed here // only use LoadBytes if buffer offset != 0 // if (offset != 0 && bytes && plugin && plugin->load_bytes && (bytes_sz // >= sz + offset) ) { if (bytes && plugin && plugin->load_bytes && (bytes_sz >= sz + offset)) { ut64 bsz = bytes_sz - offset; if (sz < bsz) { bsz = sz; } o->bin_obj = plugin->load_bytes (binfile, bytes + offset, sz, loadaddr, sdb); if (!o->bin_obj) { bprintf ( "Error in r_bin_object_new: load_bytes failed " "for %s plugin\n", plugin->name); sdb_free (o->kv); free (o); return NULL; } } else if (binfile && plugin && plugin->load) { // XXX - haha, this is a hack. // switching out the current object for the new // one to be processed RBinObject *old_o = binfile->o; binfile->o = o; if (plugin->load (binfile)) { binfile->sdb_info = o->kv; // mark as do not walk sdb_ns_set (binfile->sdb, "info", o->kv); } else { binfile->o = old_o; } o->obj_size = sz; } else { sdb_free (o->kv); free (o); return NULL; } // XXX - binfile could be null here meaning an improper load // XXX - object size cant be set here and needs to be set where // where the object is created from. 
The reason for this is to prevent // mis-reporting when the file is loaded from impartial bytes or is // extracted // from a set of bytes in the file r_bin_object_set_items (binfile, o); r_bin_file_object_add (binfile, o); // XXX this is a very hacky alternative to rewriting the // RIO stuff, as discussed here: return o; } #define LIMIT_SIZE 0 static int r_bin_file_set_bytes(RBinFile *binfile, const ut8 *bytes, ut64 sz, bool steal_ptr) { if (!bytes) { return false; } r_buf_free (binfile->buf); binfile->buf = r_buf_new (); #if LIMIT_SIZE if (sz > 1024 * 1024) { eprintf ("Too big\n"); // TODO: use r_buf_io instead of setbytes all the time to save memory return NULL; } #else if (steal_ptr) { r_buf_set_bytes_steal (binfile->buf, bytes, sz); } else { r_buf_set_bytes (binfile->buf, bytes, sz); } #endif return binfile->buf != NULL; } static RBinFile *r_bin_file_new(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, int fd, const char *xtrname, Sdb *sdb, bool steal_ptr) { RBinFile *binfile = R_NEW0 (RBinFile); if (!binfile) { return NULL; } if (!r_id_pool_grab_id (bin->file_ids, &binfile->id)) { if (steal_ptr) { // we own the ptr, free on error free ((void*) bytes); } free (binfile); //no id means no binfile return NULL; } int res = r_bin_file_set_bytes (binfile, bytes, sz, steal_ptr); if (!res && steal_ptr) { // we own the ptr, free on error free((void*) bytes); } binfile->rbin = bin; binfile->file = file? strdup (file): NULL; binfile->rawstr = rawstr; binfile->fd = fd; binfile->curxtr = r_bin_get_xtrplugin_by_name (bin, xtrname); binfile->sdb = sdb; binfile->size = file_sz; binfile->xtr_data = r_list_newf ((RListFree)r_bin_xtrdata_free); binfile->objs = r_list_newf ((RListFree)r_bin_object_free); binfile->xtr_obj = NULL; if (!binfile->buf) { //r_bin_file_free (binfile); binfile->buf = r_buf_new (); // return NULL; } if (sdb) { binfile->sdb = sdb_ns (sdb, sdb_fmt (0, "fd.%d", fd), 1); sdb_set (binfile->sdb, "archs", "0:0:x86:32", 0); // x86?? /* NOTE */ /* Those refs++ are necessary because sdb_ns() doesnt rerefs all * sub-namespaces */ /* And if any namespace is referenced backwards it gets * double-freed */ binfile->sdb_addrinfo = sdb_ns (binfile->sdb, "addrinfo", 1); binfile->sdb_addrinfo->refs++; sdb_ns_set (sdb, "cur", binfile->sdb); binfile->sdb->refs++; } return binfile; } R_API bool r_bin_file_object_new_from_xtr_data(RBin *bin, RBinFile *bf, ut64 baseaddr, ut64 loadaddr, RBinXtrData *data) { RBinObject *o = NULL; RBinPlugin *plugin = NULL; ut8* bytes; ut64 offset = data? data->offset: 0; ut64 sz = data ? data->size : 0; if (!data || !bf) { return false; } // for right now the bytes used will just be the offest into the binfile // buffer // if the extraction requires some sort of transformation then this will // need to be fixed // here. 
bytes = data->buffer; if (!bytes) { return false; } plugin = r_bin_get_binplugin_by_bytes (bin, (const ut8*)bytes, sz); if (!plugin) { plugin = r_bin_get_binplugin_any (bin); } r_buf_free (bf->buf); bf->buf = r_buf_new_with_bytes ((const ut8*)bytes, data->size); //r_bin_object_new append the new object into binfile o = r_bin_object_new (bf, plugin, baseaddr, loadaddr, offset, sz); // size is set here because the reported size of the object depends on // if loaded from xtr plugin or partially read if (!o) { return false; } if (o && !o->size) { o->size = sz; } bf->narch = data->file_count; if (!o->info) { o->info = R_NEW0 (RBinInfo); } free (o->info->file); free (o->info->arch); free (o->info->machine); free (o->info->type); o->info->file = strdup (bf->file); o->info->arch = strdup (data->metadata->arch); o->info->machine = strdup (data->metadata->machine); o->info->type = strdup (data->metadata->type); o->info->bits = data->metadata->bits; o->info->has_crypto = bf->o->info->has_crypto; data->loaded = true; return true; } static RBinFile *r_bin_file_new_from_bytes(RBin *bin, const char *file, const ut8 *bytes, ut64 sz, ut64 file_sz, int rawstr, ut64 baseaddr, ut64 loadaddr, int fd, const char *pluginname, const char *xtrname, ut64 offset, bool steal_ptr) { ut8 binfile_created = false; RBinPlugin *plugin = NULL; RBinXtrPlugin *xtr = NULL; RBinObject *o = NULL; RBinFile *bf = NULL; if (sz == UT64_MAX) { return NULL; } if (xtrname) { xtr = r_bin_get_xtrplugin_by_name (bin, xtrname); } if (xtr && xtr->check_bytes (bytes, sz)) { return r_bin_file_xtr_load_bytes (bin, xtr, file, bytes, sz, file_sz, baseaddr, loadaddr, 0, fd, rawstr); } if (!bf) { bf = r_bin_file_create_append (bin, file, bytes, sz, file_sz, rawstr, fd, xtrname, steal_ptr); if (!bf) { if (!steal_ptr) { // we own the ptr, free on error free ((void*) bytes); } return NULL; } binfile_created = true; } if (bin->force) { plugin = r_bin_get_binplugin_by_name (bin, bin->force); } if (!plugin) { if (pluginname) { plugin = r_bin_get_binplugin_by_name (bin, pluginname); } if (!plugin) { plugin = r_bin_get_binplugin_by_bytes (bin, bytes, sz); if (!plugin) { plugin = r_bin_get_binplugin_any (bin); } } } o = r_bin_object_new (bf, plugin, baseaddr, loadaddr, 0, r_buf_size (bf->buf)); // size is set here because the reported size of the object depends on // if loaded from xtr plugin or partially read if (o && !o->size) { o->size = file_sz; } if (!o) { if (bf && binfile_created) { r_list_delete_data (bin->binfiles, bf); } return NULL; } /* WTF */ if (strcmp (plugin->name, "any")) { bf->narch = 1; } /* free unnecessary rbuffer (???) 
*/ return bf; } static void plugin_free(RBinPlugin *p) { if (p && p->fini) { p->fini (NULL); } R_FREE (p); } // rename to r_bin_plugin_add like the rest R_API int r_bin_add(RBin *bin, RBinPlugin *foo) { RListIter *it; RBinPlugin *plugin; if (foo->init) { foo->init (bin->user); } r_list_foreach (bin->plugins, it, plugin) { if (!strcmp (plugin->name, foo->name)) { return false; } } plugin = R_NEW0 (RBinPlugin); memcpy (plugin, foo, sizeof (RBinPlugin)); r_list_append (bin->plugins, plugin); return true; } R_API int r_bin_xtr_add(RBin *bin, RBinXtrPlugin *foo) { RListIter *it; RBinXtrPlugin *xtr; if (foo->init) { foo->init (bin->user); } // avoid duplicates r_list_foreach (bin->binxtrs, it, xtr) { if (!strcmp (xtr->name, foo->name)) { return false; } } r_list_append (bin->binxtrs, foo); return true; } R_API void *r_bin_free(RBin *bin) { if (!bin) { return NULL; } if (bin->io_owned) { r_io_free (bin->iob.io); } bin->file = NULL; free (bin->force); free (bin->srcdir); //r_bin_free_bin_files (bin); r_list_free (bin->binfiles); r_list_free (bin->binxtrs); r_list_free (bin->plugins); sdb_free (bin->sdb); r_id_pool_free (bin->file_ids); memset (bin, 0, sizeof (RBin)); free (bin); return NULL; } static int r_bin_print_plugin_details(RBin *bin, RBinPlugin *bp, int json) { if (json == 'q') { bin->cb_printf ("%s\n", bp->name); } else if (json) { bin->cb_printf ( "{\"name\":\"%s\",\"description\":\"%s\"," "\"license\":\"%s\"}\n", bp->name, bp->desc, bp->license? bp->license: "???"); } else { bin->cb_printf ("Name: %s\n", bp->name); bin->cb_printf ("Description: %s\n", bp->desc); if (bp->license) { bin->cb_printf ("License: %s\n", bp->license); } if (bp->version) { bin->cb_printf ("Version: %s\n", bp->version); } if (bp->author) { bin->cb_printf ("Author: %s\n", bp->author); } } return true; } static int r_bin_print_xtrplugin_details(RBin *bin, RBinXtrPlugin *bx, int json) { if (json == 'q') { bin->cb_printf ("%s\n", bx->name); } else if (json) { bin->cb_printf ( "{\"name\":\"%s\",\"description\":\"%s\"," "\"license\":\"%s\"}\n", bx->name, bx->desc, bx->license? bx->license: "???"); } else { bin->cb_printf ("Name: %s\n", bx->name); bin->cb_printf ("Description: %s\n", bx->desc); if (bx->license) { bin->cb_printf ("License: %s\n", bx->license); } } return true; } R_API int r_bin_list(RBin *bin, int json) { RListIter *it; RBinPlugin *bp; RBinXtrPlugin *bx; if (json == 'q') { r_list_foreach (bin->plugins, it, bp) { bin->cb_printf ("%s\n", bp->name); } r_list_foreach (bin->binxtrs, it, bx) { bin->cb_printf ("%s\n", bx->name); } } else if (json) { int i; i = 0; bin->cb_printf ("{\"bin\":["); r_list_foreach (bin->plugins, it, bp) { bin->cb_printf ( "%s{\"name\":\"%s\",\"description\":\"%s\"," "\"license\":\"%s\"}", i? ",": "", bp->name, bp->desc, bp->license? bp->license: "???"); i++; } i = 0; bin->cb_printf ("],\"xtr\":["); r_list_foreach (bin->binxtrs, it, bx) { bin->cb_printf ( "%s{\"name\":\"%s\",\"description\":\"%s\"," "\"license\":\"%s\"}", i? ",": "", bx->name, bx->desc, bx->license? bx->license: "???"); i++; } bin->cb_printf ("]}\n"); } else { r_list_foreach (bin->plugins, it, bp) { bin->cb_printf ("bin %-11s %s (%s) %s %s\n", bp->name, bp->desc, bp->license? bp->license: "???", bp->version? bp->version: "", bp->author? bp->author: ""); } r_list_foreach (bin->binxtrs, it, bx) { bin->cb_printf ("xtr %-11s %s (%s)\n", bx->name, bx->desc, bx->license? 
bx->license: "???"); } } return false; } R_API int r_bin_list_plugin(RBin *bin, const char* name, int json) { RListIter *it; RBinPlugin *bp; RBinXtrPlugin *bx; r_list_foreach (bin->plugins, it, bp) { if (!r_str_cmp (name, bp->name, strlen (name))) { continue; } return r_bin_print_plugin_details (bin, bp, json); } r_list_foreach (bin->binxtrs, it, bx) { if (!r_str_cmp (name, bx->name, strlen (name))) { continue; } return r_bin_print_xtrplugin_details (bin, bx, json); } eprintf ("cannot find plugin %s\n", name); return false; } static ut64 binobj_get_baddr(RBinObject *o) { return o? o->baddr + o->baddr_shift: UT64_MAX; } R_API ut64 r_binfile_get_baddr(RBinFile *binfile) { return binfile? binobj_get_baddr (binfile->o): UT64_MAX; } /* returns the base address of bin or UT64_MAX in case of errors */ R_API ut64 r_bin_get_baddr(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return binobj_get_baddr (o); } /* returns the load address of bin or UT64_MAX in case of errors */ R_API ut64 r_bin_get_laddr(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->loadaddr: UT64_MAX; } R_API void r_bin_set_baddr(RBin *bin, ut64 baddr) { RBinObject *o = r_bin_cur_object (bin); binobj_set_baddr (o, baddr); // XXX - update all the infos? } R_API ut64 r_bin_get_boffset(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->boffset: UT64_MAX; } R_API RBinAddr *r_bin_get_sym(RBin *bin, int sym) { RBinObject *o = r_bin_cur_object (bin); if (sym < 0 || sym >= R_BIN_SYM_LAST) { return NULL; } return o? o->binsym[sym]: NULL; } // XXX: those accessors are redundant R_API RList *r_bin_get_entries(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->entries: NULL; } R_API RList *r_bin_get_fields(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->fields: NULL; } R_API RList *r_bin_get_imports(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->imports: NULL; } R_API RBinInfo *r_bin_get_info(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->info: NULL; } R_API RList *r_bin_get_libs(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->libs: NULL; } R_API RList * r_bin_patch_relocs(RBin *bin) { static bool first = true; RBinObject *o = r_bin_cur_object (bin); if (!o) { return NULL; } // r_bin_object_set_items set o->relocs but there we don't have access // to io // so we need to be run from bin_relocs, free the previous reloc and get // the patched ones if (first && o->plugin && o->plugin->patch_relocs) { RList *tmp = o->plugin->patch_relocs (bin); first = false; if (!tmp) { return o->relocs; } r_list_free (o->relocs); o->relocs = tmp; REBASE_PADDR (o, o->relocs, RBinReloc); first = false; return o->relocs; } return o->relocs; } R_API RList *r_bin_get_relocs(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->relocs: NULL; } R_API RList *r_bin_get_sections(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->sections: NULL; } // TODO: Move into section.c and rename it to r_io_section_get_at () R_API RBinSection *r_bin_get_section_at(RBinObject *o, ut64 off, int va) { RBinSection *section; RListIter *iter; ut64 from, to; if (o) { // TODO: must be O(1) .. use sdb here r_list_foreach (o->sections, iter, section) { from = va? binobj_a2b (o, section->vaddr): section->paddr; to = va? 
(binobj_a2b (o, section->vaddr) + section->vsize) : (section->paddr + section->size); if (off >= from && off < to) { return section; } } } return NULL; } R_API RList *r_bin_reset_strings(RBin *bin) { RBinFile *a = r_bin_cur (bin); RBinObject *o = r_bin_cur_object (bin); RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (!a || !o) { return NULL; } if (o->strings) { r_list_free (o->strings); o->strings = NULL; } if (bin->minstrlen <= 0) { return NULL; } a->rawstr = bin->rawstr; if (plugin && plugin->strings) { o->strings = plugin->strings (a); } else { o->strings = get_strings (a, bin->minstrlen, 0); } if (bin->debase64) { filterStrings (bin, o->strings); } return o->strings; } R_API RList *r_bin_get_strings(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->strings: NULL; } R_API int r_bin_is_string(RBin *bin, ut64 va) { RBinString *string; RListIter *iter; RList *list; if (!(list = r_bin_get_strings (bin))) { return false; } r_list_foreach (list, iter, string) { if (string->vaddr == va) { return true; } if (string->vaddr > va) { return false; } } return false; } //callee must not free the symbol R_API RBinSymbol *r_bin_get_symbol_at_vaddr(RBin *bin, ut64 addr) { //use skiplist here RList *symbols = r_bin_get_symbols (bin); RListIter *iter; RBinSymbol *symbol; r_list_foreach (symbols, iter, symbol) { if (symbol->vaddr == addr) { return symbol; } } return NULL; } //callee must not free the symbol R_API RBinSymbol *r_bin_get_symbol_at_paddr(RBin *bin, ut64 addr) { //use skiplist here RList *symbols = r_bin_get_symbols (bin); RListIter *iter; RBinSymbol *symbol; r_list_foreach (symbols, iter, symbol) { if (symbol->paddr == addr) { return symbol; } } return NULL; } R_API RList *r_bin_get_symbols(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->symbols: NULL; } R_API RList *r_bin_get_mem(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->mem: NULL; } R_API int r_bin_is_big_endian(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return (o && o->info)? o->info->big_endian: -1; } R_API int r_bin_is_stripped(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? (R_BIN_DBG_STRIPPED & o->info->dbg_info): 1; } R_API int r_bin_is_static(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); if (o && r_list_length (o->libs) > 0) return R_BIN_DBG_STATIC & o->info->dbg_info; return true; } // TODO: Integrate with r_bin_dbg */ R_API int r_bin_has_dbg_linenums(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? (R_BIN_DBG_LINENUMS & o->info->dbg_info): false; } R_API int r_bin_has_dbg_syms(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? (R_BIN_DBG_SYMS & o->info->dbg_info): false; } R_API int r_bin_has_dbg_relocs(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? 
(R_BIN_DBG_RELOCS & o->info->dbg_info): false; } R_API RBin *r_bin_new() { int i; RBinXtrPlugin *static_xtr_plugin; RBin *bin = R_NEW0 (RBin); if (!bin) { return NULL; } bin->force = NULL; bin->filter_rules = UT64_MAX; bin->sdb = sdb_new0 (); bin->cb_printf = (PrintfCallback)printf; bin->plugins = r_list_newf ((RListFree)plugin_free); bin->minstrlen = 0; bin->want_dbginfo = true; bin->cur = NULL; bin->io_owned = false; bin->binfiles = r_list_newf ((RListFree)r_bin_file_free); for (i = 0; bin_static_plugins[i]; i++) { r_bin_add (bin, bin_static_plugins[i]); } bin->binxtrs = r_list_new (); bin->binxtrs->free = free; for (i = 0; bin_xtr_static_plugins[i]; i++) { static_xtr_plugin = R_NEW0 (RBinXtrPlugin); if (!static_xtr_plugin) { free (bin); return NULL; } *static_xtr_plugin = *bin_xtr_static_plugins[i]; r_bin_xtr_add (bin, static_xtr_plugin); } bin->file_ids = r_id_pool_new (0, 0xffffffff); return bin; } R_API int r_bin_use_arch(RBin *bin, const char *arch, int bits, const char *name) { RBinFile *binfile = r_bin_file_find_by_arch_bits (bin, arch, bits, name); RBinObject *obj = NULL; if (binfile) { obj = r_bin_object_find_by_arch_bits (binfile, arch, bits, name); if (!obj) { if (binfile->xtr_data) { RBinXtrData *xtr_data = r_list_get_n (binfile->xtr_data, 0); if (!r_bin_file_object_new_from_xtr_data (bin, binfile, UT64_MAX, r_bin_get_laddr (bin), xtr_data)) { return false; } obj = r_list_get_n (binfile->objs, 0); } } } else { void *plugin = r_bin_get_binplugin_by_name (bin, name); if (plugin) { if (bin->cur) { bin->cur->curplugin = plugin; } binfile = r_bin_file_new (bin, "-", NULL, 0, 0, 0, 999, NULL, NULL, false); // create object and set arch/bits obj = r_bin_object_new (binfile, plugin, 0, 0, 0, 1024); binfile->o = obj; obj->info = R_NEW0 (RBinInfo); obj->info->arch = strdup (arch); obj->info->bits = bits; } } return (binfile && r_bin_file_set_cur_binfile_obj (bin, binfile, obj)); } R_API RBinObject *r_bin_object_find_by_arch_bits(RBinFile *binfile, const char *arch, int bits, const char *name) { RBinObject *obj = NULL; RListIter *iter = NULL; RBinInfo *info = NULL; r_list_foreach (binfile->objs, iter, obj) { info = obj->info; if (info && info->arch && info->file && (bits == info->bits) && !strcmp (info->arch, arch) && !strcmp (info->file, name)) { break; } obj = NULL; } return obj; } R_API RBinFile *r_bin_file_find_by_arch_bits(RBin *bin, const char *arch, int bits, const char *name) { RListIter *iter; RBinFile *binfile = NULL; RBinXtrData *xtr_data; if (!name || !arch) { return NULL; } r_list_foreach (bin->binfiles, iter, binfile) { RListIter *iter_xtr; if (!binfile->xtr_data) { continue; } // look for sub-bins in Xtr Data and Load if we need to r_list_foreach (binfile->xtr_data, iter_xtr, xtr_data) { if (xtr_data->metadata && xtr_data->metadata->arch) { char *iter_arch = xtr_data->metadata->arch; int iter_bits = xtr_data->metadata->bits; if (bits == iter_bits && !strcmp (iter_arch, arch)) { if (!xtr_data->loaded) { if (!r_bin_file_object_new_from_xtr_data ( bin, binfile, xtr_data->baddr, xtr_data->laddr, xtr_data)) { return NULL; } return binfile; } } } } } return binfile; } R_API int r_bin_select(RBin *bin, const char *arch, int bits, const char *name) { RBinFile *cur = r_bin_cur (bin), *binfile = NULL; RBinObject *obj = NULL; name = !name && cur? 
cur->file: name; binfile = r_bin_file_find_by_arch_bits (bin, arch, bits, name); if (binfile && name) { obj = r_bin_object_find_by_arch_bits (binfile, arch, bits, name); } return binfile && r_bin_file_set_cur_binfile_obj (bin, binfile, obj); } R_API int r_bin_select_object(RBinFile *binfile, const char *arch, int bits, const char *name) { RBinObject *obj = binfile ? r_bin_object_find_by_arch_bits ( binfile, arch, bits, name) : NULL; return obj && r_bin_file_set_cur_binfile_obj (binfile->rbin, binfile, obj); } static RBinObject *r_bin_file_object_find_by_id(RBinFile *binfile, ut32 binobj_id) { RBinObject *obj; RListIter *iter; if (binfile) { r_list_foreach (binfile->objs, iter, obj) { if (obj->id == binobj_id) { return obj; } } } return NULL; } static RBinFile *r_bin_file_find_by_object_id(RBin *bin, ut32 binobj_id) { RListIter *iter; RBinFile *binfile; r_list_foreach (bin->binfiles, iter, binfile) { if (r_bin_file_object_find_by_id (binfile, binobj_id)) { return binfile; } } return NULL; } static RBinFile *r_bin_file_find_by_id(RBin *bin, ut32 binfile_id) { RBinFile *binfile = NULL; RListIter *iter = NULL; r_list_foreach (bin->binfiles, iter, binfile) { if (binfile->id == binfile_id) { break; } binfile = NULL; } return binfile; } R_API int r_bin_object_delete(RBin *bin, ut32 binfile_id, ut32 binobj_id) { RBinFile *binfile = NULL; //, *cbinfile = r_bin_cur (bin); RBinObject *obj = NULL; int res = false; #if 0 if (binfile_id == UT32_MAX && binobj_id == UT32_MAX) { return false; } #endif if (binfile_id == -1) { binfile = r_bin_file_find_by_object_id (bin, binobj_id); obj = binfile? r_bin_file_object_find_by_id (binfile, binobj_id): NULL; } else if (binobj_id == -1) { binfile = r_bin_file_find_by_id (bin, binfile_id); obj = binfile? binfile->o: NULL; } else { binfile = r_bin_file_find_by_id (bin, binfile_id); obj = binfile? r_bin_file_object_find_by_id (binfile, binobj_id): NULL; } // lazy way out, always leaving at least 1 bin object loaded if (binfile && (r_list_length (binfile->objs) > 1)) { binfile->o = NULL; r_list_delete_data (binfile->objs, obj); obj = (RBinObject *)r_list_get_n (binfile->objs, 0); res = obj && binfile && r_bin_file_set_cur_binfile_obj (bin, binfile, obj); } return res; } R_API int r_bin_select_by_ids(RBin *bin, ut32 binfile_id, ut32 binobj_id) { RBinFile *binfile = NULL; RBinObject *obj = NULL; if (binfile_id == UT32_MAX && binobj_id == UT32_MAX) { return false; } if (binfile_id == -1) { binfile = r_bin_file_find_by_object_id (bin, binobj_id); obj = binfile? r_bin_file_object_find_by_id (binfile, binobj_id): NULL; } else if (binobj_id == -1) { binfile = r_bin_file_find_by_id (bin, binfile_id); obj = binfile? binfile->o: NULL; } else { binfile = r_bin_file_find_by_id (bin, binfile_id); obj = binfile? r_bin_file_object_find_by_id (binfile, binobj_id): NULL; } if (!binfile || !obj) { return false; } return obj && binfile && r_bin_file_set_cur_binfile_obj (bin, binfile, obj); } R_API int r_bin_select_idx(RBin *bin, const char *name, int idx) { RBinFile *nbinfile = NULL, *binfile = r_bin_cur (bin); RBinObject *obj = NULL; const char *tname = !name && binfile? binfile->file: name; int res = false; if (!tname || !bin) { return res; } nbinfile = r_bin_file_find_by_name_n (bin, tname, idx); obj = nbinfile? 
r_list_get_n (nbinfile->objs, idx): NULL; return obj && nbinfile && r_bin_file_set_cur_binfile_obj (bin, nbinfile, obj); } static void list_xtr_archs(RBin *bin, int mode) { RBinFile *binfile = r_bin_cur (bin); if (binfile->xtr_data) { RListIter *iter_xtr; RBinXtrData *xtr_data; int bits, i = 0; char *arch, *machine; r_list_foreach (binfile->xtr_data, iter_xtr, xtr_data) { if (!xtr_data || !xtr_data->metadata || !xtr_data->metadata->arch) { continue; } arch = xtr_data->metadata->arch; machine = xtr_data->metadata->machine; bits = xtr_data->metadata->bits; switch (mode) { case 'q': bin->cb_printf ("%s\n", arch); break; case 'j': bin->cb_printf ( "%s{\"arch\":\"%s\",\"bits\":%d," "\"offset\":%" PFMT64d ",\"size\":\"%" PFMT64d ",\"machine\":\"%s\"}", i++ ? "," : "", arch, bits, xtr_data->offset, xtr_data->size, machine); break; default: bin->cb_printf ("%03i 0x%08" PFMT64x " %" PFMT64d " %s_%i %s\n", i++, xtr_data->offset, xtr_data->size, arch, bits, machine); break; } } } } R_API void r_bin_list_archs(RBin *bin, int mode) { RListIter *iter; int i = 0; char unk[128]; char archline[128]; RBinFile *binfile = r_bin_cur (bin); RBinObject *obj = NULL; const char *name = binfile? binfile->file: NULL; int narch = binfile? binfile->narch: 0; //are we with xtr format? if (binfile && binfile->curxtr) { list_xtr_archs (bin, mode); return; } Sdb *binfile_sdb = binfile? binfile->sdb: NULL; if (!binfile_sdb) { eprintf ("Cannot find SDB!\n"); return; } else if (!binfile) { eprintf ("Binary format not currently loaded!\n"); return; } sdb_unset (binfile_sdb, ARCHS_KEY, 0); if (mode == 'j') { bin->cb_printf ("\"bins\":["); } RBinFile *nbinfile = r_bin_file_find_by_name_n (bin, name, i); if (!nbinfile) { return; } i = -1; r_list_foreach (nbinfile->objs, iter, obj) { RBinInfo *info = obj->info; char bits = info? info->bits: 0; ut64 boffset = obj->boffset; ut32 obj_size = obj->obj_size; const char *arch = info? info->arch: NULL; const char *machine = info? info->machine: "unknown_machine"; i++; if (!arch) { snprintf (unk, sizeof (unk), "unk_%d", i); arch = unk; } if (info && narch > 1) { switch (mode) { case 'q': bin->cb_printf ("%s\n", arch); break; case 'j': bin->cb_printf ("%s{\"arch\":\"%s\",\"bits\":%d," "\"offset\":%" PFMT64d ",\"size\":%d," "\"machine\":\"%s\"}", i? ",": "", arch, bits, boffset, obj_size, machine); break; default: bin->cb_printf ("%03i 0x%08" PFMT64x " %d %s_%i %s\n", i, boffset, obj_size, arch, bits, machine); } snprintf (archline, sizeof (archline) - 1, "0x%08" PFMT64x ":%d:%s:%d:%s", boffset, obj_size, arch, bits, machine); /// xxx machine not exported? //sdb_array_push (binfile_sdb, ARCHS_KEY, archline, 0); } else { if (info) { switch (mode) { case 'q': bin->cb_printf ("%s\n", arch); break; case 'j': bin->cb_printf ("%s{\"arch\":\"%s\",\"bits\":%d," "\"offset\":%" PFMT64d ",\"size\":%d," "\"machine\":\"%s\"}", i? ",": "", arch, bits, boffset, obj_size, machine); break; default: bin->cb_printf ("%03i 0x%08" PFMT64x " %d %s_%d\n", i, boffset, obj_size, arch, bits); } snprintf (archline, sizeof (archline), "0x%08" PFMT64x ":%d:%s:%d", boffset, obj_size, arch, bits); } else if (nbinfile && mode) { switch (mode) { case 'q': bin->cb_printf ("%s\n", arch); break; case 'j': bin->cb_printf ("%s{\"arch\":\"unk_%d\",\"bits\":%d," "\"offset\":%" PFMT64d ",\"size\":%d," "\"machine\":\"%s\"}", i? 
",": "", i, bits, boffset, obj_size, machine); break; default: bin->cb_printf ("%03i 0x%08" PFMT64x " %d unk_0\n", i, boffset, obj_size); } snprintf (archline, sizeof (archline), "0x%08" PFMT64x ":%d:%s:%d", boffset, obj_size, "unk", 0); } else { eprintf ("Error: Invalid RBinFile.\n"); } //sdb_array_push (binfile_sdb, ARCHS_KEY, archline, 0); } } if (mode == 'j') { bin->cb_printf ("]"); } } R_API void r_bin_set_user_ptr(RBin *bin, void *user) { bin->user = user; } static RBinSection* _get_vsection_at(RBin *bin, ut64 vaddr) { RBinObject *cur = r_bin_object_get_cur (bin); return r_bin_get_section_at (cur, vaddr, true); } R_API void r_bin_bind(RBin *bin, RBinBind *b) { if (b) { b->bin = bin; b->get_offset = getoffset; b->get_name = getname; b->get_sections = r_bin_get_sections; b->get_vsect_at = _get_vsection_at; } } R_API RBuffer *r_bin_create(RBin *bin, const ut8 *code, int codelen, const ut8 *data, int datalen) { RBinFile *a = r_bin_cur (bin); RBinPlugin *plugin = r_bin_file_cur_plugin (a); if (codelen < 0) { codelen = 0; } if (datalen < 0) { datalen = 0; } if (plugin && plugin->create) { return plugin->create (bin, code, codelen, data, datalen); } return NULL; } R_API RBuffer *r_bin_package(RBin *bin, const char *type, const char *file, RList *files) { if (!strcmp (type, "zip")) { #if 0 int zep = 0; struct zip * z = zip_open (file, 8 | 1, &zep); if (z) { RListIter *iter; const char *f; eprintf ("zip file created\n"); r_list_foreach (files, iter, f) { struct zip_source *zs = NULL; zs = zip_source_file (z, f, 0, 1024); if (zs) { eprintf ("ADD %s\n", f); zip_add (z, f, zs); zip_source_free (zs); } else { eprintf ("Cannot find file %s\n", f); } eprintf ("zS %p\n", zs); } zip_close (z); } else { eprintf ("Cannot create zip file\n"); } #endif } else if (!strcmp (type, "fat")) { const char *f; RListIter *iter; ut32 num; ut8 *num8 = (ut8*)&num; RBuffer *buf = r_buf_new_file (file, true); r_buf_write_at (buf, 0, (const ut8*)"\xca\xfe\xba\xbe", 4); int count = r_list_length (files); num = r_read_be32 (&count); ut64 from = 0x1000; r_buf_write_at (buf, 4, num8, 4); int off = 12; int item = 0; r_list_foreach (files, iter, f) { int f_len = 0; ut8 *f_buf = (ut8 *)r_file_slurp (f, &f_len); if (f_buf && f_len >= 0) { eprintf ("ADD %s %d\n", f, f_len); } else { eprintf ("Cannot open %s\n", f); free (f_buf); continue; } item++; /* CPU */ num8[0] = f_buf[7]; num8[1] = f_buf[6]; num8[2] = f_buf[5]; num8[3] = f_buf[4]; r_buf_write_at (buf, off - 4, num8, 4); /* SUBTYPE */ num8[0] = f_buf[11]; num8[1] = f_buf[10]; num8[2] = f_buf[9]; num8[3] = f_buf[8]; r_buf_write_at (buf, off, num8, 4); ut32 from32 = from; /* FROM */ num = r_read_be32 (&from32); r_buf_write_at (buf, off + 4, num8, 4); r_buf_write_at (buf, from, f_buf, f_len); /* SIZE */ num = r_read_be32 (&f_len); r_buf_write_at (buf, off + 8, num8, 4); off += 20; from += f_len + (f_len % 0x1000); free (f_buf); } r_buf_free (buf); return NULL; } else { eprintf ("Usage: rabin2 -X [fat|zip] [filename] [files ...]\n"); } return NULL; } R_API RBinObject *r_bin_get_object(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); if (o) { o->referenced++; } return o; } R_API RList * /*<RBinClass>*/ r_bin_get_classes(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o? o->classes: NULL; } R_API void r_bin_class_free(RBinClass *c) { free (c->name); free (c->super); r_list_free (c->methods); r_list_free (c->fields); free (c); } R_API RBinClass *r_bin_class_new(RBinFile *binfile, const char *name, const char *super, int view) { RBinObject *o = binfile? 
binfile->o: NULL; RList *list = NULL; RBinClass *c; if (!o) { return NULL; } list = o->classes; if (!name) { return NULL; } c = r_bin_class_get (binfile, name); if (c) { if (super) { free (c->super); c->super = strdup (super); } return c; } c = R_NEW0 (RBinClass); if (!c) { return NULL; } c->name = strdup (name); c->super = super? strdup (super): NULL; c->index = r_list_length (list); c->methods = r_list_new (); c->fields = r_list_new (); c->visibility = view; if (!list) { list = o->classes = r_list_new (); } r_list_append (list, c); return c; } R_API RBinClass *r_bin_class_get(RBinFile *binfile, const char *name) { if (!binfile || !binfile->o || !name) { return NULL; } RBinClass *c; RListIter *iter; RList *list = binfile->o->classes; r_list_foreach (list, iter, c) { if (!strcmp (c->name, name)) { return c; } } return NULL; } R_API RBinSymbol *r_bin_class_add_method(RBinFile *binfile, const char *classname, const char *name, int nargs) { RBinClass *c = r_bin_class_get (binfile, classname); if (!c) { c = r_bin_class_new (binfile, classname, NULL, 0); if (!c) { eprintf ("Cannot allocate class %s\n", classname); return NULL; } } RBinSymbol *m; RListIter *iter; r_list_foreach (c->methods, iter, m) { if (!strcmp (m->name, name)) { return NULL; } } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { return NULL; } sym->name = strdup (name); r_list_append (c->methods, sym); return sym; } R_API void r_bin_class_add_field(RBinFile *binfile, const char *classname, const char *name) { //TODO: add_field into class //eprintf ("TODO add field: %s \n", name); } /* returns vaddr, rebased with the baseaddr of binfile, if va is enabled for * bin, paddr otherwise */ R_API ut64 r_binfile_get_vaddr(RBinFile *binfile, ut64 paddr, ut64 vaddr) { int use_va = 0; if (binfile && binfile->o && binfile->o->info) { use_va = binfile->o->info->has_va; } return use_va? binobj_a2b (binfile->o, vaddr): paddr; } /* returns vaddr, rebased with the baseaddr of bin, if va is enabled for bin, * paddr otherwise */ R_API ut64 r_bin_get_vaddr(RBin *bin, ut64 paddr, ut64 vaddr) { if (!bin || !bin->cur) { return UT64_MAX; } if (paddr == UT64_MAX) { return UT64_MAX; } /* hack to realign thumb symbols */ if (bin->cur->o && bin->cur->o->info && bin->cur->o->info->arch) { if (bin->cur->o->info->bits == 16) { RBinSection *s = r_bin_get_section_at (bin->cur->o, paddr, false); // autodetect thumb if (s && s->srwx & 1 && strstr (s->name, "text")) { if (!strcmp (bin->cur->o->info->arch, "arm") && (vaddr & 1)) { vaddr = (vaddr >> 1) << 1; } } } } return r_binfile_get_vaddr (bin->cur, paddr, vaddr); } R_API ut64 r_bin_a2b(RBin *bin, ut64 addr) { RBinObject *o = r_bin_cur_object (bin); return o? o->baddr_shift + addr: addr; } R_API ut64 r_bin_get_size(RBin *bin) { RBinObject *o = r_bin_cur_object (bin); return o ? 
o->size : 0; } R_API int r_bin_file_delete_all(RBin *bin) { int counter = 0; if (bin) { counter = r_list_length (bin->binfiles); r_list_purge (bin->binfiles); bin->cur = NULL; } return counter; } R_API int r_bin_file_delete(RBin *bin, ut32 bin_fd) { RListIter *iter; RBinFile *bf; RBinFile *cur = r_bin_cur (bin); if (bin && cur) { r_list_foreach (bin->binfiles, iter, bf) { if (bf && bf->fd == bin_fd) { if (cur->fd == bin_fd) { //avoiding UaF due to dead reference bin->cur = NULL; } r_list_delete (bin->binfiles, iter); return 1; } } } return 0; } R_API RBinFile *r_bin_file_find_by_fd(RBin *bin, ut32 bin_fd) { RListIter *iter; RBinFile *bf; if (bin) { r_list_foreach (bin->binfiles, iter, bf) { if (bf && bf->fd == bin_fd) { return bf; } } } return NULL; } R_API RBinFile *r_bin_file_find_by_name(RBin *bin, const char *name) { RListIter *iter; RBinFile *bf = NULL; if (!bin || !name) { return NULL; } r_list_foreach (bin->binfiles, iter, bf) { if (bf && bf->file && !strcmp (bf->file, name)) { break; } bf = NULL; } return bf; } R_API RBinFile *r_bin_file_find_by_name_n(RBin *bin, const char *name, int idx) { RListIter *iter; RBinFile *bf = NULL; int i = 0; if (!bin) { return bf; } r_list_foreach (bin->binfiles, iter, bf) { if (bf && bf->file && !strcmp (bf->file, name)) { if (i == idx) { break; } i++; } bf = NULL; } return bf; } R_API int r_bin_file_set_cur_by_fd(RBin *bin, ut32 bin_fd) { RBinFile *bf = r_bin_file_find_by_fd (bin, bin_fd); return r_bin_file_set_cur_binfile (bin, bf); } R_API int r_bin_file_set_cur_binfile_obj(RBin *bin, RBinFile *bf, RBinObject *obj) { RBinPlugin *plugin = NULL; if (!bin || !bf || !obj) { return false; } bin->file = bf->file; bin->cur = bf; bin->narch = bf->narch; bf->o = obj; plugin = r_bin_file_cur_plugin (bf); if (bin->minstrlen < 1) { bin->minstrlen = plugin? plugin->minstrlen: bin->minstrlen; } return true; } R_API int r_bin_file_set_cur_binfile(RBin *bin, RBinFile *bf) { RBinObject *obj = bf? bf->o: NULL; return obj? r_bin_file_set_cur_binfile_obj (bin, bf, obj): false; } R_API int r_bin_file_set_cur_by_name(RBin *bin, const char *name) { RBinFile *bf = r_bin_file_find_by_name (bin, name); return r_bin_file_set_cur_binfile (bin, bf); } R_API RBinFile *r_bin_cur(RBin *bin) { return bin? bin->cur: NULL; } R_API RBinObject *r_bin_cur_object(RBin *bin) { RBinFile *binfile = r_bin_cur (bin); return binfile? binfile->o: NULL; } R_API void r_bin_force_plugin(RBin *bin, const char *name) { free (bin->force); bin->force = (name && *name)? 
strdup (name): NULL; } R_API int r_bin_read_at(RBin *bin, ut64 addr, ut8 *buf, int size) { RIOBind *iob; if (!bin || !(iob = &(bin->iob))) { return false; } return iob->read_at (iob->io, addr, buf, size); } R_API int r_bin_write_at(RBin *bin, ut64 addr, const ut8 *buf, int size) { RIOBind *iob; if (!bin || !(iob = &(bin->iob))) { return false; } return iob->write_at (iob->io, addr, buf, size); } R_API const char *r_bin_entry_type_string(int etype) { switch (etype) { case R_BIN_ENTRY_TYPE_PROGRAM: return "program"; case R_BIN_ENTRY_TYPE_MAIN: return "main"; case R_BIN_ENTRY_TYPE_INIT: return "init"; case R_BIN_ENTRY_TYPE_FINI: return "fini"; case R_BIN_ENTRY_TYPE_TLS: return "tls"; } return NULL; } R_API void r_bin_load_filter(RBin *bin, ut64 rules) { bin->filter_rules = rules; } /* RBinField */ R_API RBinField *r_bin_field_new(ut64 paddr, ut64 vaddr, int size, const char *name, const char *comment, const char *format) { RBinField *ptr; if (!(ptr = R_NEW0 (RBinField))) { return NULL; } ptr->name = strdup (name); ptr->comment = (comment && *comment)? strdup (comment): NULL; ptr->format = (format && *format)? strdup (format): NULL; ptr->paddr = paddr; ptr->size = size; // ptr->visibility = ??? ptr->vaddr = vaddr; return ptr; } // use void* to honor the RListFree signature R_API void r_bin_field_free(void *_field) { RBinField *field = (RBinField*) _field; free (field->name); free (field->comment); free (field->format); free (field); } R_API const char *r_bin_get_meth_flag_string(ut64 flag, bool compact) { switch (flag) { case R_BIN_METH_CLASS: return compact ? "c" : "class"; case R_BIN_METH_STATIC: return compact ? "s" : "static"; case R_BIN_METH_PUBLIC: return compact ? "p" : "public"; case R_BIN_METH_PRIVATE: return compact ? "P" : "private"; case R_BIN_METH_PROTECTED: return compact ? "r" : "protected"; case R_BIN_METH_INTERNAL: return compact ? "i" : "internal"; case R_BIN_METH_OPEN: return compact ? "o" : "open"; case R_BIN_METH_FILEPRIVATE: return compact ? "e" : "fileprivate"; case R_BIN_METH_FINAL: return compact ? "f" : "final"; case R_BIN_METH_VIRTUAL: return compact ? "v" : "virtual"; case R_BIN_METH_CONST: return compact ? "k" : "const"; case R_BIN_METH_MUTATING: return compact ? "m" : "mutating"; case R_BIN_METH_ABSTRACT: return compact ? "a" : "abstract"; case R_BIN_METH_SYNCHRONIZED: return compact ? "y" : "synchronized"; case R_BIN_METH_NATIVE: return compact ? "n" : "native"; case R_BIN_METH_BRIDGE: return compact ? "b" : "bridge"; case R_BIN_METH_VARARGS: return compact ? "g" : "varargs"; case R_BIN_METH_SYNTHETIC: return compact ? "h" : "synthetic"; case R_BIN_METH_STRICT: return compact ? "t" : "strict"; case R_BIN_METH_MIRANDA: return compact ? "A" : "miranda"; case R_BIN_METH_CONSTRUCTOR: return compact ? "C" : "constructor"; case R_BIN_METH_DECLARED_SYNCHRONIZED: return compact ? "Y" : "declared_synchronized"; default: return NULL; } }
static int string_scan_range(RList *list, const ut8 *buf, int min, const ut64 from, const ut64 to, int type) { ut8 tmp[R_STRING_SCAN_BUFFER_SIZE]; ut64 str_start, needle = from; int count = 0, i, rc, runes; int str_type = R_STRING_TYPE_DETECT; if (type == -1) { type = R_STRING_TYPE_DETECT; } if (!buf || !min) { return -1; } while (needle < to) { rc = r_utf8_decode (buf + needle, to - needle, NULL); if (!rc) { needle++; continue; } if (type == R_STRING_TYPE_DETECT) { char *w = (char *)buf + needle + rc; if ((to - needle) > 4) { bool is_wide32 = needle + rc + 2 < to && !w[0] && !w[1] && !w[2] && w[3] && !w[4]; if (is_wide32) { str_type = R_STRING_TYPE_WIDE32; } else { bool is_wide = needle + rc + 2 < to && !w[0] && w[1] && !w[2]; str_type = is_wide? R_STRING_TYPE_WIDE: R_STRING_TYPE_ASCII; } } else { str_type = R_STRING_TYPE_ASCII; } } else { str_type = type; } runes = 0; str_start = needle; /* Eat a whole C string */ for (rc = i = 0; i < sizeof (tmp) - 3 && needle < to; i += rc) { RRune r = {0}; if (str_type == R_STRING_TYPE_WIDE32) { rc = r_utf32le_decode (buf + needle, to - needle, &r); if (rc) { rc = 4; } } else if (str_type == R_STRING_TYPE_WIDE) { rc = r_utf16le_decode (buf + needle, to - needle, &r); if (rc == 1) { rc = 2; } } else { rc = r_utf8_decode (buf + needle, to - needle, &r); if (rc > 1) { str_type = R_STRING_TYPE_UTF8; } } /* Invalid sequence detected */ if (!rc) { needle++; break; } needle += rc; if (r_isprint (r)) { if (str_type == R_STRING_TYPE_WIDE32) { if (r == 0xff) { r = 0; } } rc = r_utf8_encode (&tmp[i], r); runes++; /* Print the escape code */ } else if (r && r < 0x100 && strchr ("\b\v\f\n\r\t\a\e", (char)r)) { if ((i + 32) < sizeof (tmp) && r < 28) { tmp[i + 0] = '\\'; tmp[i + 1] = " abtnvfr e"[r]; } else { // string too long break; } rc = 2; runes++; } else { /* \0 marks the end of C-strings */ break; } } tmp[i++] = '\0'; if (runes >= min) { if (str_type == R_STRING_TYPE_ASCII) { // reduce false positives int j; for (j = 0; j < i; j++) { char ch = tmp[j]; if (ch != '\n' && ch != '\r' && ch != '\t') { if (!IS_PRINTABLE (tmp[j])) { continue; } } } } if (list) { RBinString *new = R_NEW0 (RBinString); if (!new) { break; } new->type = str_type; new->length = runes; new->size = needle - str_start; new->ordinal = count++; // TODO: move into adjust_offset switch (str_type) { case R_STRING_TYPE_WIDE: { const ut8 *p = buf + str_start - 2; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 2; // \xff\xfe } } break; case R_STRING_TYPE_WIDE32: { const ut8 *p = buf + str_start - 4; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 4; // \xff\xfe\x00\x00 } } break; } new->paddr = new->vaddr = str_start; new->string = r_str_ndup ((const char *)tmp, i); r_list_append (list, new); } else { // DUMP TO STDOUT. raw dumping for rabin2 -zzz printf ("0x%08" PFMT64x " %s\n", str_start, tmp); } } } return count; }
static int string_scan_range(RList *list, const ut8 *buf, int min, const ut64 from, const ut64 to, int type) { ut8 tmp[R_STRING_SCAN_BUFFER_SIZE]; ut64 str_start, needle = from; int count = 0, i, rc, runes; int str_type = R_STRING_TYPE_DETECT; if (type == -1) { type = R_STRING_TYPE_DETECT; } if (!buf || !min) { return -1; } while (needle < to) { rc = r_utf8_decode (buf + needle, to - needle, NULL); if (!rc) { needle++; continue; } if (type == R_STRING_TYPE_DETECT) { char *w = (char *)buf + needle + rc; if ((to - needle) > 4) { bool is_wide32 = needle + rc + 2 < to && !w[0] && !w[1] && !w[2] && w[3] && !w[4]; if (is_wide32) { str_type = R_STRING_TYPE_WIDE32; } else { bool is_wide = needle + rc + 2 < to && !w[0] && w[1] && !w[2]; str_type = is_wide? R_STRING_TYPE_WIDE: R_STRING_TYPE_ASCII; } } else { str_type = R_STRING_TYPE_ASCII; } } else { str_type = type; } runes = 0; str_start = needle; /* Eat a whole C string */ for (rc = i = 0; i < sizeof (tmp) - 3 && needle < to; i += rc) { RRune r = {0}; if (str_type == R_STRING_TYPE_WIDE32) { rc = r_utf32le_decode (buf + needle, to - needle, &r); if (rc) { rc = 4; } } else if (str_type == R_STRING_TYPE_WIDE) { rc = r_utf16le_decode (buf + needle, to - needle, &r); if (rc == 1) { rc = 2; } } else { rc = r_utf8_decode (buf + needle, to - needle, &r); if (rc > 1) { str_type = R_STRING_TYPE_UTF8; } } /* Invalid sequence detected */ if (!rc) { needle++; break; } needle += rc; if (r_isprint (r)) { if (str_type == R_STRING_TYPE_WIDE32) { if (r == 0xff) { r = 0; } } rc = r_utf8_encode (&tmp[i], r); runes++; /* Print the escape code */ } else if (r && r < 0x100 && strchr ("\b\v\f\n\r\t\a\e", (char)r)) { if ((i + 32) < sizeof (tmp) && r < 28) { tmp[i + 0] = '\\'; tmp[i + 1] = " abtnvfr e"[r]; } else { // string too long break; } rc = 2; runes++; } else { /* \0 marks the end of C-strings */ break; } } tmp[i++] = '\0'; if (runes >= min) { if (str_type == R_STRING_TYPE_ASCII) { // reduce false positives int j; for (j = 0; j < i; j++) { char ch = tmp[j]; if (ch != '\n' && ch != '\r' && ch != '\t') { if (!IS_PRINTABLE (tmp[j])) { continue; } } } } if (list) { RBinString *new = R_NEW0 (RBinString); if (!new) { break; } new->type = str_type; new->length = runes; new->size = needle - str_start; new->ordinal = count++; // TODO: move into adjust_offset switch (str_type) { case R_STRING_TYPE_WIDE: if (str_start > 1) { const ut8 *p = buf + str_start - 2; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 2; // \xff\xfe } } break; case R_STRING_TYPE_WIDE32: if (str_start > 3) { const ut8 *p = buf + str_start - 4; if (p[0] == 0xff && p[1] == 0xfe) { str_start -= 4; // \xff\xfe\x00\x00 } } break; } new->paddr = new->vaddr = str_start; new->string = r_str_ndup ((const char *)tmp, i); r_list_append (list, new); } else { // DUMP TO STDOUT. raw dumping for rabin2 -zzz printf ("0x%08" PFMT64x " %s\n", str_start, tmp); } } } return count; }
{'added': [(355, '\t\t\t\t\tif (str_start > 1) {'), (356, '\t\t\t\t\t\tconst ut8 *p = buf + str_start - 2;'), (363, '\t\t\t\t\tif (str_start > 3) {'), (364, '\t\t\t\t\t\tconst ut8 *p = buf + str_start - 4;')], 'deleted': [(355, '\t\t\t\t\t{'), (356, '\t\t\t\t\t\tconst ut8 *p = buf + str_start - 2;'), (363, '\t\t\t\t\t{'), (364, '\t\t\t\t\t\tconst ut8 *p = buf + str_start - 4;')]}
4
4
2,511
16,375
https://github.com/radare/radare2
CVE-2017-16358
['CWE-125']
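The diff recorded above for CVE-2017-16358 guards the BOM look-back in string_scan_range with str_start bounds checks, so the 2-byte (UTF-16) or 4-byte (UTF-32) rewind to buf + str_start - 2 / - 4 can never read before the scanned buffer (the CWE-125 out-of-bounds read listed for this record). A minimal standalone sketch of that guard pattern follows; the adjust_wide_start helper and the sample buffer are hypothetical illustrations, not radare2 code.

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch of the guarded BOM look-back: only rewind to include a preceding
 * byte-order mark when enough bytes exist before the string start, so the
 * look-back cannot read out of bounds (CWE-125). */
static size_t adjust_wide_start(const uint8_t *buf, size_t str_start, int wide32)
{
	if (wide32) {
		/* a UTF-32 BOM needs at least 4 bytes before the string */
		if (str_start > 3) {
			const uint8_t *p = buf + str_start - 4;
			if (p[0] == 0xff && p[1] == 0xfe) {
				return str_start - 4; /* \xff\xfe\x00\x00 */
			}
		}
	} else {
		/* a UTF-16 BOM needs at least 2 bytes before the string */
		if (str_start > 1) {
			const uint8_t *p = buf + str_start - 2;
			if (p[0] == 0xff && p[1] == 0xfe) {
				return str_start - 2; /* \xff\xfe */
			}
		}
	}
	return str_start; /* no safe look-back possible, keep the original start */
}

int main(void)
{
	const uint8_t sample[] = { 0xff, 0xfe, 'h', 0, 'i', 0 };
	/* payload starts at offset 2; the guard allows the BOM to be included */
	printf("%zu\n", adjust_wide_start(sample, 2, 0)); /* prints 0 */
	/* start too close to the buffer beginning: the guard skips the look-back
	 * that would otherwise read before sample[0] */
	printf("%zu\n", adjust_wide_start(sample, 1, 0)); /* prints 1 */
	return 0;
}
```

The unguarded version differs only in omitting the str_start > 1 / str_start > 3 checks, which is exactly the four-line change shown in the diff field of this record.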
file.c
__close_fd_get_file
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/file.c * * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes * * Manage the dynamic fd arrays in the process files_struct. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/nospec.h> unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; /* our min() is unusable in constant expressions ;-/ */ #define __const_min(x, y) ((x) < (y) ? (x) : (y)) unsigned int sysctl_nr_open_max = __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG; static void __free_fdtable(struct fdtable *fdt) { kvfree(fdt->fd); kvfree(fdt->open_fds); kfree(fdt); } static void free_fdtable_rcu(struct rcu_head *rcu) { __free_fdtable(container_of(rcu, struct fdtable, rcu)); } #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr)) #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long)) /* * Copy 'count' fd bits from the old table to the new table and clear the extra * space if any. This does not copy the file pointers. Called with the files * spinlock held for write. */ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, unsigned int count) { unsigned int cpy, set; cpy = count / BITS_PER_BYTE; set = (nfdt->max_fds - count) / BITS_PER_BYTE; memcpy(nfdt->open_fds, ofdt->open_fds, cpy); memset((char *)nfdt->open_fds + cpy, 0, set); memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy); memset((char *)nfdt->close_on_exec + cpy, 0, set); cpy = BITBIT_SIZE(count); set = BITBIT_SIZE(nfdt->max_fds) - cpy; memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy); memset((char *)nfdt->full_fds_bits + cpy, 0, set); } /* * Copy all file descriptors from the old table to the new, expanded table and * clear the extra space. Called with the files spinlock held for write. */ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) { size_t cpy, set; BUG_ON(nfdt->max_fds < ofdt->max_fds); cpy = ofdt->max_fds * sizeof(struct file *); set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); memcpy(nfdt->fd, ofdt->fd, cpy); memset((char *)nfdt->fd + cpy, 0, set); copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds); } static struct fdtable * alloc_fdtable(unsigned int nr) { struct fdtable *fdt; void *data; /* * Figure out how many fds we actually want to support in this fdtable. * Allocation steps are keyed to the size of the fdarray, since it * grows far faster than any of the other dynamic data. We try to fit * the fdarray into comfortable page-tuned chunks: starting at 1024B * and growing in powers of two from there on. */ nr /= (1024 / sizeof(struct file *)); nr = roundup_pow_of_two(nr + 1); nr *= (1024 / sizeof(struct file *)); /* * Note that this can drive nr *below* what we had passed if sysctl_nr_open * had been set lower between the check in expand_files() and here. Deal * with that in caller, it's cheaper that way. * * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise * bitmaps handling below becomes unpleasant, to put it mildly... 
*/ if (unlikely(nr > sysctl_nr_open)) nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1; fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); if (!fdt) goto out; fdt->max_fds = nr; data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT); if (!data) goto out_fdt; fdt->fd = data; data = kvmalloc(max_t(size_t, 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES), GFP_KERNEL_ACCOUNT); if (!data) goto out_arr; fdt->open_fds = data; data += nr / BITS_PER_BYTE; fdt->close_on_exec = data; data += nr / BITS_PER_BYTE; fdt->full_fds_bits = data; return fdt; out_arr: kvfree(fdt->fd); out_fdt: kfree(fdt); out: return NULL; } /* * Expand the file descriptor table. * This function will allocate a new fdtable and both fd array and fdset, of * the given size. * Return <0 error code on error; 1 on successful completion. * The files->file_lock should be held on entry, and will be held on exit. */ static int expand_fdtable(struct files_struct *files, unsigned int nr) __releases(files->file_lock) __acquires(files->file_lock) { struct fdtable *new_fdt, *cur_fdt; spin_unlock(&files->file_lock); new_fdt = alloc_fdtable(nr); /* make sure all __fd_install() have seen resize_in_progress * or have finished their rcu_read_lock_sched() section. */ if (atomic_read(&files->count) > 1) synchronize_rcu(); spin_lock(&files->file_lock); if (!new_fdt) return -ENOMEM; /* * extremely unlikely race - sysctl_nr_open decreased between the check in * caller and alloc_fdtable(). Cheaper to catch it here... */ if (unlikely(new_fdt->max_fds <= nr)) { __free_fdtable(new_fdt); return -EMFILE; } cur_fdt = files_fdtable(files); BUG_ON(nr < cur_fdt->max_fds); copy_fdtable(new_fdt, cur_fdt); rcu_assign_pointer(files->fdt, new_fdt); if (cur_fdt != &files->fdtab) call_rcu(&cur_fdt->rcu, free_fdtable_rcu); /* coupled with smp_rmb() in __fd_install() */ smp_wmb(); return 1; } /* * Expand files. * This function will expand the file structures, if the requested size exceeds * the current capacity and there is room for expansion. * Return <0 error code on error; 0 when nothing done; 1 when files were * expanded and execution may have blocked. * The files->file_lock should be held on entry, and will be held on exit. */ static int expand_files(struct files_struct *files, unsigned int nr) __releases(files->file_lock) __acquires(files->file_lock) { struct fdtable *fdt; int expanded = 0; repeat: fdt = files_fdtable(files); /* Do we need to expand? */ if (nr < fdt->max_fds) return expanded; /* Can we expand? 
*/ if (nr >= sysctl_nr_open) return -EMFILE; if (unlikely(files->resize_in_progress)) { spin_unlock(&files->file_lock); expanded = 1; wait_event(files->resize_wait, !files->resize_in_progress); spin_lock(&files->file_lock); goto repeat; } /* All good, so we try */ files->resize_in_progress = true; expanded = expand_fdtable(files, nr); files->resize_in_progress = false; wake_up_all(&files->resize_wait); return expanded; } static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt) { __set_bit(fd, fdt->close_on_exec); } static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt) { if (test_bit(fd, fdt->close_on_exec)) __clear_bit(fd, fdt->close_on_exec); } static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt) { __set_bit(fd, fdt->open_fds); fd /= BITS_PER_LONG; if (!~fdt->open_fds[fd]) __set_bit(fd, fdt->full_fds_bits); } static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) { __clear_bit(fd, fdt->open_fds); __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits); } static unsigned int count_open_files(struct fdtable *fdt) { unsigned int size = fdt->max_fds; unsigned int i; /* Find the last open fd */ for (i = size / BITS_PER_LONG; i > 0; ) { if (fdt->open_fds[--i]) break; } i = (i + 1) * BITS_PER_LONG; return i; } /* * Allocate a new files structure and copy contents from the * passed in files structure. * errorp will be valid only when the returned files_struct is NULL. */ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) { struct files_struct *newf; struct file **old_fds, **new_fds; unsigned int open_files, i; struct fdtable *old_fdt, *new_fdt; *errorp = -ENOMEM; newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); if (!newf) goto out; atomic_set(&newf->count, 1); spin_lock_init(&newf->file_lock); newf->resize_in_progress = false; init_waitqueue_head(&newf->resize_wait); newf->next_fd = 0; new_fdt = &newf->fdtab; new_fdt->max_fds = NR_OPEN_DEFAULT; new_fdt->close_on_exec = newf->close_on_exec_init; new_fdt->open_fds = newf->open_fds_init; new_fdt->full_fds_bits = newf->full_fds_bits_init; new_fdt->fd = &newf->fd_array[0]; spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); open_files = count_open_files(old_fdt); /* * Check whether we need to allocate a larger fd array and fd set. */ while (unlikely(open_files > new_fdt->max_fds)) { spin_unlock(&oldf->file_lock); if (new_fdt != &newf->fdtab) __free_fdtable(new_fdt); new_fdt = alloc_fdtable(open_files - 1); if (!new_fdt) { *errorp = -ENOMEM; goto out_release; } /* beyond sysctl_nr_open; nothing to do */ if (unlikely(new_fdt->max_fds < open_files)) { __free_fdtable(new_fdt); *errorp = -EMFILE; goto out_release; } /* * Reacquire the oldf lock and a pointer to its fd table * who knows it may have a new bigger fd table. We need * the latest pointer. */ spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); open_files = count_open_files(old_fdt); } copy_fd_bitmaps(new_fdt, old_fdt, open_files); old_fds = old_fdt->fd; new_fds = new_fdt->fd; for (i = open_files; i != 0; i--) { struct file *f = *old_fds++; if (f) { get_file(f); } else { /* * The fd may be claimed in the fd bitmap but not yet * instantiated in the files array if a sibling thread * is partway through open(). So make sure that this * fd is available to the new process. 
*/ __clear_open_fd(open_files - i, new_fdt); } rcu_assign_pointer(*new_fds++, f); } spin_unlock(&oldf->file_lock); /* clear the remainder */ memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *)); rcu_assign_pointer(newf->fdt, new_fdt); return newf; out_release: kmem_cache_free(files_cachep, newf); out: return NULL; } static struct fdtable *close_files(struct files_struct * files) { /* * It is safe to dereference the fd table without RCU or * ->file_lock because this is the last reference to the * files structure. */ struct fdtable *fdt = rcu_dereference_raw(files->fdt); unsigned int i, j = 0; for (;;) { unsigned long set; i = j * BITS_PER_LONG; if (i >= fdt->max_fds) break; set = fdt->open_fds[j++]; while (set) { if (set & 1) { struct file * file = xchg(&fdt->fd[i], NULL); if (file) { filp_close(file, files); cond_resched(); } } i++; set >>= 1; } } return fdt; } struct files_struct *get_files_struct(struct task_struct *task) { struct files_struct *files; task_lock(task); files = task->files; if (files) atomic_inc(&files->count); task_unlock(task); return files; } void put_files_struct(struct files_struct *files) { if (atomic_dec_and_test(&files->count)) { struct fdtable *fdt = close_files(files); /* free the arrays if they are not embedded */ if (fdt != &files->fdtab) __free_fdtable(fdt); kmem_cache_free(files_cachep, files); } } void reset_files_struct(struct files_struct *files) { struct task_struct *tsk = current; struct files_struct *old; old = tsk->files; task_lock(tsk); tsk->files = files; task_unlock(tsk); put_files_struct(old); } void exit_files(struct task_struct *tsk) { struct files_struct * files = tsk->files; if (files) { task_lock(tsk); tsk->files = NULL; task_unlock(tsk); put_files_struct(files); } } struct files_struct init_files = { .count = ATOMIC_INIT(1), .fdt = &init_files.fdtab, .fdtab = { .max_fds = NR_OPEN_DEFAULT, .fd = &init_files.fd_array[0], .close_on_exec = init_files.close_on_exec_init, .open_fds = init_files.open_fds_init, .full_fds_bits = init_files.full_fds_bits_init, }, .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait), }; static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start) { unsigned int maxfd = fdt->max_fds; unsigned int maxbit = maxfd / BITS_PER_LONG; unsigned int bitbit = start / BITS_PER_LONG; bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; if (bitbit > maxfd) return maxfd; if (bitbit > start) start = bitbit; return find_next_zero_bit(fdt->open_fds, maxfd, start); } /* * allocate a file descriptor, mark it busy. */ int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags) { unsigned int fd; int error; struct fdtable *fdt; spin_lock(&files->file_lock); repeat: fdt = files_fdtable(files); fd = start; if (fd < files->next_fd) fd = files->next_fd; if (fd < fdt->max_fds) fd = find_next_fd(fdt, fd); /* * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. */ error = -EMFILE; if (fd >= end) goto out; error = expand_files(files, fd); if (error < 0) goto out; /* * If we needed to expand the fs array we * might have blocked - try again. 
*/ if (error) goto repeat; if (start <= files->next_fd) files->next_fd = fd + 1; __set_open_fd(fd, fdt); if (flags & O_CLOEXEC) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); error = fd; #if 1 /* Sanity check */ if (rcu_access_pointer(fdt->fd[fd]) != NULL) { printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd); rcu_assign_pointer(fdt->fd[fd], NULL); } #endif out: spin_unlock(&files->file_lock); return error; } static int alloc_fd(unsigned start, unsigned flags) { return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags); } int __get_unused_fd_flags(unsigned flags, unsigned long nofile) { return __alloc_fd(current->files, 0, nofile, flags); } int get_unused_fd_flags(unsigned flags) { return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE)); } EXPORT_SYMBOL(get_unused_fd_flags); static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __clear_open_fd(fd, fdt); if (fd < files->next_fd) files->next_fd = fd; } void put_unused_fd(unsigned int fd) { struct files_struct *files = current->files; spin_lock(&files->file_lock); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); } EXPORT_SYMBOL(put_unused_fd); /* * Install a file pointer in the fd array. * * The VFS is full of places where we drop the files lock between * setting the open_fds bitmap and installing the file in the file * array. At any such point, we are vulnerable to a dup2() race * installing a file in the array before us. We need to detect this and * fput() the struct file we are about to overwrite in this case. * * It should never happen - if we allow dup2() do it, _really_ bad things * will follow. * * NOTE: __fd_install() variant is really, really low-level; don't * use it unless you are forced to by truly lousy API shoved down * your throat. 'files' *MUST* be either current->files or obtained * by get_files_struct(current) done by whoever had given it to you, * or really bad things will happen. Normally you want to use * fd_install() instead. */ void __fd_install(struct files_struct *files, unsigned int fd, struct file *file) { struct fdtable *fdt; rcu_read_lock_sched(); if (unlikely(files->resize_in_progress)) { rcu_read_unlock_sched(); spin_lock(&files->file_lock); fdt = files_fdtable(files); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); spin_unlock(&files->file_lock); return; } /* coupled with smp_wmb() in expand_fdtable() */ smp_rmb(); fdt = rcu_dereference_sched(files->fdt); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); rcu_read_unlock_sched(); } void fd_install(unsigned int fd, struct file *file) { __fd_install(current->files, fd, file); } EXPORT_SYMBOL(fd_install); /* * The same warnings as for __alloc_fd()/__fd_install() apply here... 
*/ int __close_fd(struct files_struct *files, unsigned fd) { struct file *file; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; fd = array_index_nospec(fd, fdt->max_fds); file = fdt->fd[fd]; if (!file) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); return -EBADF; } EXPORT_SYMBOL(__close_fd); /* for ksys_close() */ /* * variant of __close_fd that gets a ref on the file for later fput */ int __close_fd_get_file(unsigned int fd, struct file **res) { struct files_struct *files = current->files; struct file *file; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; file = fdt->fd[fd]; if (!file) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); get_file(file); *res = file; return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); *res = NULL; return -ENOENT; } void do_close_on_exec(struct files_struct *files) { unsigned i; struct fdtable *fdt; /* exec unshares first */ spin_lock(&files->file_lock); for (i = 0; ; i++) { unsigned long set; unsigned fd = i * BITS_PER_LONG; fdt = files_fdtable(files); if (fd >= fdt->max_fds) break; set = fdt->close_on_exec[i]; if (!set) continue; fdt->close_on_exec[i] = 0; for ( ; set ; fd++, set >>= 1) { struct file *file; if (!(set & 1)) continue; file = fdt->fd[fd]; if (!file) continue; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); filp_close(file, files); cond_resched(); spin_lock(&files->file_lock); } } spin_unlock(&files->file_lock); } static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs) { struct files_struct *files = current->files; struct file *file; rcu_read_lock(); loop: file = fcheck_files(files, fd); if (file) { /* File object ref couldn't be taken. * dup2() atomicity guarantee is the reason * we loop to catch the new file (or NULL pointer) */ if (file->f_mode & mask) file = NULL; else if (!get_file_rcu_many(file, refs)) goto loop; else if (__fcheck_files(files, fd) != file) { fput_many(file, refs); goto loop; } } rcu_read_unlock(); return file; } struct file *fget_many(unsigned int fd, unsigned int refs) { return __fget(fd, FMODE_PATH, refs); } struct file *fget(unsigned int fd) { return __fget(fd, FMODE_PATH, 1); } EXPORT_SYMBOL(fget); struct file *fget_raw(unsigned int fd) { return __fget(fd, 0, 1); } EXPORT_SYMBOL(fget_raw); /* * Lightweight file lookup - no refcnt increment if fd table isn't shared. * * You can use this instead of fget if you satisfy all of the following * conditions: * 1) You must call fput_light before exiting the syscall and returning control * to userspace (i.e. you cannot remember the returned struct file * after * returning to userspace). * 2) You must not call filp_close on the returned struct file * in between * calls to fget_light and fput_light. * 3) You must not clone the current task in between the calls to fget_light * and fput_light. * * The fput_needed flag returned by fget_light should be passed to the * corresponding fput_light. 
*/ static unsigned long __fget_light(unsigned int fd, fmode_t mask) { struct files_struct *files = current->files; struct file *file; if (atomic_read(&files->count) == 1) { file = __fcheck_files(files, fd); if (!file || unlikely(file->f_mode & mask)) return 0; return (unsigned long)file; } else { file = __fget(fd, mask, 1); if (!file) return 0; return FDPUT_FPUT | (unsigned long)file; } } unsigned long __fdget(unsigned int fd) { return __fget_light(fd, FMODE_PATH); } EXPORT_SYMBOL(__fdget); unsigned long __fdget_raw(unsigned int fd) { return __fget_light(fd, 0); } unsigned long __fdget_pos(unsigned int fd) { unsigned long v = __fdget(fd); struct file *file = (struct file *)(v & ~3); if (file && (file->f_mode & FMODE_ATOMIC_POS)) { if (file_count(file) > 1) { v |= FDPUT_POS_UNLOCK; mutex_lock(&file->f_pos_lock); } } return v; } void __f_unlock_pos(struct file *f) { mutex_unlock(&f->f_pos_lock); } /* * We only lock f_pos if we have threads or if the file might be * shared with another process. In both cases we'll have an elevated * file count (done either by fdget() or by fork()). */ void set_close_on_exec(unsigned int fd, int flag) { struct files_struct *files = current->files; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (flag) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); spin_unlock(&files->file_lock); } bool get_close_on_exec(unsigned int fd) { struct files_struct *files = current->files; struct fdtable *fdt; bool res; rcu_read_lock(); fdt = files_fdtable(files); res = close_on_exec(fd, fdt); rcu_read_unlock(); return res; } static int do_dup2(struct files_struct *files, struct file *file, unsigned fd, unsigned flags) __releases(&files->file_lock) { struct file *tofree; struct fdtable *fdt; /* * We need to detect attempts to do dup2() over allocated but still * not finished descriptor. NB: OpenBSD avoids that at the price of * extra work in their equivalent of fget() - they insert struct * file immediately after grabbing descriptor, mark it larval if * more work (e.g. actual opening) is needed and make sure that * fget() treats larval files as absent. Potentially interesting, * but while extra work in fget() is trivial, locking implications * and amount of surgery on open()-related paths in VFS are not. * FreeBSD fails with -EBADF in the same situation, NetBSD "solution" * deadlocks in rather amusing ways, AFAICS. All of that is out of * scope of POSIX or SUS, since neither considers shared descriptor * tables and this condition does not arise without those. 
*/ fdt = files_fdtable(files); tofree = fdt->fd[fd]; if (!tofree && fd_is_open(fd, fdt)) goto Ebusy; get_file(file); rcu_assign_pointer(fdt->fd[fd], file); __set_open_fd(fd, fdt); if (flags & O_CLOEXEC) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); spin_unlock(&files->file_lock); if (tofree) filp_close(tofree, files); return fd; Ebusy: spin_unlock(&files->file_lock); return -EBUSY; } int replace_fd(unsigned fd, struct file *file, unsigned flags) { int err; struct files_struct *files = current->files; if (!file) return __close_fd(files, fd); if (fd >= rlimit(RLIMIT_NOFILE)) return -EBADF; spin_lock(&files->file_lock); err = expand_files(files, fd); if (unlikely(err < 0)) goto out_unlock; return do_dup2(files, file, fd, flags); out_unlock: spin_unlock(&files->file_lock); return err; } static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) { int err = -EBADF; struct file *file; struct files_struct *files = current->files; if ((flags & ~O_CLOEXEC) != 0) return -EINVAL; if (unlikely(oldfd == newfd)) return -EINVAL; if (newfd >= rlimit(RLIMIT_NOFILE)) return -EBADF; spin_lock(&files->file_lock); err = expand_files(files, newfd); file = fcheck(oldfd); if (unlikely(!file)) goto Ebadf; if (unlikely(err < 0)) { if (err == -EMFILE) goto Ebadf; goto out_unlock; } return do_dup2(files, file, newfd, flags); Ebadf: err = -EBADF; out_unlock: spin_unlock(&files->file_lock); return err; } SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) { return ksys_dup3(oldfd, newfd, flags); } SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) { if (unlikely(newfd == oldfd)) { /* corner case */ struct files_struct *files = current->files; int retval = oldfd; rcu_read_lock(); if (!fcheck_files(files, oldfd)) retval = -EBADF; rcu_read_unlock(); return retval; } return ksys_dup3(oldfd, newfd, 0); } int ksys_dup(unsigned int fildes) { int ret = -EBADF; struct file *file = fget_raw(fildes); if (file) { ret = get_unused_fd_flags(0); if (ret >= 0) fd_install(ret, file); else fput(file); } return ret; } SYSCALL_DEFINE1(dup, unsigned int, fildes) { return ksys_dup(fildes); } int f_dupfd(unsigned int from, struct file *file, unsigned flags) { int err; if (from >= rlimit(RLIMIT_NOFILE)) return -EINVAL; err = alloc_fd(from, flags); if (err >= 0) { get_file(file); fd_install(err, file); } return err; } int iterate_fd(struct files_struct *files, unsigned n, int (*f)(const void *, struct file *, unsigned), const void *p) { struct fdtable *fdt; int res = 0; if (!files) return 0; spin_lock(&files->file_lock); for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { struct file *file; file = rcu_dereference_check_fdtable(files, fdt->fd[n]); if (!file) continue; res = f(p, file, n); if (res) break; } spin_unlock(&files->file_lock); return res; } EXPORT_SYMBOL(iterate_fd);
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/file.c * * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes * * Manage the dynamic fd arrays in the process files_struct. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/nospec.h> unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; /* our min() is unusable in constant expressions ;-/ */ #define __const_min(x, y) ((x) < (y) ? (x) : (y)) unsigned int sysctl_nr_open_max = __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG; static void __free_fdtable(struct fdtable *fdt) { kvfree(fdt->fd); kvfree(fdt->open_fds); kfree(fdt); } static void free_fdtable_rcu(struct rcu_head *rcu) { __free_fdtable(container_of(rcu, struct fdtable, rcu)); } #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr)) #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long)) /* * Copy 'count' fd bits from the old table to the new table and clear the extra * space if any. This does not copy the file pointers. Called with the files * spinlock held for write. */ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, unsigned int count) { unsigned int cpy, set; cpy = count / BITS_PER_BYTE; set = (nfdt->max_fds - count) / BITS_PER_BYTE; memcpy(nfdt->open_fds, ofdt->open_fds, cpy); memset((char *)nfdt->open_fds + cpy, 0, set); memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy); memset((char *)nfdt->close_on_exec + cpy, 0, set); cpy = BITBIT_SIZE(count); set = BITBIT_SIZE(nfdt->max_fds) - cpy; memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy); memset((char *)nfdt->full_fds_bits + cpy, 0, set); } /* * Copy all file descriptors from the old table to the new, expanded table and * clear the extra space. Called with the files spinlock held for write. */ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) { size_t cpy, set; BUG_ON(nfdt->max_fds < ofdt->max_fds); cpy = ofdt->max_fds * sizeof(struct file *); set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); memcpy(nfdt->fd, ofdt->fd, cpy); memset((char *)nfdt->fd + cpy, 0, set); copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds); } static struct fdtable * alloc_fdtable(unsigned int nr) { struct fdtable *fdt; void *data; /* * Figure out how many fds we actually want to support in this fdtable. * Allocation steps are keyed to the size of the fdarray, since it * grows far faster than any of the other dynamic data. We try to fit * the fdarray into comfortable page-tuned chunks: starting at 1024B * and growing in powers of two from there on. */ nr /= (1024 / sizeof(struct file *)); nr = roundup_pow_of_two(nr + 1); nr *= (1024 / sizeof(struct file *)); /* * Note that this can drive nr *below* what we had passed if sysctl_nr_open * had been set lower between the check in expand_files() and here. Deal * with that in caller, it's cheaper that way. * * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise * bitmaps handling below becomes unpleasant, to put it mildly... 
*/ if (unlikely(nr > sysctl_nr_open)) nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1; fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); if (!fdt) goto out; fdt->max_fds = nr; data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT); if (!data) goto out_fdt; fdt->fd = data; data = kvmalloc(max_t(size_t, 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES), GFP_KERNEL_ACCOUNT); if (!data) goto out_arr; fdt->open_fds = data; data += nr / BITS_PER_BYTE; fdt->close_on_exec = data; data += nr / BITS_PER_BYTE; fdt->full_fds_bits = data; return fdt; out_arr: kvfree(fdt->fd); out_fdt: kfree(fdt); out: return NULL; } /* * Expand the file descriptor table. * This function will allocate a new fdtable and both fd array and fdset, of * the given size. * Return <0 error code on error; 1 on successful completion. * The files->file_lock should be held on entry, and will be held on exit. */ static int expand_fdtable(struct files_struct *files, unsigned int nr) __releases(files->file_lock) __acquires(files->file_lock) { struct fdtable *new_fdt, *cur_fdt; spin_unlock(&files->file_lock); new_fdt = alloc_fdtable(nr); /* make sure all __fd_install() have seen resize_in_progress * or have finished their rcu_read_lock_sched() section. */ if (atomic_read(&files->count) > 1) synchronize_rcu(); spin_lock(&files->file_lock); if (!new_fdt) return -ENOMEM; /* * extremely unlikely race - sysctl_nr_open decreased between the check in * caller and alloc_fdtable(). Cheaper to catch it here... */ if (unlikely(new_fdt->max_fds <= nr)) { __free_fdtable(new_fdt); return -EMFILE; } cur_fdt = files_fdtable(files); BUG_ON(nr < cur_fdt->max_fds); copy_fdtable(new_fdt, cur_fdt); rcu_assign_pointer(files->fdt, new_fdt); if (cur_fdt != &files->fdtab) call_rcu(&cur_fdt->rcu, free_fdtable_rcu); /* coupled with smp_rmb() in __fd_install() */ smp_wmb(); return 1; } /* * Expand files. * This function will expand the file structures, if the requested size exceeds * the current capacity and there is room for expansion. * Return <0 error code on error; 0 when nothing done; 1 when files were * expanded and execution may have blocked. * The files->file_lock should be held on entry, and will be held on exit. */ static int expand_files(struct files_struct *files, unsigned int nr) __releases(files->file_lock) __acquires(files->file_lock) { struct fdtable *fdt; int expanded = 0; repeat: fdt = files_fdtable(files); /* Do we need to expand? */ if (nr < fdt->max_fds) return expanded; /* Can we expand? 
*/ if (nr >= sysctl_nr_open) return -EMFILE; if (unlikely(files->resize_in_progress)) { spin_unlock(&files->file_lock); expanded = 1; wait_event(files->resize_wait, !files->resize_in_progress); spin_lock(&files->file_lock); goto repeat; } /* All good, so we try */ files->resize_in_progress = true; expanded = expand_fdtable(files, nr); files->resize_in_progress = false; wake_up_all(&files->resize_wait); return expanded; } static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt) { __set_bit(fd, fdt->close_on_exec); } static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt) { if (test_bit(fd, fdt->close_on_exec)) __clear_bit(fd, fdt->close_on_exec); } static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt) { __set_bit(fd, fdt->open_fds); fd /= BITS_PER_LONG; if (!~fdt->open_fds[fd]) __set_bit(fd, fdt->full_fds_bits); } static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) { __clear_bit(fd, fdt->open_fds); __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits); } static unsigned int count_open_files(struct fdtable *fdt) { unsigned int size = fdt->max_fds; unsigned int i; /* Find the last open fd */ for (i = size / BITS_PER_LONG; i > 0; ) { if (fdt->open_fds[--i]) break; } i = (i + 1) * BITS_PER_LONG; return i; } /* * Allocate a new files structure and copy contents from the * passed in files structure. * errorp will be valid only when the returned files_struct is NULL. */ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) { struct files_struct *newf; struct file **old_fds, **new_fds; unsigned int open_files, i; struct fdtable *old_fdt, *new_fdt; *errorp = -ENOMEM; newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); if (!newf) goto out; atomic_set(&newf->count, 1); spin_lock_init(&newf->file_lock); newf->resize_in_progress = false; init_waitqueue_head(&newf->resize_wait); newf->next_fd = 0; new_fdt = &newf->fdtab; new_fdt->max_fds = NR_OPEN_DEFAULT; new_fdt->close_on_exec = newf->close_on_exec_init; new_fdt->open_fds = newf->open_fds_init; new_fdt->full_fds_bits = newf->full_fds_bits_init; new_fdt->fd = &newf->fd_array[0]; spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); open_files = count_open_files(old_fdt); /* * Check whether we need to allocate a larger fd array and fd set. */ while (unlikely(open_files > new_fdt->max_fds)) { spin_unlock(&oldf->file_lock); if (new_fdt != &newf->fdtab) __free_fdtable(new_fdt); new_fdt = alloc_fdtable(open_files - 1); if (!new_fdt) { *errorp = -ENOMEM; goto out_release; } /* beyond sysctl_nr_open; nothing to do */ if (unlikely(new_fdt->max_fds < open_files)) { __free_fdtable(new_fdt); *errorp = -EMFILE; goto out_release; } /* * Reacquire the oldf lock and a pointer to its fd table * who knows it may have a new bigger fd table. We need * the latest pointer. */ spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); open_files = count_open_files(old_fdt); } copy_fd_bitmaps(new_fdt, old_fdt, open_files); old_fds = old_fdt->fd; new_fds = new_fdt->fd; for (i = open_files; i != 0; i--) { struct file *f = *old_fds++; if (f) { get_file(f); } else { /* * The fd may be claimed in the fd bitmap but not yet * instantiated in the files array if a sibling thread * is partway through open(). So make sure that this * fd is available to the new process. 
*/ __clear_open_fd(open_files - i, new_fdt); } rcu_assign_pointer(*new_fds++, f); } spin_unlock(&oldf->file_lock); /* clear the remainder */ memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *)); rcu_assign_pointer(newf->fdt, new_fdt); return newf; out_release: kmem_cache_free(files_cachep, newf); out: return NULL; } static struct fdtable *close_files(struct files_struct * files) { /* * It is safe to dereference the fd table without RCU or * ->file_lock because this is the last reference to the * files structure. */ struct fdtable *fdt = rcu_dereference_raw(files->fdt); unsigned int i, j = 0; for (;;) { unsigned long set; i = j * BITS_PER_LONG; if (i >= fdt->max_fds) break; set = fdt->open_fds[j++]; while (set) { if (set & 1) { struct file * file = xchg(&fdt->fd[i], NULL); if (file) { filp_close(file, files); cond_resched(); } } i++; set >>= 1; } } return fdt; } struct files_struct *get_files_struct(struct task_struct *task) { struct files_struct *files; task_lock(task); files = task->files; if (files) atomic_inc(&files->count); task_unlock(task); return files; } void put_files_struct(struct files_struct *files) { if (atomic_dec_and_test(&files->count)) { struct fdtable *fdt = close_files(files); /* free the arrays if they are not embedded */ if (fdt != &files->fdtab) __free_fdtable(fdt); kmem_cache_free(files_cachep, files); } } void reset_files_struct(struct files_struct *files) { struct task_struct *tsk = current; struct files_struct *old; old = tsk->files; task_lock(tsk); tsk->files = files; task_unlock(tsk); put_files_struct(old); } void exit_files(struct task_struct *tsk) { struct files_struct * files = tsk->files; if (files) { task_lock(tsk); tsk->files = NULL; task_unlock(tsk); put_files_struct(files); } } struct files_struct init_files = { .count = ATOMIC_INIT(1), .fdt = &init_files.fdtab, .fdtab = { .max_fds = NR_OPEN_DEFAULT, .fd = &init_files.fd_array[0], .close_on_exec = init_files.close_on_exec_init, .open_fds = init_files.open_fds_init, .full_fds_bits = init_files.full_fds_bits_init, }, .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait), }; static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start) { unsigned int maxfd = fdt->max_fds; unsigned int maxbit = maxfd / BITS_PER_LONG; unsigned int bitbit = start / BITS_PER_LONG; bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; if (bitbit > maxfd) return maxfd; if (bitbit > start) start = bitbit; return find_next_zero_bit(fdt->open_fds, maxfd, start); } /* * allocate a file descriptor, mark it busy. */ int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags) { unsigned int fd; int error; struct fdtable *fdt; spin_lock(&files->file_lock); repeat: fdt = files_fdtable(files); fd = start; if (fd < files->next_fd) fd = files->next_fd; if (fd < fdt->max_fds) fd = find_next_fd(fdt, fd); /* * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. */ error = -EMFILE; if (fd >= end) goto out; error = expand_files(files, fd); if (error < 0) goto out; /* * If we needed to expand the fs array we * might have blocked - try again. 
*/ if (error) goto repeat; if (start <= files->next_fd) files->next_fd = fd + 1; __set_open_fd(fd, fdt); if (flags & O_CLOEXEC) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); error = fd; #if 1 /* Sanity check */ if (rcu_access_pointer(fdt->fd[fd]) != NULL) { printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd); rcu_assign_pointer(fdt->fd[fd], NULL); } #endif out: spin_unlock(&files->file_lock); return error; } static int alloc_fd(unsigned start, unsigned flags) { return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags); } int __get_unused_fd_flags(unsigned flags, unsigned long nofile) { return __alloc_fd(current->files, 0, nofile, flags); } int get_unused_fd_flags(unsigned flags) { return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE)); } EXPORT_SYMBOL(get_unused_fd_flags); static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __clear_open_fd(fd, fdt); if (fd < files->next_fd) files->next_fd = fd; } void put_unused_fd(unsigned int fd) { struct files_struct *files = current->files; spin_lock(&files->file_lock); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); } EXPORT_SYMBOL(put_unused_fd); /* * Install a file pointer in the fd array. * * The VFS is full of places where we drop the files lock between * setting the open_fds bitmap and installing the file in the file * array. At any such point, we are vulnerable to a dup2() race * installing a file in the array before us. We need to detect this and * fput() the struct file we are about to overwrite in this case. * * It should never happen - if we allow dup2() do it, _really_ bad things * will follow. * * NOTE: __fd_install() variant is really, really low-level; don't * use it unless you are forced to by truly lousy API shoved down * your throat. 'files' *MUST* be either current->files or obtained * by get_files_struct(current) done by whoever had given it to you, * or really bad things will happen. Normally you want to use * fd_install() instead. */ void __fd_install(struct files_struct *files, unsigned int fd, struct file *file) { struct fdtable *fdt; rcu_read_lock_sched(); if (unlikely(files->resize_in_progress)) { rcu_read_unlock_sched(); spin_lock(&files->file_lock); fdt = files_fdtable(files); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); spin_unlock(&files->file_lock); return; } /* coupled with smp_wmb() in expand_fdtable() */ smp_rmb(); fdt = rcu_dereference_sched(files->fdt); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); rcu_read_unlock_sched(); } void fd_install(unsigned int fd, struct file *file) { __fd_install(current->files, fd, file); } EXPORT_SYMBOL(fd_install); /* * The same warnings as for __alloc_fd()/__fd_install() apply here... */ int __close_fd(struct files_struct *files, unsigned fd) { struct file *file; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; fd = array_index_nospec(fd, fdt->max_fds); file = fdt->fd[fd]; if (!file) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); return -EBADF; } EXPORT_SYMBOL(__close_fd); /* for ksys_close() */ /* * variant of __close_fd that gets a ref on the file for later fput. * The caller must ensure that filp_close() called on the file, and then * an fput(). 
*/ int __close_fd_get_file(unsigned int fd, struct file **res) { struct files_struct *files = current->files; struct file *file; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; file = fdt->fd[fd]; if (!file) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); get_file(file); *res = file; return 0; out_unlock: spin_unlock(&files->file_lock); *res = NULL; return -ENOENT; } void do_close_on_exec(struct files_struct *files) { unsigned i; struct fdtable *fdt; /* exec unshares first */ spin_lock(&files->file_lock); for (i = 0; ; i++) { unsigned long set; unsigned fd = i * BITS_PER_LONG; fdt = files_fdtable(files); if (fd >= fdt->max_fds) break; set = fdt->close_on_exec[i]; if (!set) continue; fdt->close_on_exec[i] = 0; for ( ; set ; fd++, set >>= 1) { struct file *file; if (!(set & 1)) continue; file = fdt->fd[fd]; if (!file) continue; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); filp_close(file, files); cond_resched(); spin_lock(&files->file_lock); } } spin_unlock(&files->file_lock); } static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs) { struct files_struct *files = current->files; struct file *file; rcu_read_lock(); loop: file = fcheck_files(files, fd); if (file) { /* File object ref couldn't be taken. * dup2() atomicity guarantee is the reason * we loop to catch the new file (or NULL pointer) */ if (file->f_mode & mask) file = NULL; else if (!get_file_rcu_many(file, refs)) goto loop; else if (__fcheck_files(files, fd) != file) { fput_many(file, refs); goto loop; } } rcu_read_unlock(); return file; } struct file *fget_many(unsigned int fd, unsigned int refs) { return __fget(fd, FMODE_PATH, refs); } struct file *fget(unsigned int fd) { return __fget(fd, FMODE_PATH, 1); } EXPORT_SYMBOL(fget); struct file *fget_raw(unsigned int fd) { return __fget(fd, 0, 1); } EXPORT_SYMBOL(fget_raw); /* * Lightweight file lookup - no refcnt increment if fd table isn't shared. * * You can use this instead of fget if you satisfy all of the following * conditions: * 1) You must call fput_light before exiting the syscall and returning control * to userspace (i.e. you cannot remember the returned struct file * after * returning to userspace). * 2) You must not call filp_close on the returned struct file * in between * calls to fget_light and fput_light. * 3) You must not clone the current task in between the calls to fget_light * and fput_light. * * The fput_needed flag returned by fget_light should be passed to the * corresponding fput_light. 
*/ static unsigned long __fget_light(unsigned int fd, fmode_t mask) { struct files_struct *files = current->files; struct file *file; if (atomic_read(&files->count) == 1) { file = __fcheck_files(files, fd); if (!file || unlikely(file->f_mode & mask)) return 0; return (unsigned long)file; } else { file = __fget(fd, mask, 1); if (!file) return 0; return FDPUT_FPUT | (unsigned long)file; } } unsigned long __fdget(unsigned int fd) { return __fget_light(fd, FMODE_PATH); } EXPORT_SYMBOL(__fdget); unsigned long __fdget_raw(unsigned int fd) { return __fget_light(fd, 0); } unsigned long __fdget_pos(unsigned int fd) { unsigned long v = __fdget(fd); struct file *file = (struct file *)(v & ~3); if (file && (file->f_mode & FMODE_ATOMIC_POS)) { if (file_count(file) > 1) { v |= FDPUT_POS_UNLOCK; mutex_lock(&file->f_pos_lock); } } return v; } void __f_unlock_pos(struct file *f) { mutex_unlock(&f->f_pos_lock); } /* * We only lock f_pos if we have threads or if the file might be * shared with another process. In both cases we'll have an elevated * file count (done either by fdget() or by fork()). */ void set_close_on_exec(unsigned int fd, int flag) { struct files_struct *files = current->files; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (flag) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); spin_unlock(&files->file_lock); } bool get_close_on_exec(unsigned int fd) { struct files_struct *files = current->files; struct fdtable *fdt; bool res; rcu_read_lock(); fdt = files_fdtable(files); res = close_on_exec(fd, fdt); rcu_read_unlock(); return res; } static int do_dup2(struct files_struct *files, struct file *file, unsigned fd, unsigned flags) __releases(&files->file_lock) { struct file *tofree; struct fdtable *fdt; /* * We need to detect attempts to do dup2() over allocated but still * not finished descriptor. NB: OpenBSD avoids that at the price of * extra work in their equivalent of fget() - they insert struct * file immediately after grabbing descriptor, mark it larval if * more work (e.g. actual opening) is needed and make sure that * fget() treats larval files as absent. Potentially interesting, * but while extra work in fget() is trivial, locking implications * and amount of surgery on open()-related paths in VFS are not. * FreeBSD fails with -EBADF in the same situation, NetBSD "solution" * deadlocks in rather amusing ways, AFAICS. All of that is out of * scope of POSIX or SUS, since neither considers shared descriptor * tables and this condition does not arise without those. 
*/ fdt = files_fdtable(files); tofree = fdt->fd[fd]; if (!tofree && fd_is_open(fd, fdt)) goto Ebusy; get_file(file); rcu_assign_pointer(fdt->fd[fd], file); __set_open_fd(fd, fdt); if (flags & O_CLOEXEC) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); spin_unlock(&files->file_lock); if (tofree) filp_close(tofree, files); return fd; Ebusy: spin_unlock(&files->file_lock); return -EBUSY; } int replace_fd(unsigned fd, struct file *file, unsigned flags) { int err; struct files_struct *files = current->files; if (!file) return __close_fd(files, fd); if (fd >= rlimit(RLIMIT_NOFILE)) return -EBADF; spin_lock(&files->file_lock); err = expand_files(files, fd); if (unlikely(err < 0)) goto out_unlock; return do_dup2(files, file, fd, flags); out_unlock: spin_unlock(&files->file_lock); return err; } static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) { int err = -EBADF; struct file *file; struct files_struct *files = current->files; if ((flags & ~O_CLOEXEC) != 0) return -EINVAL; if (unlikely(oldfd == newfd)) return -EINVAL; if (newfd >= rlimit(RLIMIT_NOFILE)) return -EBADF; spin_lock(&files->file_lock); err = expand_files(files, newfd); file = fcheck(oldfd); if (unlikely(!file)) goto Ebadf; if (unlikely(err < 0)) { if (err == -EMFILE) goto Ebadf; goto out_unlock; } return do_dup2(files, file, newfd, flags); Ebadf: err = -EBADF; out_unlock: spin_unlock(&files->file_lock); return err; } SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) { return ksys_dup3(oldfd, newfd, flags); } SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) { if (unlikely(newfd == oldfd)) { /* corner case */ struct files_struct *files = current->files; int retval = oldfd; rcu_read_lock(); if (!fcheck_files(files, oldfd)) retval = -EBADF; rcu_read_unlock(); return retval; } return ksys_dup3(oldfd, newfd, 0); } int ksys_dup(unsigned int fildes) { int ret = -EBADF; struct file *file = fget_raw(fildes); if (file) { ret = get_unused_fd_flags(0); if (ret >= 0) fd_install(ret, file); else fput(file); } return ret; } SYSCALL_DEFINE1(dup, unsigned int, fildes) { return ksys_dup(fildes); } int f_dupfd(unsigned int from, struct file *file, unsigned flags) { int err; if (from >= rlimit(RLIMIT_NOFILE)) return -EINVAL; err = alloc_fd(from, flags); if (err >= 0) { get_file(file); fd_install(err, file); } return err; } int iterate_fd(struct files_struct *files, unsigned n, int (*f)(const void *, struct file *, unsigned), const void *p) { struct fdtable *fdt; int res = 0; if (!files) return 0; spin_lock(&files->file_lock); for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { struct file *file; file = rcu_dereference_check_fdtable(files, fdt->fd[n]); if (!file) continue; res = f(p, file, n); if (res) break; } spin_unlock(&files->file_lock); return res; } EXPORT_SYMBOL(iterate_fd);
int __close_fd_get_file(unsigned int fd, struct file **res) { struct files_struct *files = current->files; struct file *file; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; file = fdt->fd[fd]; if (!file) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); get_file(file); *res = file; return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); *res = NULL; return -ENOENT; }
int __close_fd_get_file(unsigned int fd, struct file **res) { struct files_struct *files = current->files; struct file *file; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; file = fdt->fd[fd]; if (!file) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); get_file(file); *res = file; return 0; out_unlock: spin_unlock(&files->file_lock); *res = NULL; return -ENOENT; }
{'added': [(653, ' * variant of __close_fd that gets a ref on the file for later fput.'), (654, ' * The caller must ensure that filp_close() called on the file, and then'), (655, ' * an fput().'), (675, '\treturn 0;')], 'deleted': [(653, ' * variant of __close_fd that gets a ref on the file for later fput'), (673, '\treturn filp_close(file, files);')]}
4
2
722
4,409
https://github.com/oracle/linux-uek
CVE-2022-21504
['CWE-416']
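The diff in this record narrows the contract of __close_fd_get_file(): the old variant detached the descriptor, took a reference into *res, and also called filp_close() itself, returning its result; the new variant only detaches and keeps the reference, returning 0, and its updated comment makes the caller responsible for the filp_close() and the final fput(). As a reading aid only, the sketch below illustrates the general use-after-free class the CWE-416 tag refers to, namely a helper that both releases an object and hands its caller a pointer the caller is expected to release again. It is ordinary userspace C, not kernel code; struct handle, handle_new(), handle_put(), detach_and_close() and detach_only() are hypothetical stand-ins invented for the illustration and deliberately simplify the real filp_close()/fput() semantics.

```c
/*
 * Hypothetical illustration (not kernel code) of the double-release /
 * use-after-free pattern (CWE-416) that the record above is tagged with.
 */
#include <stdio.h>
#include <stdlib.h>

struct handle {
	int refcount;
	int value;
};

static struct handle *handle_new(int value)
{
	struct handle *h = malloc(sizeof(*h));

	h->refcount = 1;
	h->value = value;
	return h;
}

static void handle_put(struct handle *h)
{
	if (--h->refcount == 0)
		free(h);
}

/*
 * Buggy contract (loosely analogous to the pre-patch helper): the
 * function drops the table's reference itself *and* still returns the
 * pointer, so a caller that also releases it frees the object twice.
 */
static struct handle *detach_and_close(struct handle **slot)
{
	struct handle *h = *slot;

	*slot = NULL;
	handle_put(h);		/* helper releases... */
	return h;		/* ...but still hands the pointer back */
}

/*
 * Fixed contract (loosely analogous to the post-patch helper): only
 * detach and keep the reference; the caller does the single release.
 */
static struct handle *detach_only(struct handle **slot)
{
	struct handle *h = *slot;

	*slot = NULL;
	return h;
}

int main(void)
{
	struct handle *table_slot = handle_new(42);
	struct handle *h;

	/* Correct usage under the fixed contract: exactly one release. */
	h = detach_only(&table_slot);
	printf("value = %d\n", h->value);
	handle_put(h);

	/*
	 * Under detach_and_close() the same caller code would dereference
	 * and release already-freed memory - the use-after-free class of
	 * bug this record documents. It is left uncalled on purpose.
	 */
	(void)detach_and_close;
	return 0;
}
```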
mqueue.c
do_mq_notify
/* * POSIX message queues filesystem for Linux. * * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl) * Michal Wronski (michal.wronski@gmail.com) * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: * Manfred Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * * This file is released under the GPL. */ #include <linux/capability.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/sysctl.h> #include <linux/poll.h> #include <linux/mqueue.h> #include <linux/msg.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <linux/netlink.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/signal.h> #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/pid.h> #include <linux/ipc_namespace.h> #include <linux/user_namespace.h> #include <linux/slab.h> #include <linux/sched/wake_q.h> #include <linux/sched/signal.h> #include <linux/sched/user.h> #include <net/sock.h> #include "util.h" #define MQUEUE_MAGIC 0x19800202 #define DIRENT_SIZE 20 #define FILENT_SIZE 80 #define SEND 0 #define RECV 1 #define STATE_NONE 0 #define STATE_READY 1 struct posix_msg_tree_node { struct rb_node rb_node; struct list_head msg_list; int priority; }; struct ext_wait_queue { /* queue of sleeping tasks */ struct task_struct *task; struct list_head list; struct msg_msg *msg; /* ptr of loaded message */ int state; /* one of STATE_* values */ }; struct mqueue_inode_info { spinlock_t lock; struct inode vfs_inode; wait_queue_head_t wait_q; struct rb_root msg_tree; struct posix_msg_tree_node *node_cache; struct mq_attr attr; struct sigevent notify; struct pid *notify_owner; struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; struct sk_buff *notify_cookie; /* for tasks waiting for free space and messages, respectively */ struct ext_wait_queue e_wait_q[2]; unsigned long qsize; /* size of queue in memory (sum of all msgs) */ }; static const struct inode_operations mqueue_dir_inode_operations; static const struct file_operations mqueue_file_operations; static const struct super_operations mqueue_super_ops; static void remove_notification(struct mqueue_inode_info *info); static struct kmem_cache *mqueue_inode_cachep; static struct ctl_table_header *mq_sysctl_table; static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { return container_of(inode, struct mqueue_inode_info, vfs_inode); } /* * This routine should be called with the mq_lock held. 
*/ static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) { return get_ipc_ns(inode->i_sb->s_fs_info); } static struct ipc_namespace *get_ns_from_inode(struct inode *inode) { struct ipc_namespace *ns; spin_lock(&mq_lock); ns = __get_ns_from_inode(inode); spin_unlock(&mq_lock); return ns; } /* Auxiliary functions to manipulate messages' list */ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) { struct rb_node **p, *parent = NULL; struct posix_msg_tree_node *leaf; p = &info->msg_tree.rb_node; while (*p) { parent = *p; leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); if (likely(leaf->priority == msg->m_type)) goto insert_msg; else if (msg->m_type < leaf->priority) p = &(*p)->rb_left; else p = &(*p)->rb_right; } if (info->node_cache) { leaf = info->node_cache; info->node_cache = NULL; } else { leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC); if (!leaf) return -ENOMEM; INIT_LIST_HEAD(&leaf->msg_list); } leaf->priority = msg->m_type; rb_link_node(&leaf->rb_node, parent, p); rb_insert_color(&leaf->rb_node, &info->msg_tree); insert_msg: info->attr.mq_curmsgs++; info->qsize += msg->m_ts; list_add_tail(&msg->m_list, &leaf->msg_list); return 0; } static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) { struct rb_node **p, *parent = NULL; struct posix_msg_tree_node *leaf; struct msg_msg *msg; try_again: p = &info->msg_tree.rb_node; while (*p) { parent = *p; /* * During insert, low priorities go to the left and high to the * right. On receive, we want the highest priorities first, so * walk all the way to the right. */ p = &(*p)->rb_right; } if (!parent) { if (info->attr.mq_curmsgs) { pr_warn_once("Inconsistency in POSIX message queue, " "no tree element, but supposedly messages " "should exist!\n"); info->attr.mq_curmsgs = 0; } return NULL; } leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); if (unlikely(list_empty(&leaf->msg_list))) { pr_warn_once("Inconsistency in POSIX message queue, " "empty leaf node but we haven't implemented " "lazy leaf delete!\n"); rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { kfree(leaf); } else { info->node_cache = leaf; } goto try_again; } else { msg = list_first_entry(&leaf->msg_list, struct msg_msg, m_list); list_del(&msg->m_list); if (list_empty(&leaf->msg_list)) { rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { kfree(leaf); } else { info->node_cache = leaf; } } } info->attr.mq_curmsgs--; info->qsize -= msg->m_ts; return msg; } static struct inode *mqueue_get_inode(struct super_block *sb, struct ipc_namespace *ipc_ns, umode_t mode, struct mq_attr *attr) { struct user_struct *u = current_user(); struct inode *inode; int ret = -ENOMEM; inode = new_inode(sb); if (!inode) goto err; inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode); if (S_ISREG(mode)) { struct mqueue_inode_info *info; unsigned long mq_bytes, mq_treesize; inode->i_fop = &mqueue_file_operations; inode->i_size = FILENT_SIZE; /* mqueue specific info */ info = MQUEUE_I(inode); spin_lock_init(&info->lock); init_waitqueue_head(&info->wait_q); INIT_LIST_HEAD(&info->e_wait_q[0].list); INIT_LIST_HEAD(&info->e_wait_q[1].list); info->notify_owner = NULL; info->notify_user_ns = NULL; info->qsize = 0; info->user = NULL; /* set when all is ok */ info->msg_tree = RB_ROOT; info->node_cache = NULL; memset(&info->attr, 0, sizeof(info->attr)); info->attr.mq_maxmsg = 
min(ipc_ns->mq_msg_max, ipc_ns->mq_msg_default); info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, ipc_ns->mq_msgsize_default); if (attr) { info->attr.mq_maxmsg = attr->mq_maxmsg; info->attr.mq_msgsize = attr->mq_msgsize; } /* * We used to allocate a static array of pointers and account * the size of that array as well as one msg_msg struct per * possible message into the queue size. That's no longer * accurate as the queue is now an rbtree and will grow and * shrink depending on usage patterns. We can, however, still * account one msg_msg struct per message, but the nodes are * allocated depending on priority usage, and most programs * only use one, or a handful, of priorities. However, since * this is pinned memory, we need to assume worst case, so * that means the min(mq_maxmsg, max_priorities) * struct * posix_msg_tree_node. */ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); mq_bytes = mq_treesize + (info->attr.mq_maxmsg * info->attr.mq_msgsize); spin_lock(&mq_lock); if (u->mq_bytes + mq_bytes < u->mq_bytes || u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { spin_unlock(&mq_lock); /* mqueue_evict_inode() releases info->messages */ ret = -EMFILE; goto out_inode; } u->mq_bytes += mq_bytes; spin_unlock(&mq_lock); /* all is ok */ info->user = get_uid(u); } else if (S_ISDIR(mode)) { inc_nlink(inode); /* Some things misbehave if size == 0 on a directory */ inode->i_size = 2 * DIRENT_SIZE; inode->i_op = &mqueue_dir_inode_operations; inode->i_fop = &simple_dir_operations; } return inode; out_inode: iput(inode); err: return ERR_PTR(ret); } static int mqueue_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct ipc_namespace *ns = sb->s_fs_info; sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); if (IS_ERR(inode)) return PTR_ERR(inode); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; return 0; } static struct dentry *mqueue_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct ipc_namespace *ns; if (flags & MS_KERNMOUNT) { ns = data; data = NULL; } else { ns = current->nsproxy->ipc_ns; } return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super); } static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; inode_init_once(&p->vfs_inode); } static struct inode *mqueue_alloc_inode(struct super_block *sb) { struct mqueue_inode_info *ei; ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void mqueue_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } static void mqueue_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, mqueue_i_callback); } static void mqueue_evict_inode(struct inode *inode) { struct mqueue_inode_info *info; struct user_struct *user; unsigned long mq_bytes, mq_treesize; struct ipc_namespace *ipc_ns; struct msg_msg *msg; clear_inode(inode); if (S_ISDIR(inode->i_mode)) return; ipc_ns = get_ns_from_inode(inode); info = MQUEUE_I(inode); spin_lock(&info->lock); while ((msg = msg_get(info)) != NULL) free_msg(msg); kfree(info->node_cache); spin_unlock(&info->lock); /* Total amount of 
bytes accounted for the mqueue */ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); mq_bytes = mq_treesize + (info->attr.mq_maxmsg * info->attr.mq_msgsize); user = info->user; if (user) { spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; /* * get_ns_from_inode() ensures that the * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns * to which we now hold a reference, or it is NULL. * We can't put it here under mq_lock, though. */ if (ipc_ns) ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); free_uid(user); } if (ipc_ns) put_ipc_ns(ipc_ns); } static int mqueue_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode; struct mq_attr *attr = dentry->d_fsdata; int error; struct ipc_namespace *ipc_ns; spin_lock(&mq_lock); ipc_ns = __get_ns_from_inode(dir); if (!ipc_ns) { error = -EACCES; goto out_unlock; } if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; goto out_unlock; } ipc_ns->mq_queues_count++; spin_unlock(&mq_lock); inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); if (IS_ERR(inode)) { error = PTR_ERR(inode); spin_lock(&mq_lock); ipc_ns->mq_queues_count--; goto out_unlock; } put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); d_instantiate(dentry, inode); dget(dentry); return 0; out_unlock: spin_unlock(&mq_lock); if (ipc_ns) put_ipc_ns(ipc_ns); return error; } static int mqueue_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); dir->i_size -= DIRENT_SIZE; drop_nlink(inode); dput(dentry); return 0; } /* * This is routine for system read from queue file. * To avoid mess with doing here some sort of mq_receive we allow * to read only queue size & notification info (the only values * that are interesting from user point of view and aren't accessible * through std routines) */ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, size_t count, loff_t *off) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); char buffer[FILENT_SIZE]; ssize_t ret; spin_lock(&info->lock); snprintf(buffer, sizeof(buffer), "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n", info->qsize, info->notify_owner ? info->notify.sigev_notify : 0, (info->notify_owner && info->notify.sigev_notify == SIGEV_SIGNAL) ? 
info->notify.sigev_signo : 0, pid_vnr(info->notify_owner)); spin_unlock(&info->lock); buffer[sizeof(buffer)-1] = '\0'; ret = simple_read_from_buffer(u_data, count, off, buffer, strlen(buffer)); if (ret <= 0) return ret; file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp)); return ret; } static int mqueue_flush_file(struct file *filp, fl_owner_t id) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); spin_lock(&info->lock); if (task_tgid(current) == info->notify_owner) remove_notification(info); spin_unlock(&info->lock); return 0; } static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); int retval = 0; poll_wait(filp, &info->wait_q, poll_tab); spin_lock(&info->lock); if (info->attr.mq_curmsgs) retval = POLLIN | POLLRDNORM; if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) retval |= POLLOUT | POLLWRNORM; spin_unlock(&info->lock); return retval; } /* Adds current to info->e_wait_q[sr] before element with smaller prio */ static void wq_add(struct mqueue_inode_info *info, int sr, struct ext_wait_queue *ewp) { struct ext_wait_queue *walk; ewp->task = current; list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { if (walk->task->static_prio <= current->static_prio) { list_add_tail(&ewp->list, &walk->list); return; } } list_add_tail(&ewp->list, &info->e_wait_q[sr].list); } /* * Puts current task to sleep. Caller must hold queue lock. After return * lock isn't held. * sr: SEND or RECV */ static int wq_sleep(struct mqueue_inode_info *info, int sr, ktime_t *timeout, struct ext_wait_queue *ewp) __releases(&info->lock) { int retval; signed long time; wq_add(info, sr, ewp); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); time = schedule_hrtimeout_range_clock(timeout, 0, HRTIMER_MODE_ABS, CLOCK_REALTIME); if (ewp->state == STATE_READY) { retval = 0; goto out; } spin_lock(&info->lock); if (ewp->state == STATE_READY) { retval = 0; goto out_unlock; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (time == 0) { retval = -ETIMEDOUT; break; } } list_del(&ewp->list); out_unlock: spin_unlock(&info->lock); out: return retval; } /* * Returns waiting task that should be serviced first or NULL if none exists */ static struct ext_wait_queue *wq_get_first_waiter( struct mqueue_inode_info *info, int sr) { struct list_head *ptr; ptr = info->e_wait_q[sr].list.prev; if (ptr == &info->e_wait_q[sr].list) return NULL; return list_entry(ptr, struct ext_wait_queue, list); } static inline void set_cookie(struct sk_buff *skb, char code) { ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* * The next function is only to split too long sys_mq_timedsend */ static void __do_notify(struct mqueue_inode_info *info) { /* notification * invoked when there is registered process and there isn't process * waiting synchronously for message AND state of queue changed from * empty to not empty. Here we are sure that no one is waiting * synchronously. 
*/ if (info->notify_owner && info->attr.mq_curmsgs == 1) { struct siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; case SIGEV_SIGNAL: /* sends signal */ sig_i.si_signo = info->notify.sigev_signo; sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; /* map current pid/uid into info->owner's namespaces */ rcu_read_lock(); sig_i.si_pid = task_tgid_nr_ns(current, ns_of_pid(info->notify_owner)); sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); rcu_read_unlock(); kill_pid_info(info->notify.sigev_signo, &sig_i, info->notify_owner); break; case SIGEV_THREAD: set_cookie(info->notify_cookie, NOTIFY_WOKENUP); netlink_sendskb(info->notify_sock, info->notify_cookie); break; } /* after notification unregisters process */ put_pid(info->notify_owner); put_user_ns(info->notify_user_ns); info->notify_owner = NULL; info->notify_user_ns = NULL; } wake_up(&info->wait_q); } static int prepare_timeout(const struct timespec __user *u_abs_timeout, struct timespec *ts) { if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) return -EFAULT; if (!timespec_valid(ts)) return -EINVAL; return 0; } static void remove_notification(struct mqueue_inode_info *info) { if (info->notify_owner != NULL && info->notify.sigev_notify == SIGEV_THREAD) { set_cookie(info->notify_cookie, NOTIFY_REMOVED); netlink_sendskb(info->notify_sock, info->notify_cookie); } put_pid(info->notify_owner); put_user_ns(info->notify_user_ns); info->notify_owner = NULL; info->notify_user_ns = NULL; } static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) { int mq_treesize; unsigned long total_size; if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) return -EINVAL; if (capable(CAP_SYS_RESOURCE)) { if (attr->mq_maxmsg > HARD_MSGMAX || attr->mq_msgsize > HARD_MSGSIZEMAX) return -EINVAL; } else { if (attr->mq_maxmsg > ipc_ns->mq_msg_max || attr->mq_msgsize > ipc_ns->mq_msgsize_max) return -EINVAL; } /* check for overflow */ if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) return -EOVERFLOW; mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); total_size = attr->mq_maxmsg * attr->mq_msgsize; if (total_size + mq_treesize < total_size) return -EOVERFLOW; return 0; } /* * Invoked when creating a new queue via sys_mq_open */ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, struct path *path, int oflag, umode_t mode, struct mq_attr *attr) { const struct cred *cred = current_cred(); int ret; if (attr) { ret = mq_attr_ok(ipc_ns, attr); if (ret) return ERR_PTR(ret); /* store for use during create */ path->dentry->d_fsdata = attr; } else { struct mq_attr def_attr; def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, ipc_ns->mq_msg_default); def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, ipc_ns->mq_msgsize_default); ret = mq_attr_ok(ipc_ns, &def_attr); if (ret) return ERR_PTR(ret); } mode &= ~current_umask(); ret = vfs_create(dir, path->dentry, mode, true); path->dentry->d_fsdata = NULL; if (ret) return ERR_PTR(ret); return dentry_open(path, oflag, cred); } /* Opens existing queue */ static struct file *do_open(struct path *path, int oflag) { static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, MAY_READ | MAY_WRITE }; int acc; if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) return ERR_PTR(-EINVAL); acc = oflag2acc[oflag & O_ACCMODE]; if (inode_permission(d_inode(path->dentry), acc)) return ERR_PTR(-EACCES); return dentry_open(path, 
oflag, current_cred()); } static int do_mq_open(const char __user *u_name, int oflag, umode_t mode, struct mq_attr *attr) { struct path path; struct file *filp; struct filename *name; int fd, error; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; struct vfsmount *mnt = ipc_ns->mq_mnt; struct dentry *root = mnt->mnt_root; int ro; audit_mq_open(oflag, mode, attr); if (IS_ERR(name = getname(u_name))) return PTR_ERR(name); fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) goto out_putname; ro = mnt_want_write(mnt); /* we'll drop it in any case */ error = 0; inode_lock(d_inode(root)); path.dentry = lookup_one_len(name->name, root, strlen(name->name)); if (IS_ERR(path.dentry)) { error = PTR_ERR(path.dentry); goto out_putfd; } path.mnt = mntget(mnt); if (oflag & O_CREAT) { if (d_really_is_positive(path.dentry)) { /* entry already exists */ audit_inode(name, path.dentry, 0); if (oflag & O_EXCL) { error = -EEXIST; goto out; } filp = do_open(&path, oflag); } else { if (ro) { error = ro; goto out; } audit_inode_parent_hidden(name, root); filp = do_create(ipc_ns, d_inode(root), &path, oflag, mode, attr); } } else { if (d_really_is_negative(path.dentry)) { error = -ENOENT; goto out; } audit_inode(name, path.dentry, 0); filp = do_open(&path, oflag); } if (!IS_ERR(filp)) fd_install(fd, filp); else error = PTR_ERR(filp); out: path_put(&path); out_putfd: if (error) { put_unused_fd(fd); fd = error; } inode_unlock(d_inode(root)); if (!ro) mnt_drop_write(mnt); out_putname: putname(name); return fd; } SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr __user *, u_attr) { struct mq_attr attr; if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL); } SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) { int err; struct filename *name; struct dentry *dentry; struct inode *inode = NULL; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; struct vfsmount *mnt = ipc_ns->mq_mnt; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); audit_inode_parent_hidden(name, mnt->mnt_root); err = mnt_want_write(mnt); if (err) goto out_name; inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT); dentry = lookup_one_len(name->name, mnt->mnt_root, strlen(name->name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } inode = d_inode(dentry); if (!inode) { err = -ENOENT; } else { ihold(inode); err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL); } dput(dentry); out_unlock: inode_unlock(d_inode(mnt->mnt_root)); if (inode) iput(inode); mnt_drop_write(mnt); out_name: putname(name); return err; } /* Pipelined send and receive functions. * * If a receiver finds no waiting message, then it registers itself in the * list of waiting receivers. A sender checks that list before adding the new * message into the message array. If there is a waiting receiver, then it * bypasses the message array and directly hands the message over to the * receiver. The receiver accepts the message and returns without grabbing the * queue spinlock: * * - Set pointer to message. * - Queue the receiver task for later wakeup (without the info->lock). * - Update its state to STATE_READY. Now the receiver can continue. * - Wake up the process after the lock is dropped. Should the process wake up * before this wakeup (due to a timeout or a signal) it will either see * STATE_READY and continue or acquire the lock to check the state again. * * The same algorithm is used for senders. 
*/ /* pipelined_send() - send a message directly to the task waiting in * sys_mq_timedreceive() (without inserting message into a queue). */ static inline void pipelined_send(struct wake_q_head *wake_q, struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { receiver->msg = message; list_del(&receiver->list); wake_q_add(wake_q, receiver->task); /* * Rely on the implicit cmpxchg barrier from wake_q_add such * that we can ensure that updating receiver->state is the last * write operation: As once set, the receiver can continue, * and if we don't have the reference count from the wake_q, * yet, at that point we can later have a use-after-free * condition and bogus wakeup. */ receiver->state = STATE_READY; } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() * gets its message and put to the queue (we have one free place for sure). */ static inline void pipelined_receive(struct wake_q_head *wake_q, struct mqueue_inode_info *info) { struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); if (!sender) { /* for poll */ wake_up_interruptible(&info->wait_q); return; } if (msg_insert(sender->msg, info)) return; list_del(&sender->list); wake_q_add(wake_q, sender->task); sender->state = STATE_READY; } static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, size_t msg_len, unsigned int msg_prio, struct timespec *ts) { struct fd f; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; ktime_t expires, *timeout = NULL; struct posix_msg_tree_node *new_leaf = NULL; int ret = 0; DEFINE_WAKE_Q(wake_q); if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; if (ts) { expires = timespec_to_ktime(*ts); timeout = &expires; } audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts); f = fdget(mqdes); if (unlikely(!f.file)) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); audit_file(f.file); if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { ret = -EBADF; goto out_fput; } if (unlikely(msg_len > info->attr.mq_msgsize)) { ret = -EMSGSIZE; goto out_fput; } /* First try to allocate memory, before doing anything with * existing queues. */ msg_ptr = load_msg(u_msg_ptr, msg_len); if (IS_ERR(msg_ptr)) { ret = PTR_ERR(msg_ptr); goto out_fput; } msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; /* * msg_insert really wants us to have a valid, spare node struct so * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will * fall back to that if necessary. 
*/ if (!info->node_cache) new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); spin_lock(&info->lock); if (!info->node_cache && new_leaf) { /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; new_leaf = NULL; } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { if (f.file->f_flags & O_NONBLOCK) { ret = -EAGAIN; } else { wait.task = current; wait.msg = (void *) msg_ptr; wait.state = STATE_NONE; ret = wq_sleep(info, SEND, timeout, &wait); /* * wq_sleep must be called with info->lock held, and * returns with the lock released */ goto out_free; } } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { pipelined_send(&wake_q, info, msg_ptr, receiver); } else { /* adds message to the queue */ ret = msg_insert(msg_ptr, info); if (ret) goto out_unlock; __do_notify(info); } inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); } out_unlock: spin_unlock(&info->lock); wake_up_q(&wake_q); out_free: if (ret) free_msg(msg_ptr); out_fput: fdput(f); out: return ret; } static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, size_t msg_len, unsigned int __user *u_msg_prio, struct timespec *ts) { ssize_t ret; struct msg_msg *msg_ptr; struct fd f; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; ktime_t expires, *timeout = NULL; struct posix_msg_tree_node *new_leaf = NULL; if (ts) { expires = timespec_to_ktime(*ts); timeout = &expires; } audit_mq_sendrecv(mqdes, msg_len, 0, ts); f = fdget(mqdes); if (unlikely(!f.file)) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); audit_file(f.file); if (unlikely(!(f.file->f_mode & FMODE_READ))) { ret = -EBADF; goto out_fput; } /* checks if buffer is big enough */ if (unlikely(msg_len < info->attr.mq_msgsize)) { ret = -EMSGSIZE; goto out_fput; } /* * msg_insert really wants us to have a valid, spare node struct so * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will * fall back to that if necessary. */ if (!info->node_cache) new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); spin_lock(&info->lock); if (!info->node_cache && new_leaf) { /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == 0) { if (f.file->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; } else { wait.task = current; wait.state = STATE_NONE; ret = wq_sleep(info, RECV, timeout, &wait); msg_ptr = wait.msg; } } else { DEFINE_WAKE_Q(wake_q); msg_ptr = msg_get(info); inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); /* There is now free space in queue. 
*/ pipelined_receive(&wake_q, info); spin_unlock(&info->lock); wake_up_q(&wake_q); ret = 0; } if (ret == 0) { ret = msg_ptr->m_ts; if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { ret = -EFAULT; } free_msg(msg_ptr); } out_fput: fdput(f); out: return ret; } SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, size_t, msg_len, unsigned int, msg_prio, const struct timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); } SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, size_t, msg_len, unsigned int __user *, u_msg_prio, const struct timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); } /* * Notes: the case when user wants us to deregister (with NULL as pointer) * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. */ static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) { int ret; struct fd f; struct sock *sock; struct inode *inode; struct mqueue_inode_info *info; struct sk_buff *nc; audit_mq_notify(mqdes, notification); nc = NULL; sock = NULL; if (notification != NULL) { if (unlikely(notification->sigev_notify != SIGEV_NONE && notification->sigev_notify != SIGEV_SIGNAL && notification->sigev_notify != SIGEV_THREAD)) return -EINVAL; if (notification->sigev_notify == SIGEV_SIGNAL && !valid_signal(notification->sigev_signo)) { return -EINVAL; } if (notification->sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); if (!nc) { ret = -ENOMEM; goto out; } if (copy_from_user(nc->data, notification->sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { ret = -EFAULT; goto out; } /* TODO: add a header? 
*/ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: f = fdget(notification->sigev_signo); if (!f.file) { ret = -EBADF; goto out; } sock = netlink_getsockbyfilp(f.file); fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; goto out; } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) goto retry; if (ret) { sock = NULL; nc = NULL; goto out; } } } f = fdget(mqdes); if (!f.file) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); if (notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); inode->i_atime = inode->i_ctime = current_time(inode); } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { switch (notification->sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; case SIGEV_THREAD: info->notify_sock = sock; info->notify_cookie = nc; sock = NULL; nc = NULL; info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; break; } info->notify_owner = get_pid(task_tgid(current)); info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = current_time(inode); } spin_unlock(&info->lock); out_fput: fdput(f); out: if (sock) netlink_detachskb(sock, nc); else if (nc) dev_kfree_skb(nc); return ret; } SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, const struct sigevent __user *, u_notification) { struct sigevent n, *p = NULL; if (u_notification) { if (copy_from_user(&n, u_notification, sizeof(struct sigevent))) return -EFAULT; p = &n; } return do_mq_notify(mqdes, p); } static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old) { struct fd f; struct inode *inode; struct mqueue_inode_info *info; if (new && (new->mq_flags & (~O_NONBLOCK))) return -EINVAL; f = fdget(mqdes); if (!f.file) return -EBADF; if (unlikely(f.file->f_op != &mqueue_file_operations)) { fdput(f); return -EBADF; } inode = file_inode(f.file); info = MQUEUE_I(inode); spin_lock(&info->lock); if (old) { *old = info->attr; old->mq_flags = f.file->f_flags & O_NONBLOCK; } if (new) { audit_mq_getsetattr(mqdes, new); spin_lock(&f.file->f_lock); if (new->mq_flags & O_NONBLOCK) f.file->f_flags |= O_NONBLOCK; else f.file->f_flags &= ~O_NONBLOCK; spin_unlock(&f.file->f_lock); inode->i_atime = inode->i_ctime = current_time(inode); } spin_unlock(&info->lock); fdput(f); return 0; } SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, const struct mq_attr __user *, u_mqstat, struct mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; struct mq_attr *new = NULL, *old = NULL; if (u_mqstat) { new = &mqstat; if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr))) return -EFAULT; } if (u_omqstat) old = &omqstat; ret = do_mq_getsetattr(mqdes, new, old); if (ret || !old) return ret; if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr))) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT struct compat_mq_attr { compat_long_t mq_flags; /* message queue flags */ compat_long_t mq_maxmsg; /* maximum number of messages */ compat_long_t mq_msgsize; /* maximum message size */ compat_long_t mq_curmsgs; /* number of messages currently queued */ compat_long_t __reserved[4]; /* ignored for input, zeroed for output */ }; static inline int 
get_compat_mq_attr(struct mq_attr *attr, const struct compat_mq_attr __user *uattr) { struct compat_mq_attr v; if (copy_from_user(&v, uattr, sizeof(*uattr))) return -EFAULT; memset(attr, 0, sizeof(*attr)); attr->mq_flags = v.mq_flags; attr->mq_maxmsg = v.mq_maxmsg; attr->mq_msgsize = v.mq_msgsize; attr->mq_curmsgs = v.mq_curmsgs; return 0; } static inline int put_compat_mq_attr(const struct mq_attr *attr, struct compat_mq_attr __user *uattr) { struct compat_mq_attr v; memset(&v, 0, sizeof(v)); v.mq_flags = attr->mq_flags; v.mq_maxmsg = attr->mq_maxmsg; v.mq_msgsize = attr->mq_msgsize; v.mq_curmsgs = attr->mq_curmsgs; if (copy_to_user(uattr, &v, sizeof(*uattr))) return -EFAULT; return 0; } COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, compat_mode_t, mode, struct compat_mq_attr __user *, u_attr) { struct mq_attr attr, *p = NULL; if (u_attr && oflag & O_CREAT) { p = &attr; if (get_compat_mq_attr(&attr, u_attr)) return -EFAULT; } return do_mq_open(u_name, oflag, mode, p); } static int compat_prepare_timeout(const struct compat_timespec __user *p, struct timespec *ts) { if (compat_get_timespec(ts, p)) return -EFAULT; if (!timespec_valid(ts)) return -EINVAL; return 0; } COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, compat_size_t, msg_len, unsigned int, msg_prio, const struct compat_timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = compat_prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); } COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, compat_size_t, msg_len, unsigned int __user *, u_msg_prio, const struct compat_timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = compat_prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); } COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, const struct compat_sigevent __user *, u_notification) { struct sigevent n, *p = NULL; if (u_notification) { if (get_compat_sigevent(&n, u_notification)) return -EFAULT; if (n.sigev_notify == SIGEV_THREAD) n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int); p = &n; } return do_mq_notify(mqdes, p); } COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, const struct compat_mq_attr __user *, u_mqstat, struct compat_mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; struct mq_attr *new = NULL, *old = NULL; if (u_mqstat) { new = &mqstat; if (get_compat_mq_attr(new, u_mqstat)) return -EFAULT; } if (u_omqstat) old = &omqstat; ret = do_mq_getsetattr(mqdes, new, old); if (ret || !old) return ret; if (put_compat_mq_attr(old, u_omqstat)) return -EFAULT; return 0; } #endif static const struct inode_operations mqueue_dir_inode_operations = { .lookup = simple_lookup, .create = mqueue_create, .unlink = mqueue_unlink, }; static const struct file_operations mqueue_file_operations = { .flush = mqueue_flush_file, .poll = mqueue_poll_file, .read = mqueue_read_file, .llseek = default_llseek, }; static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, .destroy_inode = mqueue_destroy_inode, .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, }; static struct file_system_type mqueue_fs_type = { .name = "mqueue", .mount = mqueue_mount, .kill_sb = kill_litter_super, .fs_flags = FS_USERNS_MOUNT, }; int mq_init_ns(struct 
ipc_namespace *ns) { ns->mq_queues_count = 0; ns->mq_queues_max = DFLT_QUEUESMAX; ns->mq_msg_max = DFLT_MSGMAX; ns->mq_msgsize_max = DFLT_MSGSIZEMAX; ns->mq_msg_default = DFLT_MSG; ns->mq_msgsize_default = DFLT_MSGSIZE; ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); if (IS_ERR(ns->mq_mnt)) { int err = PTR_ERR(ns->mq_mnt); ns->mq_mnt = NULL; return err; } return 0; } void mq_clear_sbinfo(struct ipc_namespace *ns) { ns->mq_mnt->mnt_sb->s_fs_info = NULL; } void mq_put_mnt(struct ipc_namespace *ns) { kern_unmount(ns->mq_mnt); } static int __init init_mqueue_fs(void) { int error; mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", sizeof(struct mqueue_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once); if (mqueue_inode_cachep == NULL) return -ENOMEM; /* ignore failures - they are not fatal */ mq_sysctl_table = mq_register_sysctl_table(); error = register_filesystem(&mqueue_fs_type); if (error) goto out_sysctl; spin_lock_init(&mq_lock); error = mq_init_ns(&init_ipc_ns); if (error) goto out_filesystem; return 0; out_filesystem: unregister_filesystem(&mqueue_fs_type); out_sysctl: if (mq_sysctl_table) unregister_sysctl_table(mq_sysctl_table); kmem_cache_destroy(mqueue_inode_cachep); return error; } device_initcall(init_mqueue_fs);
/* * POSIX message queues filesystem for Linux. * * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl) * Michal Wronski (michal.wronski@gmail.com) * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: * Manfred Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * * This file is released under the GPL. */ #include <linux/capability.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/sysctl.h> #include <linux/poll.h> #include <linux/mqueue.h> #include <linux/msg.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <linux/netlink.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/signal.h> #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/pid.h> #include <linux/ipc_namespace.h> #include <linux/user_namespace.h> #include <linux/slab.h> #include <linux/sched/wake_q.h> #include <linux/sched/signal.h> #include <linux/sched/user.h> #include <net/sock.h> #include "util.h" #define MQUEUE_MAGIC 0x19800202 #define DIRENT_SIZE 20 #define FILENT_SIZE 80 #define SEND 0 #define RECV 1 #define STATE_NONE 0 #define STATE_READY 1 struct posix_msg_tree_node { struct rb_node rb_node; struct list_head msg_list; int priority; }; struct ext_wait_queue { /* queue of sleeping tasks */ struct task_struct *task; struct list_head list; struct msg_msg *msg; /* ptr of loaded message */ int state; /* one of STATE_* values */ }; struct mqueue_inode_info { spinlock_t lock; struct inode vfs_inode; wait_queue_head_t wait_q; struct rb_root msg_tree; struct posix_msg_tree_node *node_cache; struct mq_attr attr; struct sigevent notify; struct pid *notify_owner; struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; struct sk_buff *notify_cookie; /* for tasks waiting for free space and messages, respectively */ struct ext_wait_queue e_wait_q[2]; unsigned long qsize; /* size of queue in memory (sum of all msgs) */ }; static const struct inode_operations mqueue_dir_inode_operations; static const struct file_operations mqueue_file_operations; static const struct super_operations mqueue_super_ops; static void remove_notification(struct mqueue_inode_info *info); static struct kmem_cache *mqueue_inode_cachep; static struct ctl_table_header *mq_sysctl_table; static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { return container_of(inode, struct mqueue_inode_info, vfs_inode); } /* * This routine should be called with the mq_lock held. 
*/ static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) { return get_ipc_ns(inode->i_sb->s_fs_info); } static struct ipc_namespace *get_ns_from_inode(struct inode *inode) { struct ipc_namespace *ns; spin_lock(&mq_lock); ns = __get_ns_from_inode(inode); spin_unlock(&mq_lock); return ns; } /* Auxiliary functions to manipulate messages' list */ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) { struct rb_node **p, *parent = NULL; struct posix_msg_tree_node *leaf; p = &info->msg_tree.rb_node; while (*p) { parent = *p; leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); if (likely(leaf->priority == msg->m_type)) goto insert_msg; else if (msg->m_type < leaf->priority) p = &(*p)->rb_left; else p = &(*p)->rb_right; } if (info->node_cache) { leaf = info->node_cache; info->node_cache = NULL; } else { leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC); if (!leaf) return -ENOMEM; INIT_LIST_HEAD(&leaf->msg_list); } leaf->priority = msg->m_type; rb_link_node(&leaf->rb_node, parent, p); rb_insert_color(&leaf->rb_node, &info->msg_tree); insert_msg: info->attr.mq_curmsgs++; info->qsize += msg->m_ts; list_add_tail(&msg->m_list, &leaf->msg_list); return 0; } static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) { struct rb_node **p, *parent = NULL; struct posix_msg_tree_node *leaf; struct msg_msg *msg; try_again: p = &info->msg_tree.rb_node; while (*p) { parent = *p; /* * During insert, low priorities go to the left and high to the * right. On receive, we want the highest priorities first, so * walk all the way to the right. */ p = &(*p)->rb_right; } if (!parent) { if (info->attr.mq_curmsgs) { pr_warn_once("Inconsistency in POSIX message queue, " "no tree element, but supposedly messages " "should exist!\n"); info->attr.mq_curmsgs = 0; } return NULL; } leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); if (unlikely(list_empty(&leaf->msg_list))) { pr_warn_once("Inconsistency in POSIX message queue, " "empty leaf node but we haven't implemented " "lazy leaf delete!\n"); rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { kfree(leaf); } else { info->node_cache = leaf; } goto try_again; } else { msg = list_first_entry(&leaf->msg_list, struct msg_msg, m_list); list_del(&msg->m_list); if (list_empty(&leaf->msg_list)) { rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { kfree(leaf); } else { info->node_cache = leaf; } } } info->attr.mq_curmsgs--; info->qsize -= msg->m_ts; return msg; } static struct inode *mqueue_get_inode(struct super_block *sb, struct ipc_namespace *ipc_ns, umode_t mode, struct mq_attr *attr) { struct user_struct *u = current_user(); struct inode *inode; int ret = -ENOMEM; inode = new_inode(sb); if (!inode) goto err; inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode); if (S_ISREG(mode)) { struct mqueue_inode_info *info; unsigned long mq_bytes, mq_treesize; inode->i_fop = &mqueue_file_operations; inode->i_size = FILENT_SIZE; /* mqueue specific info */ info = MQUEUE_I(inode); spin_lock_init(&info->lock); init_waitqueue_head(&info->wait_q); INIT_LIST_HEAD(&info->e_wait_q[0].list); INIT_LIST_HEAD(&info->e_wait_q[1].list); info->notify_owner = NULL; info->notify_user_ns = NULL; info->qsize = 0; info->user = NULL; /* set when all is ok */ info->msg_tree = RB_ROOT; info->node_cache = NULL; memset(&info->attr, 0, sizeof(info->attr)); info->attr.mq_maxmsg = 
min(ipc_ns->mq_msg_max, ipc_ns->mq_msg_default); info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, ipc_ns->mq_msgsize_default); if (attr) { info->attr.mq_maxmsg = attr->mq_maxmsg; info->attr.mq_msgsize = attr->mq_msgsize; } /* * We used to allocate a static array of pointers and account * the size of that array as well as one msg_msg struct per * possible message into the queue size. That's no longer * accurate as the queue is now an rbtree and will grow and * shrink depending on usage patterns. We can, however, still * account one msg_msg struct per message, but the nodes are * allocated depending on priority usage, and most programs * only use one, or a handful, of priorities. However, since * this is pinned memory, we need to assume worst case, so * that means the min(mq_maxmsg, max_priorities) * struct * posix_msg_tree_node. */ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); mq_bytes = mq_treesize + (info->attr.mq_maxmsg * info->attr.mq_msgsize); spin_lock(&mq_lock); if (u->mq_bytes + mq_bytes < u->mq_bytes || u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { spin_unlock(&mq_lock); /* mqueue_evict_inode() releases info->messages */ ret = -EMFILE; goto out_inode; } u->mq_bytes += mq_bytes; spin_unlock(&mq_lock); /* all is ok */ info->user = get_uid(u); } else if (S_ISDIR(mode)) { inc_nlink(inode); /* Some things misbehave if size == 0 on a directory */ inode->i_size = 2 * DIRENT_SIZE; inode->i_op = &mqueue_dir_inode_operations; inode->i_fop = &simple_dir_operations; } return inode; out_inode: iput(inode); err: return ERR_PTR(ret); } static int mqueue_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct ipc_namespace *ns = sb->s_fs_info; sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); if (IS_ERR(inode)) return PTR_ERR(inode); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; return 0; } static struct dentry *mqueue_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct ipc_namespace *ns; if (flags & MS_KERNMOUNT) { ns = data; data = NULL; } else { ns = current->nsproxy->ipc_ns; } return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super); } static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; inode_init_once(&p->vfs_inode); } static struct inode *mqueue_alloc_inode(struct super_block *sb) { struct mqueue_inode_info *ei; ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void mqueue_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } static void mqueue_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, mqueue_i_callback); } static void mqueue_evict_inode(struct inode *inode) { struct mqueue_inode_info *info; struct user_struct *user; unsigned long mq_bytes, mq_treesize; struct ipc_namespace *ipc_ns; struct msg_msg *msg; clear_inode(inode); if (S_ISDIR(inode->i_mode)) return; ipc_ns = get_ns_from_inode(inode); info = MQUEUE_I(inode); spin_lock(&info->lock); while ((msg = msg_get(info)) != NULL) free_msg(msg); kfree(info->node_cache); spin_unlock(&info->lock); /* Total amount of 
bytes accounted for the mqueue */ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); mq_bytes = mq_treesize + (info->attr.mq_maxmsg * info->attr.mq_msgsize); user = info->user; if (user) { spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; /* * get_ns_from_inode() ensures that the * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns * to which we now hold a reference, or it is NULL. * We can't put it here under mq_lock, though. */ if (ipc_ns) ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); free_uid(user); } if (ipc_ns) put_ipc_ns(ipc_ns); } static int mqueue_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode; struct mq_attr *attr = dentry->d_fsdata; int error; struct ipc_namespace *ipc_ns; spin_lock(&mq_lock); ipc_ns = __get_ns_from_inode(dir); if (!ipc_ns) { error = -EACCES; goto out_unlock; } if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; goto out_unlock; } ipc_ns->mq_queues_count++; spin_unlock(&mq_lock); inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); if (IS_ERR(inode)) { error = PTR_ERR(inode); spin_lock(&mq_lock); ipc_ns->mq_queues_count--; goto out_unlock; } put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); d_instantiate(dentry, inode); dget(dentry); return 0; out_unlock: spin_unlock(&mq_lock); if (ipc_ns) put_ipc_ns(ipc_ns); return error; } static int mqueue_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); dir->i_size -= DIRENT_SIZE; drop_nlink(inode); dput(dentry); return 0; } /* * This is routine for system read from queue file. * To avoid mess with doing here some sort of mq_receive we allow * to read only queue size & notification info (the only values * that are interesting from user point of view and aren't accessible * through std routines) */ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, size_t count, loff_t *off) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); char buffer[FILENT_SIZE]; ssize_t ret; spin_lock(&info->lock); snprintf(buffer, sizeof(buffer), "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n", info->qsize, info->notify_owner ? info->notify.sigev_notify : 0, (info->notify_owner && info->notify.sigev_notify == SIGEV_SIGNAL) ? 
info->notify.sigev_signo : 0, pid_vnr(info->notify_owner)); spin_unlock(&info->lock); buffer[sizeof(buffer)-1] = '\0'; ret = simple_read_from_buffer(u_data, count, off, buffer, strlen(buffer)); if (ret <= 0) return ret; file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp)); return ret; } static int mqueue_flush_file(struct file *filp, fl_owner_t id) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); spin_lock(&info->lock); if (task_tgid(current) == info->notify_owner) remove_notification(info); spin_unlock(&info->lock); return 0; } static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); int retval = 0; poll_wait(filp, &info->wait_q, poll_tab); spin_lock(&info->lock); if (info->attr.mq_curmsgs) retval = POLLIN | POLLRDNORM; if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) retval |= POLLOUT | POLLWRNORM; spin_unlock(&info->lock); return retval; } /* Adds current to info->e_wait_q[sr] before element with smaller prio */ static void wq_add(struct mqueue_inode_info *info, int sr, struct ext_wait_queue *ewp) { struct ext_wait_queue *walk; ewp->task = current; list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { if (walk->task->static_prio <= current->static_prio) { list_add_tail(&ewp->list, &walk->list); return; } } list_add_tail(&ewp->list, &info->e_wait_q[sr].list); } /* * Puts current task to sleep. Caller must hold queue lock. After return * lock isn't held. * sr: SEND or RECV */ static int wq_sleep(struct mqueue_inode_info *info, int sr, ktime_t *timeout, struct ext_wait_queue *ewp) __releases(&info->lock) { int retval; signed long time; wq_add(info, sr, ewp); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); time = schedule_hrtimeout_range_clock(timeout, 0, HRTIMER_MODE_ABS, CLOCK_REALTIME); if (ewp->state == STATE_READY) { retval = 0; goto out; } spin_lock(&info->lock); if (ewp->state == STATE_READY) { retval = 0; goto out_unlock; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (time == 0) { retval = -ETIMEDOUT; break; } } list_del(&ewp->list); out_unlock: spin_unlock(&info->lock); out: return retval; } /* * Returns waiting task that should be serviced first or NULL if none exists */ static struct ext_wait_queue *wq_get_first_waiter( struct mqueue_inode_info *info, int sr) { struct list_head *ptr; ptr = info->e_wait_q[sr].list.prev; if (ptr == &info->e_wait_q[sr].list) return NULL; return list_entry(ptr, struct ext_wait_queue, list); } static inline void set_cookie(struct sk_buff *skb, char code) { ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* * The next function is only to split too long sys_mq_timedsend */ static void __do_notify(struct mqueue_inode_info *info) { /* notification * invoked when there is registered process and there isn't process * waiting synchronously for message AND state of queue changed from * empty to not empty. Here we are sure that no one is waiting * synchronously. 
*/ if (info->notify_owner && info->attr.mq_curmsgs == 1) { struct siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; case SIGEV_SIGNAL: /* sends signal */ sig_i.si_signo = info->notify.sigev_signo; sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; /* map current pid/uid into info->owner's namespaces */ rcu_read_lock(); sig_i.si_pid = task_tgid_nr_ns(current, ns_of_pid(info->notify_owner)); sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); rcu_read_unlock(); kill_pid_info(info->notify.sigev_signo, &sig_i, info->notify_owner); break; case SIGEV_THREAD: set_cookie(info->notify_cookie, NOTIFY_WOKENUP); netlink_sendskb(info->notify_sock, info->notify_cookie); break; } /* after notification unregisters process */ put_pid(info->notify_owner); put_user_ns(info->notify_user_ns); info->notify_owner = NULL; info->notify_user_ns = NULL; } wake_up(&info->wait_q); } static int prepare_timeout(const struct timespec __user *u_abs_timeout, struct timespec *ts) { if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) return -EFAULT; if (!timespec_valid(ts)) return -EINVAL; return 0; } static void remove_notification(struct mqueue_inode_info *info) { if (info->notify_owner != NULL && info->notify.sigev_notify == SIGEV_THREAD) { set_cookie(info->notify_cookie, NOTIFY_REMOVED); netlink_sendskb(info->notify_sock, info->notify_cookie); } put_pid(info->notify_owner); put_user_ns(info->notify_user_ns); info->notify_owner = NULL; info->notify_user_ns = NULL; } static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) { int mq_treesize; unsigned long total_size; if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) return -EINVAL; if (capable(CAP_SYS_RESOURCE)) { if (attr->mq_maxmsg > HARD_MSGMAX || attr->mq_msgsize > HARD_MSGSIZEMAX) return -EINVAL; } else { if (attr->mq_maxmsg > ipc_ns->mq_msg_max || attr->mq_msgsize > ipc_ns->mq_msgsize_max) return -EINVAL; } /* check for overflow */ if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) return -EOVERFLOW; mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); total_size = attr->mq_maxmsg * attr->mq_msgsize; if (total_size + mq_treesize < total_size) return -EOVERFLOW; return 0; } /* * Invoked when creating a new queue via sys_mq_open */ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, struct path *path, int oflag, umode_t mode, struct mq_attr *attr) { const struct cred *cred = current_cred(); int ret; if (attr) { ret = mq_attr_ok(ipc_ns, attr); if (ret) return ERR_PTR(ret); /* store for use during create */ path->dentry->d_fsdata = attr; } else { struct mq_attr def_attr; def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, ipc_ns->mq_msg_default); def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, ipc_ns->mq_msgsize_default); ret = mq_attr_ok(ipc_ns, &def_attr); if (ret) return ERR_PTR(ret); } mode &= ~current_umask(); ret = vfs_create(dir, path->dentry, mode, true); path->dentry->d_fsdata = NULL; if (ret) return ERR_PTR(ret); return dentry_open(path, oflag, cred); } /* Opens existing queue */ static struct file *do_open(struct path *path, int oflag) { static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, MAY_READ | MAY_WRITE }; int acc; if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) return ERR_PTR(-EINVAL); acc = oflag2acc[oflag & O_ACCMODE]; if (inode_permission(d_inode(path->dentry), acc)) return ERR_PTR(-EACCES); return dentry_open(path, 
oflag, current_cred()); } static int do_mq_open(const char __user *u_name, int oflag, umode_t mode, struct mq_attr *attr) { struct path path; struct file *filp; struct filename *name; int fd, error; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; struct vfsmount *mnt = ipc_ns->mq_mnt; struct dentry *root = mnt->mnt_root; int ro; audit_mq_open(oflag, mode, attr); if (IS_ERR(name = getname(u_name))) return PTR_ERR(name); fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) goto out_putname; ro = mnt_want_write(mnt); /* we'll drop it in any case */ error = 0; inode_lock(d_inode(root)); path.dentry = lookup_one_len(name->name, root, strlen(name->name)); if (IS_ERR(path.dentry)) { error = PTR_ERR(path.dentry); goto out_putfd; } path.mnt = mntget(mnt); if (oflag & O_CREAT) { if (d_really_is_positive(path.dentry)) { /* entry already exists */ audit_inode(name, path.dentry, 0); if (oflag & O_EXCL) { error = -EEXIST; goto out; } filp = do_open(&path, oflag); } else { if (ro) { error = ro; goto out; } audit_inode_parent_hidden(name, root); filp = do_create(ipc_ns, d_inode(root), &path, oflag, mode, attr); } } else { if (d_really_is_negative(path.dentry)) { error = -ENOENT; goto out; } audit_inode(name, path.dentry, 0); filp = do_open(&path, oflag); } if (!IS_ERR(filp)) fd_install(fd, filp); else error = PTR_ERR(filp); out: path_put(&path); out_putfd: if (error) { put_unused_fd(fd); fd = error; } inode_unlock(d_inode(root)); if (!ro) mnt_drop_write(mnt); out_putname: putname(name); return fd; } SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr __user *, u_attr) { struct mq_attr attr; if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL); } SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) { int err; struct filename *name; struct dentry *dentry; struct inode *inode = NULL; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; struct vfsmount *mnt = ipc_ns->mq_mnt; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); audit_inode_parent_hidden(name, mnt->mnt_root); err = mnt_want_write(mnt); if (err) goto out_name; inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT); dentry = lookup_one_len(name->name, mnt->mnt_root, strlen(name->name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } inode = d_inode(dentry); if (!inode) { err = -ENOENT; } else { ihold(inode); err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL); } dput(dentry); out_unlock: inode_unlock(d_inode(mnt->mnt_root)); if (inode) iput(inode); mnt_drop_write(mnt); out_name: putname(name); return err; } /* Pipelined send and receive functions. * * If a receiver finds no waiting message, then it registers itself in the * list of waiting receivers. A sender checks that list before adding the new * message into the message array. If there is a waiting receiver, then it * bypasses the message array and directly hands the message over to the * receiver. The receiver accepts the message and returns without grabbing the * queue spinlock: * * - Set pointer to message. * - Queue the receiver task for later wakeup (without the info->lock). * - Update its state to STATE_READY. Now the receiver can continue. * - Wake up the process after the lock is dropped. Should the process wake up * before this wakeup (due to a timeout or a signal) it will either see * STATE_READY and continue or acquire the lock to check the state again. * * The same algorithm is used for senders. 
*/ /* pipelined_send() - send a message directly to the task waiting in * sys_mq_timedreceive() (without inserting message into a queue). */ static inline void pipelined_send(struct wake_q_head *wake_q, struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { receiver->msg = message; list_del(&receiver->list); wake_q_add(wake_q, receiver->task); /* * Rely on the implicit cmpxchg barrier from wake_q_add such * that we can ensure that updating receiver->state is the last * write operation: As once set, the receiver can continue, * and if we don't have the reference count from the wake_q, * yet, at that point we can later have a use-after-free * condition and bogus wakeup. */ receiver->state = STATE_READY; } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() * gets its message and put to the queue (we have one free place for sure). */ static inline void pipelined_receive(struct wake_q_head *wake_q, struct mqueue_inode_info *info) { struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); if (!sender) { /* for poll */ wake_up_interruptible(&info->wait_q); return; } if (msg_insert(sender->msg, info)) return; list_del(&sender->list); wake_q_add(wake_q, sender->task); sender->state = STATE_READY; } static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, size_t msg_len, unsigned int msg_prio, struct timespec *ts) { struct fd f; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; ktime_t expires, *timeout = NULL; struct posix_msg_tree_node *new_leaf = NULL; int ret = 0; DEFINE_WAKE_Q(wake_q); if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; if (ts) { expires = timespec_to_ktime(*ts); timeout = &expires; } audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts); f = fdget(mqdes); if (unlikely(!f.file)) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); audit_file(f.file); if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { ret = -EBADF; goto out_fput; } if (unlikely(msg_len > info->attr.mq_msgsize)) { ret = -EMSGSIZE; goto out_fput; } /* First try to allocate memory, before doing anything with * existing queues. */ msg_ptr = load_msg(u_msg_ptr, msg_len); if (IS_ERR(msg_ptr)) { ret = PTR_ERR(msg_ptr); goto out_fput; } msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; /* * msg_insert really wants us to have a valid, spare node struct so * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will * fall back to that if necessary. 
*/ if (!info->node_cache) new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); spin_lock(&info->lock); if (!info->node_cache && new_leaf) { /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; new_leaf = NULL; } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { if (f.file->f_flags & O_NONBLOCK) { ret = -EAGAIN; } else { wait.task = current; wait.msg = (void *) msg_ptr; wait.state = STATE_NONE; ret = wq_sleep(info, SEND, timeout, &wait); /* * wq_sleep must be called with info->lock held, and * returns with the lock released */ goto out_free; } } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { pipelined_send(&wake_q, info, msg_ptr, receiver); } else { /* adds message to the queue */ ret = msg_insert(msg_ptr, info); if (ret) goto out_unlock; __do_notify(info); } inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); } out_unlock: spin_unlock(&info->lock); wake_up_q(&wake_q); out_free: if (ret) free_msg(msg_ptr); out_fput: fdput(f); out: return ret; } static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, size_t msg_len, unsigned int __user *u_msg_prio, struct timespec *ts) { ssize_t ret; struct msg_msg *msg_ptr; struct fd f; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; ktime_t expires, *timeout = NULL; struct posix_msg_tree_node *new_leaf = NULL; if (ts) { expires = timespec_to_ktime(*ts); timeout = &expires; } audit_mq_sendrecv(mqdes, msg_len, 0, ts); f = fdget(mqdes); if (unlikely(!f.file)) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); audit_file(f.file); if (unlikely(!(f.file->f_mode & FMODE_READ))) { ret = -EBADF; goto out_fput; } /* checks if buffer is big enough */ if (unlikely(msg_len < info->attr.mq_msgsize)) { ret = -EMSGSIZE; goto out_fput; } /* * msg_insert really wants us to have a valid, spare node struct so * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will * fall back to that if necessary. */ if (!info->node_cache) new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); spin_lock(&info->lock); if (!info->node_cache && new_leaf) { /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == 0) { if (f.file->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; } else { wait.task = current; wait.state = STATE_NONE; ret = wq_sleep(info, RECV, timeout, &wait); msg_ptr = wait.msg; } } else { DEFINE_WAKE_Q(wake_q); msg_ptr = msg_get(info); inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); /* There is now free space in queue. 
*/ pipelined_receive(&wake_q, info); spin_unlock(&info->lock); wake_up_q(&wake_q); ret = 0; } if (ret == 0) { ret = msg_ptr->m_ts; if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { ret = -EFAULT; } free_msg(msg_ptr); } out_fput: fdput(f); out: return ret; } SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, size_t, msg_len, unsigned int, msg_prio, const struct timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); } SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, size_t, msg_len, unsigned int __user *, u_msg_prio, const struct timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); } /* * Notes: the case when user wants us to deregister (with NULL as pointer) * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. */ static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) { int ret; struct fd f; struct sock *sock; struct inode *inode; struct mqueue_inode_info *info; struct sk_buff *nc; audit_mq_notify(mqdes, notification); nc = NULL; sock = NULL; if (notification != NULL) { if (unlikely(notification->sigev_notify != SIGEV_NONE && notification->sigev_notify != SIGEV_SIGNAL && notification->sigev_notify != SIGEV_THREAD)) return -EINVAL; if (notification->sigev_notify == SIGEV_SIGNAL && !valid_signal(notification->sigev_signo)) { return -EINVAL; } if (notification->sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); if (!nc) { ret = -ENOMEM; goto out; } if (copy_from_user(nc->data, notification->sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { ret = -EFAULT; goto out; } /* TODO: add a header? 
*/ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: f = fdget(notification->sigev_signo); if (!f.file) { ret = -EBADF; goto out; } sock = netlink_getsockbyfilp(f.file); fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; goto out; } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) { sock = NULL; goto retry; } if (ret) { sock = NULL; nc = NULL; goto out; } } } f = fdget(mqdes); if (!f.file) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); if (notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); inode->i_atime = inode->i_ctime = current_time(inode); } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { switch (notification->sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; case SIGEV_THREAD: info->notify_sock = sock; info->notify_cookie = nc; sock = NULL; nc = NULL; info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; break; } info->notify_owner = get_pid(task_tgid(current)); info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = current_time(inode); } spin_unlock(&info->lock); out_fput: fdput(f); out: if (sock) netlink_detachskb(sock, nc); else if (nc) dev_kfree_skb(nc); return ret; } SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, const struct sigevent __user *, u_notification) { struct sigevent n, *p = NULL; if (u_notification) { if (copy_from_user(&n, u_notification, sizeof(struct sigevent))) return -EFAULT; p = &n; } return do_mq_notify(mqdes, p); } static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old) { struct fd f; struct inode *inode; struct mqueue_inode_info *info; if (new && (new->mq_flags & (~O_NONBLOCK))) return -EINVAL; f = fdget(mqdes); if (!f.file) return -EBADF; if (unlikely(f.file->f_op != &mqueue_file_operations)) { fdput(f); return -EBADF; } inode = file_inode(f.file); info = MQUEUE_I(inode); spin_lock(&info->lock); if (old) { *old = info->attr; old->mq_flags = f.file->f_flags & O_NONBLOCK; } if (new) { audit_mq_getsetattr(mqdes, new); spin_lock(&f.file->f_lock); if (new->mq_flags & O_NONBLOCK) f.file->f_flags |= O_NONBLOCK; else f.file->f_flags &= ~O_NONBLOCK; spin_unlock(&f.file->f_lock); inode->i_atime = inode->i_ctime = current_time(inode); } spin_unlock(&info->lock); fdput(f); return 0; } SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, const struct mq_attr __user *, u_mqstat, struct mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; struct mq_attr *new = NULL, *old = NULL; if (u_mqstat) { new = &mqstat; if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr))) return -EFAULT; } if (u_omqstat) old = &omqstat; ret = do_mq_getsetattr(mqdes, new, old); if (ret || !old) return ret; if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr))) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT struct compat_mq_attr { compat_long_t mq_flags; /* message queue flags */ compat_long_t mq_maxmsg; /* maximum number of messages */ compat_long_t mq_msgsize; /* maximum message size */ compat_long_t mq_curmsgs; /* number of messages currently queued */ compat_long_t __reserved[4]; /* ignored for input, zeroed for output */ }; static inline int 
get_compat_mq_attr(struct mq_attr *attr, const struct compat_mq_attr __user *uattr) { struct compat_mq_attr v; if (copy_from_user(&v, uattr, sizeof(*uattr))) return -EFAULT; memset(attr, 0, sizeof(*attr)); attr->mq_flags = v.mq_flags; attr->mq_maxmsg = v.mq_maxmsg; attr->mq_msgsize = v.mq_msgsize; attr->mq_curmsgs = v.mq_curmsgs; return 0; } static inline int put_compat_mq_attr(const struct mq_attr *attr, struct compat_mq_attr __user *uattr) { struct compat_mq_attr v; memset(&v, 0, sizeof(v)); v.mq_flags = attr->mq_flags; v.mq_maxmsg = attr->mq_maxmsg; v.mq_msgsize = attr->mq_msgsize; v.mq_curmsgs = attr->mq_curmsgs; if (copy_to_user(uattr, &v, sizeof(*uattr))) return -EFAULT; return 0; } COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, compat_mode_t, mode, struct compat_mq_attr __user *, u_attr) { struct mq_attr attr, *p = NULL; if (u_attr && oflag & O_CREAT) { p = &attr; if (get_compat_mq_attr(&attr, u_attr)) return -EFAULT; } return do_mq_open(u_name, oflag, mode, p); } static int compat_prepare_timeout(const struct compat_timespec __user *p, struct timespec *ts) { if (compat_get_timespec(ts, p)) return -EFAULT; if (!timespec_valid(ts)) return -EINVAL; return 0; } COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, compat_size_t, msg_len, unsigned int, msg_prio, const struct compat_timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = compat_prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); } COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, compat_size_t, msg_len, unsigned int __user *, u_msg_prio, const struct compat_timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = compat_prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); } COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, const struct compat_sigevent __user *, u_notification) { struct sigevent n, *p = NULL; if (u_notification) { if (get_compat_sigevent(&n, u_notification)) return -EFAULT; if (n.sigev_notify == SIGEV_THREAD) n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int); p = &n; } return do_mq_notify(mqdes, p); } COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, const struct compat_mq_attr __user *, u_mqstat, struct compat_mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; struct mq_attr *new = NULL, *old = NULL; if (u_mqstat) { new = &mqstat; if (get_compat_mq_attr(new, u_mqstat)) return -EFAULT; } if (u_omqstat) old = &omqstat; ret = do_mq_getsetattr(mqdes, new, old); if (ret || !old) return ret; if (put_compat_mq_attr(old, u_omqstat)) return -EFAULT; return 0; } #endif static const struct inode_operations mqueue_dir_inode_operations = { .lookup = simple_lookup, .create = mqueue_create, .unlink = mqueue_unlink, }; static const struct file_operations mqueue_file_operations = { .flush = mqueue_flush_file, .poll = mqueue_poll_file, .read = mqueue_read_file, .llseek = default_llseek, }; static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, .destroy_inode = mqueue_destroy_inode, .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, }; static struct file_system_type mqueue_fs_type = { .name = "mqueue", .mount = mqueue_mount, .kill_sb = kill_litter_super, .fs_flags = FS_USERNS_MOUNT, }; int mq_init_ns(struct 
ipc_namespace *ns) { ns->mq_queues_count = 0; ns->mq_queues_max = DFLT_QUEUESMAX; ns->mq_msg_max = DFLT_MSGMAX; ns->mq_msgsize_max = DFLT_MSGSIZEMAX; ns->mq_msg_default = DFLT_MSG; ns->mq_msgsize_default = DFLT_MSGSIZE; ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); if (IS_ERR(ns->mq_mnt)) { int err = PTR_ERR(ns->mq_mnt); ns->mq_mnt = NULL; return err; } return 0; } void mq_clear_sbinfo(struct ipc_namespace *ns) { ns->mq_mnt->mnt_sb->s_fs_info = NULL; } void mq_put_mnt(struct ipc_namespace *ns) { kern_unmount(ns->mq_mnt); } static int __init init_mqueue_fs(void) { int error; mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", sizeof(struct mqueue_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once); if (mqueue_inode_cachep == NULL) return -ENOMEM; /* ignore failures - they are not fatal */ mq_sysctl_table = mq_register_sysctl_table(); error = register_filesystem(&mqueue_fs_type); if (error) goto out_sysctl; spin_lock_init(&mq_lock); error = mq_init_ns(&init_ipc_ns); if (error) goto out_filesystem; return 0; out_filesystem: unregister_filesystem(&mqueue_fs_type); out_sysctl: if (mq_sysctl_table) unregister_sysctl_table(mq_sysctl_table); kmem_cache_destroy(mqueue_inode_cachep); return error; } device_initcall(init_mqueue_fs);
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) { int ret; struct fd f; struct sock *sock; struct inode *inode; struct mqueue_inode_info *info; struct sk_buff *nc; audit_mq_notify(mqdes, notification); nc = NULL; sock = NULL; if (notification != NULL) { if (unlikely(notification->sigev_notify != SIGEV_NONE && notification->sigev_notify != SIGEV_SIGNAL && notification->sigev_notify != SIGEV_THREAD)) return -EINVAL; if (notification->sigev_notify == SIGEV_SIGNAL && !valid_signal(notification->sigev_signo)) { return -EINVAL; } if (notification->sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); if (!nc) { ret = -ENOMEM; goto out; } if (copy_from_user(nc->data, notification->sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { ret = -EFAULT; goto out; } /* TODO: add a header? */ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: f = fdget(notification->sigev_signo); if (!f.file) { ret = -EBADF; goto out; } sock = netlink_getsockbyfilp(f.file); fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; goto out; } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) goto retry; if (ret) { sock = NULL; nc = NULL; goto out; } } } f = fdget(mqdes); if (!f.file) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); if (notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); inode->i_atime = inode->i_ctime = current_time(inode); } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { switch (notification->sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; case SIGEV_THREAD: info->notify_sock = sock; info->notify_cookie = nc; sock = NULL; nc = NULL; info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; break; } info->notify_owner = get_pid(task_tgid(current)); info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = current_time(inode); } spin_unlock(&info->lock); out_fput: fdput(f); out: if (sock) netlink_detachskb(sock, nc); else if (nc) dev_kfree_skb(nc); return ret; }
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) { int ret; struct fd f; struct sock *sock; struct inode *inode; struct mqueue_inode_info *info; struct sk_buff *nc; audit_mq_notify(mqdes, notification); nc = NULL; sock = NULL; if (notification != NULL) { if (unlikely(notification->sigev_notify != SIGEV_NONE && notification->sigev_notify != SIGEV_SIGNAL && notification->sigev_notify != SIGEV_THREAD)) return -EINVAL; if (notification->sigev_notify == SIGEV_SIGNAL && !valid_signal(notification->sigev_signo)) { return -EINVAL; } if (notification->sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); if (!nc) { ret = -ENOMEM; goto out; } if (copy_from_user(nc->data, notification->sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { ret = -EFAULT; goto out; } /* TODO: add a header? */ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: f = fdget(notification->sigev_signo); if (!f.file) { ret = -EBADF; goto out; } sock = netlink_getsockbyfilp(f.file); fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; goto out; } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) { sock = NULL; goto retry; } if (ret) { sock = NULL; nc = NULL; goto out; } } } f = fdget(mqdes); if (!f.file) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); if (notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); inode->i_atime = inode->i_ctime = current_time(inode); } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { switch (notification->sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; case SIGEV_THREAD: info->notify_sock = sock; info->notify_cookie = nc; sock = NULL; nc = NULL; info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; break; } info->notify_owner = get_pid(task_tgid(current)); info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = current_time(inode); } spin_unlock(&info->lock); out_fput: fdput(f); out: if (sock) netlink_detachskb(sock, nc); else if (nc) dev_kfree_skb(nc); return ret; }
{'added': [(1273, '\t\t\tif (ret == 1) {'), (1274, '\t\t\t\tsock = NULL;'), (1276, '\t\t\t}')], 'deleted': [(1273, '\t\t\tif (ret == 1)')]}
3
1
1291
7674
https://github.com/torvalds/linux
CVE-2017-11176
['CWE-416']
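The diff above is the whole of the CVE-2017-11176 fix: when netlink_attachskb() returns 1 to request a retry, it has already dropped the socket reference it was handed, so do_mq_notify() must forget its local sock pointer before jumping back to retry. Without that, a failure on the second pass (for example fdget() failing) reaches the out: label and calls netlink_detachskb() on an already-released socket, which is the CWE-416 use-after-free recorded above. The sketch below is a minimal, self-contained illustration of the same pattern in plain C; attach_consumes(), open_handle() and struct resource are hypothetical stand-ins for the kernel helpers, not real APIs.

#include <stdlib.h>

struct resource { int id; };

/* Hypothetical stand-in for netlink_attachskb(): on the first attempt it
 * drops the reference it was handed and returns 1 to ask for a retry;
 * afterwards it succeeds and the caller keeps ownership. */
static int attach_consumes(struct resource *res, int attempt)
{
	if (attempt == 0) {
		free(res);      /* reference consumed before requesting a retry */
		return 1;
	}
	return 0;
}

/* Hypothetical stand-in for the fdget() step, which can fail on the
 * second pass (e.g. the descriptor was closed in the meantime). */
static int open_handle(int attempt)
{
	return attempt == 0 ? 0 : -1;
}

int main(void)
{
	struct resource *res = NULL;
	int attempt = 0, ret = 0;

retry:
	if (open_handle(attempt) < 0) {
		ret = -1;
		goto out;       /* without the fix, res still points at freed memory here */
	}
	res = malloc(sizeof(*res));
	if (!res) {
		ret = -1;
		goto out;
	}
	if (attach_consumes(res, attempt++) == 1) {
		res = NULL;     /* the fix: forget the consumed pointer before retrying */
		goto retry;
	}
out:
	free(res);              /* mirrors netlink_detachskb(sock, nc) on the out: path */
	return ret;
}

With the "res = NULL" line removed, the same run frees the resource twice, which is the shape of the original bug.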
blkback.c
dispatch_discard_io
/****************************************************************************** * * Back-end of the driver for virtual block devices. This portion of the * driver exports a 'unified' block-device interface that can be accessed * by any operating system that implements a compatible front end. A * reference front-end implementation can be found in: * drivers/block/xen-blkfront.c * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/freezer.h> #include <linux/bitmap.h> #include <xen/events.h> #include <xen/page.h> #include <xen/xen.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <xen/balloon.h> #include "common.h" /* * Maximum number of unused free pages to keep in the internal buffer. * Setting this to a value too low will reduce memory used in each backend, * but can have a performance penalty. * * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can * be set to a lower value that might degrade performance on some intensive * IO workloads. */ static int xen_blkif_max_buffer_pages = 1024; module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644); MODULE_PARM_DESC(max_buffer_pages, "Maximum number of free pages to keep in each block backend buffer"); /* * Maximum number of grants to map persistently in blkback. For maximum * performance this should be the total numbers of grants that can be used * to fill the ring, but since this might become too high, specially with * the use of indirect descriptors, we set it to a value that provides good * performance without using too much memory. * * When the list of persistent grants is full we clean it up using a LRU * algorithm. */ static int xen_blkif_max_pgrants = 1056; module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644); MODULE_PARM_DESC(max_persistent_grants, "Maximum number of grants to map persistently"); /* * The LRU mechanism to clean the lists of persistent grants needs to * be executed periodically. 
The time interval between consecutive executions * of the purge mechanism is set in ms. */ #define LRU_INTERVAL 100 /* * When the persistent grants list is full we will remove unused grants * from the list. The percent number of grants to be removed at each LRU * execution. */ #define LRU_PERCENT_CLEAN 5 /* Run-time switchable: /sys/module/blkback/parameters/ */ static unsigned int log_stats; module_param(log_stats, int, 0644); #define BLKBACK_INVALID_HANDLE (~0) /* Number of free pages to remove on each call to free_xenballooned_pages */ #define NUM_BATCH_FREE_PAGES 10 static inline int get_free_page(struct xen_blkif *blkif, struct page **page) { unsigned long flags; spin_lock_irqsave(&blkif->free_pages_lock, flags); if (list_empty(&blkif->free_pages)) { BUG_ON(blkif->free_pages_num != 0); spin_unlock_irqrestore(&blkif->free_pages_lock, flags); return alloc_xenballooned_pages(1, page, false); } BUG_ON(blkif->free_pages_num == 0); page[0] = list_first_entry(&blkif->free_pages, struct page, lru); list_del(&page[0]->lru); blkif->free_pages_num--; spin_unlock_irqrestore(&blkif->free_pages_lock, flags); return 0; } static inline void put_free_pages(struct xen_blkif *blkif, struct page **page, int num) { unsigned long flags; int i; spin_lock_irqsave(&blkif->free_pages_lock, flags); for (i = 0; i < num; i++) list_add(&page[i]->lru, &blkif->free_pages); blkif->free_pages_num += num; spin_unlock_irqrestore(&blkif->free_pages_lock, flags); } static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) { /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */ struct page *page[NUM_BATCH_FREE_PAGES]; unsigned int num_pages = 0; unsigned long flags; spin_lock_irqsave(&blkif->free_pages_lock, flags); while (blkif->free_pages_num > num) { BUG_ON(list_empty(&blkif->free_pages)); page[num_pages] = list_first_entry(&blkif->free_pages, struct page, lru); list_del(&page[num_pages]->lru); blkif->free_pages_num--; if (++num_pages == NUM_BATCH_FREE_PAGES) { spin_unlock_irqrestore(&blkif->free_pages_lock, flags); free_xenballooned_pages(num_pages, page); spin_lock_irqsave(&blkif->free_pages_lock, flags); num_pages = 0; } } spin_unlock_irqrestore(&blkif->free_pages_lock, flags); if (num_pages != 0) free_xenballooned_pages(num_pages, page); } #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) static int do_block_io_op(struct xen_blkif *blkif); static int dispatch_rw_block_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req); static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st); #define foreach_grant_safe(pos, n, rbtree, node) \ for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \ &(pos)->node != NULL; \ (pos) = container_of(n, typeof(*(pos)), node), \ (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) /* * We don't need locking around the persistent grant helpers * because blkback uses a single-thread for each backed, so we * can be sure that this functions will never be called recursively. * * The only exception to that is put_persistent_grant, that can be called * from interrupt context (by xen_blkbk_unmap), so we have to use atomic * bit operations to modify the flags of a persistent grant and to count * the number of used grants. 
*/ static int add_persistent_gnt(struct xen_blkif *blkif, struct persistent_gnt *persistent_gnt) { struct rb_node **new = NULL, *parent = NULL; struct persistent_gnt *this; if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) { if (!blkif->vbd.overflow_max_grants) blkif->vbd.overflow_max_grants = 1; return -EBUSY; } /* Figure out where to put new node */ new = &blkif->persistent_gnts.rb_node; while (*new) { this = container_of(*new, struct persistent_gnt, node); parent = *new; if (persistent_gnt->gnt < this->gnt) new = &((*new)->rb_left); else if (persistent_gnt->gnt > this->gnt) new = &((*new)->rb_right); else { pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n"); return -EINVAL; } } bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); /* Add new node and rebalance tree. */ rb_link_node(&(persistent_gnt->node), parent, new); rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts); blkif->persistent_gnt_c++; atomic_inc(&blkif->persistent_gnt_in_use); return 0; } static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, grant_ref_t gref) { struct persistent_gnt *data; struct rb_node *node = NULL; node = blkif->persistent_gnts.rb_node; while (node) { data = container_of(node, struct persistent_gnt, node); if (gref < data->gnt) node = node->rb_left; else if (gref > data->gnt) node = node->rb_right; else { if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n"); return NULL; } set_bit(PERSISTENT_GNT_ACTIVE, data->flags); atomic_inc(&blkif->persistent_gnt_in_use); return data; } } return NULL; } static void put_persistent_gnt(struct xen_blkif *blkif, struct persistent_gnt *persistent_gnt) { if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) pr_alert_ratelimited(DRV_PFX " freeing a grant already unused"); set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); atomic_dec(&blkif->persistent_gnt_in_use); } static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, unsigned int num) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt; struct rb_node *n; int ret = 0; int segs_to_unmap = 0; foreach_grant_safe(persistent_gnt, n, root, node) { BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); gnttab_set_unmap_op(&unmap[segs_to_unmap], (unsigned long) pfn_to_kaddr(page_to_pfn( persistent_gnt->page)), GNTMAP_host_map, persistent_gnt->handle); pages[segs_to_unmap] = persistent_gnt->page; if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || !rb_next(&persistent_gnt->node)) { ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; } rb_erase(&persistent_gnt->node, root); kfree(persistent_gnt); num--; } BUG_ON(num != 0); } static void unmap_purged_grants(struct work_struct *work) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt; int ret, segs_to_unmap = 0; struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); while(!list_empty(&blkif->persistent_purge_list)) { persistent_gnt = list_first_entry(&blkif->persistent_purge_list, struct persistent_gnt, remove_node); 
list_del(&persistent_gnt->remove_node); gnttab_set_unmap_op(&unmap[segs_to_unmap], vaddr(persistent_gnt->page), GNTMAP_host_map, persistent_gnt->handle); pages[segs_to_unmap] = persistent_gnt->page; if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; } kfree(persistent_gnt); } if (segs_to_unmap > 0) { ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); } } static void purge_persistent_gnt(struct xen_blkif *blkif) { struct persistent_gnt *persistent_gnt; struct rb_node *n; unsigned int num_clean, total; bool scan_used = false; struct rb_root *root; if (blkif->persistent_gnt_c < xen_blkif_max_pgrants || (blkif->persistent_gnt_c == xen_blkif_max_pgrants && !blkif->vbd.overflow_max_grants)) { return; } if (work_pending(&blkif->persistent_purge_work)) { pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n"); return; } num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; num_clean = min(blkif->persistent_gnt_c, num_clean); if (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))) return; /* * At this point, we can assure that there will be no calls * to get_persistent_grant (because we are executing this code from * xen_blkif_schedule), there can only be calls to put_persistent_gnt, * which means that the number of currently used grants will go down, * but never up, so we will always be able to remove the requested * number of grants. */ total = num_clean; pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); INIT_LIST_HEAD(&blkif->persistent_purge_list); root = &blkif->persistent_gnts; purge_list: foreach_grant_safe(persistent_gnt, n, root, node) { BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) continue; if (!scan_used && (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) continue; rb_erase(&persistent_gnt->node, root); list_add(&persistent_gnt->remove_node, &blkif->persistent_purge_list); if (--num_clean == 0) goto finished; } /* * If we get here it means we also need to start cleaning * grants that were used since last purge in order to cope * with the requested num */ if (!scan_used) { pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean); scan_used = true; goto purge_list; } finished: /* Remove the "used" flag from all the persistent grants */ foreach_grant_safe(persistent_gnt, n, root, node) { BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); } blkif->persistent_gnt_c -= (total - num_clean); blkif->vbd.overflow_max_grants = 0; /* We can defer this work */ INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants); schedule_work(&blkif->persistent_purge_work); pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); return; } /* * Retrieve from the 'pending_reqs' a free pending_req structure to be used. 
*/ static struct pending_req *alloc_req(struct xen_blkif *blkif) { struct pending_req *req = NULL; unsigned long flags; spin_lock_irqsave(&blkif->pending_free_lock, flags); if (!list_empty(&blkif->pending_free)) { req = list_entry(blkif->pending_free.next, struct pending_req, free_list); list_del(&req->free_list); } spin_unlock_irqrestore(&blkif->pending_free_lock, flags); return req; } /* * Return the 'pending_req' structure back to the freepool. We also * wake up the thread if it was waiting for a free page. */ static void free_req(struct xen_blkif *blkif, struct pending_req *req) { unsigned long flags; int was_empty; spin_lock_irqsave(&blkif->pending_free_lock, flags); was_empty = list_empty(&blkif->pending_free); list_add(&req->free_list, &blkif->pending_free); spin_unlock_irqrestore(&blkif->pending_free_lock, flags); if (was_empty) wake_up(&blkif->pending_free_wq); } /* * Routines for managing virtual block devices (vbds). */ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, int operation) { struct xen_vbd *vbd = &blkif->vbd; int rc = -EACCES; if ((operation != READ) && vbd->readonly) goto out; if (likely(req->nr_sects)) { blkif_sector_t end = req->sector_number + req->nr_sects; if (unlikely(end < req->sector_number)) goto out; if (unlikely(end > vbd_sz(vbd))) goto out; } req->dev = vbd->pdevice; req->bdev = vbd->bdev; rc = 0; out: return rc; } static void xen_vbd_resize(struct xen_blkif *blkif) { struct xen_vbd *vbd = &blkif->vbd; struct xenbus_transaction xbt; int err; struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be); unsigned long long new_size = vbd_sz(vbd); pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n", blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice)); pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size); vbd->size = new_size; again: err = xenbus_transaction_start(&xbt); if (err) { pr_warn(DRV_PFX "Error starting transaction"); return; } err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", (unsigned long long)vbd_sz(vbd)); if (err) { pr_warn(DRV_PFX "Error writing new size"); goto abort; } /* * Write the current state; we will use this to synchronize * the front-end. If the current state is "connected" the * front-end will get the new size information online. */ err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state); if (err) { pr_warn(DRV_PFX "Error writing the state"); goto abort; } err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; if (err) pr_warn(DRV_PFX "Error ending transaction"); return; abort: xenbus_transaction_end(xbt, 1); } /* * Notification from the guest OS. 
*/ static void blkif_notify_work(struct xen_blkif *blkif) { blkif->waiting_reqs = 1; wake_up(&blkif->wq); } irqreturn_t xen_blkif_be_int(int irq, void *dev_id) { blkif_notify_work(dev_id); return IRQ_HANDLED; } /* * SCHEDULER FUNCTIONS */ static void print_stats(struct xen_blkif *blkif) { pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" " | ds %4llu | pg: %4u/%4d\n", current->comm, blkif->st_oo_req, blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req, blkif->st_ds_req, blkif->persistent_gnt_c, xen_blkif_max_pgrants); blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); blkif->st_rd_req = 0; blkif->st_wr_req = 0; blkif->st_oo_req = 0; blkif->st_ds_req = 0; } int xen_blkif_schedule(void *arg) { struct xen_blkif *blkif = arg; struct xen_vbd *vbd = &blkif->vbd; unsigned long timeout; xen_blkif_get(blkif); while (!kthread_should_stop()) { if (try_to_freeze()) continue; if (unlikely(vbd->size != vbd_sz(vbd))) xen_vbd_resize(blkif); timeout = msecs_to_jiffies(LRU_INTERVAL); timeout = wait_event_interruptible_timeout( blkif->wq, blkif->waiting_reqs || kthread_should_stop(), timeout); if (timeout == 0) goto purge_gnt_list; timeout = wait_event_interruptible_timeout( blkif->pending_free_wq, !list_empty(&blkif->pending_free) || kthread_should_stop(), timeout); if (timeout == 0) goto purge_gnt_list; blkif->waiting_reqs = 0; smp_mb(); /* clear flag *before* checking for work */ if (do_block_io_op(blkif)) blkif->waiting_reqs = 1; purge_gnt_list: if (blkif->vbd.feature_gnt_persistent && time_after(jiffies, blkif->next_lru)) { purge_persistent_gnt(blkif); blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL); } /* Shrink if we have more than xen_blkif_max_buffer_pages */ shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages); if (log_stats && time_after(jiffies, blkif->st_print)) print_stats(blkif); } /* Since we are shutting down remove all pages from the buffer */ shrink_free_pagepool(blkif, 0 /* All */); /* Free all persistent grant pages */ if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) free_persistent_gnts(blkif, &blkif->persistent_gnts, blkif->persistent_gnt_c); BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); blkif->persistent_gnt_c = 0; if (log_stats) print_stats(blkif); blkif->xenblkd = NULL; xen_blkif_put(blkif); return 0; } /* * Unmap the grant references, and also remove the M2P over-rides * used in the 'pending_req'. 
*/ static void xen_blkbk_unmap(struct xen_blkif *blkif, struct grant_page *pages[], int num) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int i, invcount = 0; int ret; for (i = 0; i < num; i++) { if (pages[i]->persistent_gnt != NULL) { put_persistent_gnt(blkif, pages[i]->persistent_gnt); continue; } if (pages[i]->handle == BLKBACK_INVALID_HANDLE) continue; unmap_pages[invcount] = pages[i]->page; gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page), GNTMAP_host_map, pages[i]->handle); pages[i]->handle = BLKBACK_INVALID_HANDLE; if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); BUG_ON(ret); put_free_pages(blkif, unmap_pages, invcount); invcount = 0; } } if (invcount) { ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); BUG_ON(ret); put_free_pages(blkif, unmap_pages, invcount); } } static int xen_blkbk_map(struct xen_blkif *blkif, struct grant_page *pages[], int num, bool ro) { struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt = NULL; phys_addr_t addr = 0; int i, seg_idx, new_map_idx; int segs_to_map = 0; int ret = 0; int last_map = 0, map_until = 0; int use_persistent_gnts; use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); /* * Fill out preq.nr_sects with proper amount of sectors, and setup * assign map[..] with the PFN of the page in our domain with the * corresponding grant reference for each page. */ again: for (i = map_until; i < num; i++) { uint32_t flags; if (use_persistent_gnts) persistent_gnt = get_persistent_gnt( blkif, pages[i]->gref); if (persistent_gnt) { /* * We are using persistent grants and * the grant is already mapped */ pages[i]->page = persistent_gnt->page; pages[i]->persistent_gnt = persistent_gnt; } else { if (get_free_page(blkif, &pages[i]->page)) goto out_of_memory; addr = vaddr(pages[i]->page); pages_to_gnt[segs_to_map] = pages[i]->page; pages[i]->persistent_gnt = NULL; flags = GNTMAP_host_map; if (!use_persistent_gnts && ro) flags |= GNTMAP_readonly; gnttab_set_map_op(&map[segs_to_map++], addr, flags, pages[i]->gref, blkif->domid); } map_until = i + 1; if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST) break; } if (segs_to_map) { ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); BUG_ON(ret); } /* * Now swizzle the MFN in our domain with the MFN from the other domain * so that when we access vaddr(pending_req,i) it has the contents of * the page from the other domain. */ for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) { if (!pages[seg_idx]->persistent_gnt) { /* This is a newly mapped grant */ BUG_ON(new_map_idx >= segs_to_map); if (unlikely(map[new_map_idx].status != 0)) { pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; ret |= 1; goto next; } pages[seg_idx]->handle = map[new_map_idx].handle; } else { continue; } if (use_persistent_gnts && blkif->persistent_gnt_c < xen_blkif_max_pgrants) { /* * We are using persistent grants, the grant is * not mapped but we might have room for it. 
*/ persistent_gnt = kmalloc(sizeof(struct persistent_gnt), GFP_KERNEL); if (!persistent_gnt) { /* * If we don't have enough memory to * allocate the persistent_gnt struct * map this grant non-persistenly */ goto next; } persistent_gnt->gnt = map[new_map_idx].ref; persistent_gnt->handle = map[new_map_idx].handle; persistent_gnt->page = pages[seg_idx]->page; if (add_persistent_gnt(blkif, persistent_gnt)) { kfree(persistent_gnt); persistent_gnt = NULL; goto next; } pages[seg_idx]->persistent_gnt = persistent_gnt; pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", persistent_gnt->gnt, blkif->persistent_gnt_c, xen_blkif_max_pgrants); goto next; } if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { blkif->vbd.overflow_max_grants = 1; pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", blkif->domid, blkif->vbd.handle); } /* * We could not map this grant persistently, so use it as * a non-persistent grant. */ next: new_map_idx++; } segs_to_map = 0; last_map = map_until; if (map_until != num) goto again; return ret; out_of_memory: pr_alert(DRV_PFX "%s: out of memory\n", __func__); put_free_pages(blkif, pages_to_gnt, segs_to_map); return -ENOMEM; } static int xen_blkbk_map_seg(struct pending_req *pending_req) { int rc; rc = xen_blkbk_map(pending_req->blkif, pending_req->segments, pending_req->nr_pages, (pending_req->operation != BLKIF_OP_READ)); return rc; } static int xen_blkbk_parse_indirect(struct blkif_request *req, struct pending_req *pending_req, struct seg_buf seg[], struct phys_req *preq) { struct grant_page **pages = pending_req->indirect_pages; struct xen_blkif *blkif = pending_req->blkif; int indirect_grefs, rc, n, nseg, i; struct blkif_request_segment_aligned *segments = NULL; nseg = pending_req->nr_pages; indirect_grefs = INDIRECT_PAGES(nseg); BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST); for (i = 0; i < indirect_grefs; i++) pages[i]->gref = req->u.indirect.indirect_grefs[i]; rc = xen_blkbk_map(blkif, pages, indirect_grefs, true); if (rc) goto unmap; for (n = 0, i = 0; n < nseg; n++) { if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { /* Map indirect segments */ if (segments) kunmap_atomic(segments); segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); } i = n % SEGS_PER_INDIRECT_FRAME; pending_req->segments[n]->gref = segments[i].gref; seg[n].nsec = segments[i].last_sect - segments[i].first_sect + 1; seg[n].offset = (segments[i].first_sect << 9); if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) || (segments[i].last_sect < segments[i].first_sect)) { rc = -EINVAL; goto unmap; } preq->nr_sects += seg[n].nsec; } unmap: if (segments) kunmap_atomic(segments); xen_blkbk_unmap(blkif, pages, indirect_grefs); return rc; } static int dispatch_discard_io(struct xen_blkif *blkif, struct blkif_request *req) { int err = 0; int status = BLKIF_RSP_OKAY; struct block_device *bdev = blkif->vbd.bdev; unsigned long secure; blkif->st_ds_req++; xen_blkif_get(blkif); secure = (blkif->vbd.discard_secure && (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? 
BLKDEV_DISCARD_SECURE : 0; err = blkdev_issue_discard(bdev, req->u.discard.sector_number, req->u.discard.nr_sectors, GFP_KERNEL, secure); if (err == -EOPNOTSUPP) { pr_debug(DRV_PFX "discard op failed, not supported\n"); status = BLKIF_RSP_EOPNOTSUPP; } else if (err) status = BLKIF_RSP_ERROR; make_response(blkif, req->u.discard.id, req->operation, status); xen_blkif_put(blkif); return err; } static int dispatch_other_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req) { free_req(blkif, pending_req); make_response(blkif, req->u.other.id, req->operation, BLKIF_RSP_EOPNOTSUPP); return -EIO; } static void xen_blk_drain_io(struct xen_blkif *blkif) { atomic_set(&blkif->drain, 1); do { /* The initial value is one, and one refcnt taken at the * start of the xen_blkif_schedule thread. */ if (atomic_read(&blkif->refcnt) <= 2) break; wait_for_completion_interruptible_timeout( &blkif->drain_complete, HZ); if (!atomic_read(&blkif->drain)) break; } while (!kthread_should_stop()); atomic_set(&blkif->drain, 0); } /* * Completion callback on the bio's. Called as bh->b_end_io() */ static void __end_block_io_op(struct pending_req *pending_req, int error) { /* An error fails the entire request. */ if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && (error == -EOPNOTSUPP)) { pr_debug(DRV_PFX "flush diskcache op failed, not supported\n"); xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && (error == -EOPNOTSUPP)) { pr_debug(DRV_PFX "write barrier op failed, not supported\n"); xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if (error) { pr_debug(DRV_PFX "Buffer not up-to-date at end of operation," " error=%d\n", error); pending_req->status = BLKIF_RSP_ERROR; } /* * If all of the bio's have completed it is time to unmap * the grant references associated with 'request' and provide * the proper response on the ring. */ if (atomic_dec_and_test(&pending_req->pendcnt)) { xen_blkbk_unmap(pending_req->blkif, pending_req->segments, pending_req->nr_pages); make_response(pending_req->blkif, pending_req->id, pending_req->operation, pending_req->status); xen_blkif_put(pending_req->blkif); if (atomic_read(&pending_req->blkif->refcnt) <= 2) { if (atomic_read(&pending_req->blkif->drain)) complete(&pending_req->blkif->drain_complete); } free_req(pending_req->blkif, pending_req); } } /* * bio callback. */ static void end_block_io_op(struct bio *bio, int error) { __end_block_io_op(bio->bi_private, error); bio_put(bio); } /* * Function to copy the from the ring buffer the 'struct blkif_request' * (which has the sectors we want, number of them, grant references, etc), * and transmute it to the block API to hand it over to the proper block disk. */ static int __do_block_io_op(struct xen_blkif *blkif) { union blkif_back_rings *blk_rings = &blkif->blk_rings; struct blkif_request req; struct pending_req *pending_req; RING_IDX rc, rp; int more_to_do = 0; rc = blk_rings->common.req_cons; rp = blk_rings->common.sring->req_prod; rmb(); /* Ensure we see queued requests up to 'rp'. 
*/ while (rc != rp) { if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) break; if (kthread_should_stop()) { more_to_do = 1; break; } pending_req = alloc_req(blkif); if (NULL == pending_req) { blkif->st_oo_req++; more_to_do = 1; break; } switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req)); break; case BLKIF_PROTOCOL_X86_32: blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc)); break; case BLKIF_PROTOCOL_X86_64: blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc)); break; default: BUG(); } blk_rings->common.req_cons = ++rc; /* before make_response() */ /* Apply all sanity checks to /private copy/ of request. */ barrier(); switch (req.operation) { case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_INDIRECT: if (dispatch_rw_block_io(blkif, &req, pending_req)) goto done; break; case BLKIF_OP_DISCARD: free_req(blkif, pending_req); if (dispatch_discard_io(blkif, &req)) goto done; break; default: if (dispatch_other_io(blkif, &req, pending_req)) goto done; break; } /* Yield point for this unbounded loop. */ cond_resched(); } done: return more_to_do; } static int do_block_io_op(struct xen_blkif *blkif) { union blkif_back_rings *blk_rings = &blkif->blk_rings; int more_to_do; do { more_to_do = __do_block_io_op(blkif); if (more_to_do) break; RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do); } while (more_to_do); return more_to_do; } /* * Transmutation of the 'struct blkif_request' to a proper 'struct bio' * and call the 'submit_bio' to pass it to the underlying storage. */ static int dispatch_rw_block_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req) { struct phys_req preq; struct seg_buf *seg = pending_req->seg; unsigned int nseg; struct bio *bio = NULL; struct bio **biolist = pending_req->biolist; int i, nbio = 0; int operation; struct blk_plug plug; bool drain = false; struct grant_page **pages = pending_req->segments; unsigned short req_operation; req_operation = req->operation == BLKIF_OP_INDIRECT ? req->u.indirect.indirect_op : req->operation; if ((req->operation == BLKIF_OP_INDIRECT) && (req_operation != BLKIF_OP_READ) && (req_operation != BLKIF_OP_WRITE)) { pr_debug(DRV_PFX "Invalid indirect operation (%u)\n", req_operation); goto fail_response; } switch (req_operation) { case BLKIF_OP_READ: blkif->st_rd_req++; operation = READ; break; case BLKIF_OP_WRITE: blkif->st_wr_req++; operation = WRITE_ODIRECT; break; case BLKIF_OP_WRITE_BARRIER: drain = true; case BLKIF_OP_FLUSH_DISKCACHE: blkif->st_f_req++; operation = WRITE_FLUSH; break; default: operation = 0; /* make gcc happy */ goto fail_response; break; } /* Check that the number of segments is sane. */ nseg = req->operation == BLKIF_OP_INDIRECT ? req->u.indirect.nr_segments : req->u.rw.nr_segments; if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || unlikely((req->operation != BLKIF_OP_INDIRECT) && (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || unlikely((req->operation == BLKIF_OP_INDIRECT) && (nseg > MAX_INDIRECT_SEGMENTS))) { pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", nseg); /* Haven't submitted any bio's yet. 
*/ goto fail_response; } preq.nr_sects = 0; pending_req->blkif = blkif; pending_req->id = req->u.rw.id; pending_req->operation = req_operation; pending_req->status = BLKIF_RSP_OKAY; pending_req->nr_pages = nseg; if (req->operation != BLKIF_OP_INDIRECT) { preq.dev = req->u.rw.handle; preq.sector_number = req->u.rw.sector_number; for (i = 0; i < nseg; i++) { pages[i]->gref = req->u.rw.seg[i].gref; seg[i].nsec = req->u.rw.seg[i].last_sect - req->u.rw.seg[i].first_sect + 1; seg[i].offset = (req->u.rw.seg[i].first_sect << 9); if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) goto fail_response; preq.nr_sects += seg[i].nsec; } } else { preq.dev = req->u.indirect.handle; preq.sector_number = req->u.indirect.sector_number; if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq)) goto fail_response; } if (xen_vbd_translate(&preq, blkif, operation) != 0) { pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", operation == READ ? "read" : "write", preq.sector_number, preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); goto fail_response; } /* * This check _MUST_ be done after xen_vbd_translate as the preq.bdev * is set there. */ for (i = 0; i < nseg; i++) { if (((int)preq.sector_number|(int)seg[i].nsec) & ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) { pr_debug(DRV_PFX "Misaligned I/O request from domain %d", blkif->domid); goto fail_response; } } /* Wait on all outstanding I/O's and once that has been completed * issue the WRITE_FLUSH. */ if (drain) xen_blk_drain_io(pending_req->blkif); /* * If we have failed at this point, we need to undo the M2P override, * set gnttab_set_unmap_op on all of the grant references and perform * the hypercall to unmap the grants - that is all done in * xen_blkbk_unmap. */ if (xen_blkbk_map_seg(pending_req)) goto fail_flush; /* * This corresponding xen_blkif_put is done in __end_block_io_op, or * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. */ xen_blkif_get(blkif); for (i = 0; i < nseg; i++) { while ((bio == NULL) || (bio_add_page(bio, pages[i]->page, seg[i].nsec << 9, seg[i].offset) == 0)) { bio = bio_alloc(GFP_KERNEL, nseg-i); if (unlikely(bio == NULL)) goto fail_put_bio; biolist[nbio++] = bio; bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_sector = preq.sector_number; } preq.sector_number += seg[i].nsec; } /* This will be hit if the operation was a flush or discard. */ if (!bio) { BUG_ON(operation != WRITE_FLUSH); bio = bio_alloc(GFP_KERNEL, 0); if (unlikely(bio == NULL)) goto fail_put_bio; biolist[nbio++] = bio; bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; } atomic_set(&pending_req->pendcnt, nbio); blk_start_plug(&plug); for (i = 0; i < nbio; i++) submit_bio(operation, biolist[i]); /* Let the I/Os go.. */ blk_finish_plug(&plug); if (operation == READ) blkif->st_rd_sect += preq.nr_sects; else if (operation & WRITE) blkif->st_wr_sect += preq.nr_sects; return 0; fail_flush: xen_blkbk_unmap(blkif, pending_req->segments, pending_req->nr_pages); fail_response: /* Haven't submitted any bio's yet. 
*/ make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR); free_req(blkif, pending_req); msleep(1); /* back off a bit */ return -EIO; fail_put_bio: for (i = 0; i < nbio; i++) bio_put(biolist[i]); atomic_set(&pending_req->pendcnt, 1); __end_block_io_op(pending_req, -EINVAL); msleep(1); /* back off a bit */ return -EIO; } /* * Put a response on the ring on how the operation fared. */ static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st) { struct blkif_response resp; unsigned long flags; union blkif_back_rings *blk_rings = &blkif->blk_rings; int notify; resp.id = id; resp.operation = op; resp.status = st; spin_lock_irqsave(&blkif->blk_ring_lock, flags); /* Place on the response ring for the relevant domain. */ switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), &resp, sizeof(resp)); break; case BLKIF_PROTOCOL_X86_32: memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), &resp, sizeof(resp)); break; case BLKIF_PROTOCOL_X86_64: memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), &resp, sizeof(resp)); break; default: BUG(); } blk_rings->common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); if (notify) notify_remote_via_irq(blkif->irq); } static int __init xen_blkif_init(void) { int rc = 0; if (!xen_domain()) return -ENODEV; rc = xen_blkif_interface_init(); if (rc) goto failed_init; rc = xen_blkif_xenbus_init(); if (rc) goto failed_init; failed_init: return rc; } module_init(xen_blkif_init); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:vbd");
/****************************************************************************** * * Back-end of the driver for virtual block devices. This portion of the * driver exports a 'unified' block-device interface that can be accessed * by any operating system that implements a compatible front end. A * reference front-end implementation can be found in: * drivers/block/xen-blkfront.c * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/freezer.h> #include <linux/bitmap.h> #include <xen/events.h> #include <xen/page.h> #include <xen/xen.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <xen/balloon.h> #include "common.h" /* * Maximum number of unused free pages to keep in the internal buffer. * Setting this to a value too low will reduce memory used in each backend, * but can have a performance penalty. * * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can * be set to a lower value that might degrade performance on some intensive * IO workloads. */ static int xen_blkif_max_buffer_pages = 1024; module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644); MODULE_PARM_DESC(max_buffer_pages, "Maximum number of free pages to keep in each block backend buffer"); /* * Maximum number of grants to map persistently in blkback. For maximum * performance this should be the total numbers of grants that can be used * to fill the ring, but since this might become too high, specially with * the use of indirect descriptors, we set it to a value that provides good * performance without using too much memory. * * When the list of persistent grants is full we clean it up using a LRU * algorithm. */ static int xen_blkif_max_pgrants = 1056; module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644); MODULE_PARM_DESC(max_persistent_grants, "Maximum number of grants to map persistently"); /* * The LRU mechanism to clean the lists of persistent grants needs to * be executed periodically. 
The time interval between consecutive executions * of the purge mechanism is set in ms. */ #define LRU_INTERVAL 100 /* * When the persistent grants list is full we will remove unused grants * from the list. The percent number of grants to be removed at each LRU * execution. */ #define LRU_PERCENT_CLEAN 5 /* Run-time switchable: /sys/module/blkback/parameters/ */ static unsigned int log_stats; module_param(log_stats, int, 0644); #define BLKBACK_INVALID_HANDLE (~0) /* Number of free pages to remove on each call to free_xenballooned_pages */ #define NUM_BATCH_FREE_PAGES 10 static inline int get_free_page(struct xen_blkif *blkif, struct page **page) { unsigned long flags; spin_lock_irqsave(&blkif->free_pages_lock, flags); if (list_empty(&blkif->free_pages)) { BUG_ON(blkif->free_pages_num != 0); spin_unlock_irqrestore(&blkif->free_pages_lock, flags); return alloc_xenballooned_pages(1, page, false); } BUG_ON(blkif->free_pages_num == 0); page[0] = list_first_entry(&blkif->free_pages, struct page, lru); list_del(&page[0]->lru); blkif->free_pages_num--; spin_unlock_irqrestore(&blkif->free_pages_lock, flags); return 0; } static inline void put_free_pages(struct xen_blkif *blkif, struct page **page, int num) { unsigned long flags; int i; spin_lock_irqsave(&blkif->free_pages_lock, flags); for (i = 0; i < num; i++) list_add(&page[i]->lru, &blkif->free_pages); blkif->free_pages_num += num; spin_unlock_irqrestore(&blkif->free_pages_lock, flags); } static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) { /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */ struct page *page[NUM_BATCH_FREE_PAGES]; unsigned int num_pages = 0; unsigned long flags; spin_lock_irqsave(&blkif->free_pages_lock, flags); while (blkif->free_pages_num > num) { BUG_ON(list_empty(&blkif->free_pages)); page[num_pages] = list_first_entry(&blkif->free_pages, struct page, lru); list_del(&page[num_pages]->lru); blkif->free_pages_num--; if (++num_pages == NUM_BATCH_FREE_PAGES) { spin_unlock_irqrestore(&blkif->free_pages_lock, flags); free_xenballooned_pages(num_pages, page); spin_lock_irqsave(&blkif->free_pages_lock, flags); num_pages = 0; } } spin_unlock_irqrestore(&blkif->free_pages_lock, flags); if (num_pages != 0) free_xenballooned_pages(num_pages, page); } #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) static int do_block_io_op(struct xen_blkif *blkif); static int dispatch_rw_block_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req); static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st); #define foreach_grant_safe(pos, n, rbtree, node) \ for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \ &(pos)->node != NULL; \ (pos) = container_of(n, typeof(*(pos)), node), \ (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) /* * We don't need locking around the persistent grant helpers * because blkback uses a single-thread for each backed, so we * can be sure that this functions will never be called recursively. * * The only exception to that is put_persistent_grant, that can be called * from interrupt context (by xen_blkbk_unmap), so we have to use atomic * bit operations to modify the flags of a persistent grant and to count * the number of used grants. 
*/ static int add_persistent_gnt(struct xen_blkif *blkif, struct persistent_gnt *persistent_gnt) { struct rb_node **new = NULL, *parent = NULL; struct persistent_gnt *this; if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) { if (!blkif->vbd.overflow_max_grants) blkif->vbd.overflow_max_grants = 1; return -EBUSY; } /* Figure out where to put new node */ new = &blkif->persistent_gnts.rb_node; while (*new) { this = container_of(*new, struct persistent_gnt, node); parent = *new; if (persistent_gnt->gnt < this->gnt) new = &((*new)->rb_left); else if (persistent_gnt->gnt > this->gnt) new = &((*new)->rb_right); else { pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n"); return -EINVAL; } } bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); /* Add new node and rebalance tree. */ rb_link_node(&(persistent_gnt->node), parent, new); rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts); blkif->persistent_gnt_c++; atomic_inc(&blkif->persistent_gnt_in_use); return 0; } static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, grant_ref_t gref) { struct persistent_gnt *data; struct rb_node *node = NULL; node = blkif->persistent_gnts.rb_node; while (node) { data = container_of(node, struct persistent_gnt, node); if (gref < data->gnt) node = node->rb_left; else if (gref > data->gnt) node = node->rb_right; else { if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n"); return NULL; } set_bit(PERSISTENT_GNT_ACTIVE, data->flags); atomic_inc(&blkif->persistent_gnt_in_use); return data; } } return NULL; } static void put_persistent_gnt(struct xen_blkif *blkif, struct persistent_gnt *persistent_gnt) { if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) pr_alert_ratelimited(DRV_PFX " freeing a grant already unused"); set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); atomic_dec(&blkif->persistent_gnt_in_use); } static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, unsigned int num) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt; struct rb_node *n; int ret = 0; int segs_to_unmap = 0; foreach_grant_safe(persistent_gnt, n, root, node) { BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); gnttab_set_unmap_op(&unmap[segs_to_unmap], (unsigned long) pfn_to_kaddr(page_to_pfn( persistent_gnt->page)), GNTMAP_host_map, persistent_gnt->handle); pages[segs_to_unmap] = persistent_gnt->page; if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || !rb_next(&persistent_gnt->node)) { ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; } rb_erase(&persistent_gnt->node, root); kfree(persistent_gnt); num--; } BUG_ON(num != 0); } static void unmap_purged_grants(struct work_struct *work) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt; int ret, segs_to_unmap = 0; struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); while(!list_empty(&blkif->persistent_purge_list)) { persistent_gnt = list_first_entry(&blkif->persistent_purge_list, struct persistent_gnt, remove_node); 
list_del(&persistent_gnt->remove_node); gnttab_set_unmap_op(&unmap[segs_to_unmap], vaddr(persistent_gnt->page), GNTMAP_host_map, persistent_gnt->handle); pages[segs_to_unmap] = persistent_gnt->page; if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; } kfree(persistent_gnt); } if (segs_to_unmap > 0) { ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); put_free_pages(blkif, pages, segs_to_unmap); } } static void purge_persistent_gnt(struct xen_blkif *blkif) { struct persistent_gnt *persistent_gnt; struct rb_node *n; unsigned int num_clean, total; bool scan_used = false; struct rb_root *root; if (blkif->persistent_gnt_c < xen_blkif_max_pgrants || (blkif->persistent_gnt_c == xen_blkif_max_pgrants && !blkif->vbd.overflow_max_grants)) { return; } if (work_pending(&blkif->persistent_purge_work)) { pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n"); return; } num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; num_clean = min(blkif->persistent_gnt_c, num_clean); if (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))) return; /* * At this point, we can assure that there will be no calls * to get_persistent_grant (because we are executing this code from * xen_blkif_schedule), there can only be calls to put_persistent_gnt, * which means that the number of currently used grants will go down, * but never up, so we will always be able to remove the requested * number of grants. */ total = num_clean; pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); INIT_LIST_HEAD(&blkif->persistent_purge_list); root = &blkif->persistent_gnts; purge_list: foreach_grant_safe(persistent_gnt, n, root, node) { BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) continue; if (!scan_used && (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) continue; rb_erase(&persistent_gnt->node, root); list_add(&persistent_gnt->remove_node, &blkif->persistent_purge_list); if (--num_clean == 0) goto finished; } /* * If we get here it means we also need to start cleaning * grants that were used since last purge in order to cope * with the requested num */ if (!scan_used) { pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean); scan_used = true; goto purge_list; } finished: /* Remove the "used" flag from all the persistent grants */ foreach_grant_safe(persistent_gnt, n, root, node) { BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); } blkif->persistent_gnt_c -= (total - num_clean); blkif->vbd.overflow_max_grants = 0; /* We can defer this work */ INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants); schedule_work(&blkif->persistent_purge_work); pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); return; } /* * Retrieve from the 'pending_reqs' a free pending_req structure to be used. 
*/ static struct pending_req *alloc_req(struct xen_blkif *blkif) { struct pending_req *req = NULL; unsigned long flags; spin_lock_irqsave(&blkif->pending_free_lock, flags); if (!list_empty(&blkif->pending_free)) { req = list_entry(blkif->pending_free.next, struct pending_req, free_list); list_del(&req->free_list); } spin_unlock_irqrestore(&blkif->pending_free_lock, flags); return req; } /* * Return the 'pending_req' structure back to the freepool. We also * wake up the thread if it was waiting for a free page. */ static void free_req(struct xen_blkif *blkif, struct pending_req *req) { unsigned long flags; int was_empty; spin_lock_irqsave(&blkif->pending_free_lock, flags); was_empty = list_empty(&blkif->pending_free); list_add(&req->free_list, &blkif->pending_free); spin_unlock_irqrestore(&blkif->pending_free_lock, flags); if (was_empty) wake_up(&blkif->pending_free_wq); } /* * Routines for managing virtual block devices (vbds). */ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, int operation) { struct xen_vbd *vbd = &blkif->vbd; int rc = -EACCES; if ((operation != READ) && vbd->readonly) goto out; if (likely(req->nr_sects)) { blkif_sector_t end = req->sector_number + req->nr_sects; if (unlikely(end < req->sector_number)) goto out; if (unlikely(end > vbd_sz(vbd))) goto out; } req->dev = vbd->pdevice; req->bdev = vbd->bdev; rc = 0; out: return rc; } static void xen_vbd_resize(struct xen_blkif *blkif) { struct xen_vbd *vbd = &blkif->vbd; struct xenbus_transaction xbt; int err; struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be); unsigned long long new_size = vbd_sz(vbd); pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n", blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice)); pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size); vbd->size = new_size; again: err = xenbus_transaction_start(&xbt); if (err) { pr_warn(DRV_PFX "Error starting transaction"); return; } err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", (unsigned long long)vbd_sz(vbd)); if (err) { pr_warn(DRV_PFX "Error writing new size"); goto abort; } /* * Write the current state; we will use this to synchronize * the front-end. If the current state is "connected" the * front-end will get the new size information online. */ err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state); if (err) { pr_warn(DRV_PFX "Error writing the state"); goto abort; } err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; if (err) pr_warn(DRV_PFX "Error ending transaction"); return; abort: xenbus_transaction_end(xbt, 1); } /* * Notification from the guest OS. 
*/ static void blkif_notify_work(struct xen_blkif *blkif) { blkif->waiting_reqs = 1; wake_up(&blkif->wq); } irqreturn_t xen_blkif_be_int(int irq, void *dev_id) { blkif_notify_work(dev_id); return IRQ_HANDLED; } /* * SCHEDULER FUNCTIONS */ static void print_stats(struct xen_blkif *blkif) { pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" " | ds %4llu | pg: %4u/%4d\n", current->comm, blkif->st_oo_req, blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req, blkif->st_ds_req, blkif->persistent_gnt_c, xen_blkif_max_pgrants); blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); blkif->st_rd_req = 0; blkif->st_wr_req = 0; blkif->st_oo_req = 0; blkif->st_ds_req = 0; } int xen_blkif_schedule(void *arg) { struct xen_blkif *blkif = arg; struct xen_vbd *vbd = &blkif->vbd; unsigned long timeout; xen_blkif_get(blkif); while (!kthread_should_stop()) { if (try_to_freeze()) continue; if (unlikely(vbd->size != vbd_sz(vbd))) xen_vbd_resize(blkif); timeout = msecs_to_jiffies(LRU_INTERVAL); timeout = wait_event_interruptible_timeout( blkif->wq, blkif->waiting_reqs || kthread_should_stop(), timeout); if (timeout == 0) goto purge_gnt_list; timeout = wait_event_interruptible_timeout( blkif->pending_free_wq, !list_empty(&blkif->pending_free) || kthread_should_stop(), timeout); if (timeout == 0) goto purge_gnt_list; blkif->waiting_reqs = 0; smp_mb(); /* clear flag *before* checking for work */ if (do_block_io_op(blkif)) blkif->waiting_reqs = 1; purge_gnt_list: if (blkif->vbd.feature_gnt_persistent && time_after(jiffies, blkif->next_lru)) { purge_persistent_gnt(blkif); blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL); } /* Shrink if we have more than xen_blkif_max_buffer_pages */ shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages); if (log_stats && time_after(jiffies, blkif->st_print)) print_stats(blkif); } /* Since we are shutting down remove all pages from the buffer */ shrink_free_pagepool(blkif, 0 /* All */); /* Free all persistent grant pages */ if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) free_persistent_gnts(blkif, &blkif->persistent_gnts, blkif->persistent_gnt_c); BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); blkif->persistent_gnt_c = 0; if (log_stats) print_stats(blkif); blkif->xenblkd = NULL; xen_blkif_put(blkif); return 0; } /* * Unmap the grant references, and also remove the M2P over-rides * used in the 'pending_req'. 
*/ static void xen_blkbk_unmap(struct xen_blkif *blkif, struct grant_page *pages[], int num) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int i, invcount = 0; int ret; for (i = 0; i < num; i++) { if (pages[i]->persistent_gnt != NULL) { put_persistent_gnt(blkif, pages[i]->persistent_gnt); continue; } if (pages[i]->handle == BLKBACK_INVALID_HANDLE) continue; unmap_pages[invcount] = pages[i]->page; gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page), GNTMAP_host_map, pages[i]->handle); pages[i]->handle = BLKBACK_INVALID_HANDLE; if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); BUG_ON(ret); put_free_pages(blkif, unmap_pages, invcount); invcount = 0; } } if (invcount) { ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); BUG_ON(ret); put_free_pages(blkif, unmap_pages, invcount); } } static int xen_blkbk_map(struct xen_blkif *blkif, struct grant_page *pages[], int num, bool ro) { struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt = NULL; phys_addr_t addr = 0; int i, seg_idx, new_map_idx; int segs_to_map = 0; int ret = 0; int last_map = 0, map_until = 0; int use_persistent_gnts; use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); /* * Fill out preq.nr_sects with proper amount of sectors, and setup * assign map[..] with the PFN of the page in our domain with the * corresponding grant reference for each page. */ again: for (i = map_until; i < num; i++) { uint32_t flags; if (use_persistent_gnts) persistent_gnt = get_persistent_gnt( blkif, pages[i]->gref); if (persistent_gnt) { /* * We are using persistent grants and * the grant is already mapped */ pages[i]->page = persistent_gnt->page; pages[i]->persistent_gnt = persistent_gnt; } else { if (get_free_page(blkif, &pages[i]->page)) goto out_of_memory; addr = vaddr(pages[i]->page); pages_to_gnt[segs_to_map] = pages[i]->page; pages[i]->persistent_gnt = NULL; flags = GNTMAP_host_map; if (!use_persistent_gnts && ro) flags |= GNTMAP_readonly; gnttab_set_map_op(&map[segs_to_map++], addr, flags, pages[i]->gref, blkif->domid); } map_until = i + 1; if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST) break; } if (segs_to_map) { ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); BUG_ON(ret); } /* * Now swizzle the MFN in our domain with the MFN from the other domain * so that when we access vaddr(pending_req,i) it has the contents of * the page from the other domain. */ for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) { if (!pages[seg_idx]->persistent_gnt) { /* This is a newly mapped grant */ BUG_ON(new_map_idx >= segs_to_map); if (unlikely(map[new_map_idx].status != 0)) { pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; ret |= 1; goto next; } pages[seg_idx]->handle = map[new_map_idx].handle; } else { continue; } if (use_persistent_gnts && blkif->persistent_gnt_c < xen_blkif_max_pgrants) { /* * We are using persistent grants, the grant is * not mapped but we might have room for it. 
*/ persistent_gnt = kmalloc(sizeof(struct persistent_gnt), GFP_KERNEL); if (!persistent_gnt) { /* * If we don't have enough memory to * allocate the persistent_gnt struct * map this grant non-persistenly */ goto next; } persistent_gnt->gnt = map[new_map_idx].ref; persistent_gnt->handle = map[new_map_idx].handle; persistent_gnt->page = pages[seg_idx]->page; if (add_persistent_gnt(blkif, persistent_gnt)) { kfree(persistent_gnt); persistent_gnt = NULL; goto next; } pages[seg_idx]->persistent_gnt = persistent_gnt; pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", persistent_gnt->gnt, blkif->persistent_gnt_c, xen_blkif_max_pgrants); goto next; } if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { blkif->vbd.overflow_max_grants = 1; pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", blkif->domid, blkif->vbd.handle); } /* * We could not map this grant persistently, so use it as * a non-persistent grant. */ next: new_map_idx++; } segs_to_map = 0; last_map = map_until; if (map_until != num) goto again; return ret; out_of_memory: pr_alert(DRV_PFX "%s: out of memory\n", __func__); put_free_pages(blkif, pages_to_gnt, segs_to_map); return -ENOMEM; } static int xen_blkbk_map_seg(struct pending_req *pending_req) { int rc; rc = xen_blkbk_map(pending_req->blkif, pending_req->segments, pending_req->nr_pages, (pending_req->operation != BLKIF_OP_READ)); return rc; } static int xen_blkbk_parse_indirect(struct blkif_request *req, struct pending_req *pending_req, struct seg_buf seg[], struct phys_req *preq) { struct grant_page **pages = pending_req->indirect_pages; struct xen_blkif *blkif = pending_req->blkif; int indirect_grefs, rc, n, nseg, i; struct blkif_request_segment_aligned *segments = NULL; nseg = pending_req->nr_pages; indirect_grefs = INDIRECT_PAGES(nseg); BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST); for (i = 0; i < indirect_grefs; i++) pages[i]->gref = req->u.indirect.indirect_grefs[i]; rc = xen_blkbk_map(blkif, pages, indirect_grefs, true); if (rc) goto unmap; for (n = 0, i = 0; n < nseg; n++) { if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { /* Map indirect segments */ if (segments) kunmap_atomic(segments); segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); } i = n % SEGS_PER_INDIRECT_FRAME; pending_req->segments[n]->gref = segments[i].gref; seg[n].nsec = segments[i].last_sect - segments[i].first_sect + 1; seg[n].offset = (segments[i].first_sect << 9); if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) || (segments[i].last_sect < segments[i].first_sect)) { rc = -EINVAL; goto unmap; } preq->nr_sects += seg[n].nsec; } unmap: if (segments) kunmap_atomic(segments); xen_blkbk_unmap(blkif, pages, indirect_grefs); return rc; } static int dispatch_discard_io(struct xen_blkif *blkif, struct blkif_request *req) { int err = 0; int status = BLKIF_RSP_OKAY; struct block_device *bdev = blkif->vbd.bdev; unsigned long secure; struct phys_req preq; preq.sector_number = req->u.discard.sector_number; preq.nr_sects = req->u.discard.nr_sectors; err = xen_vbd_translate(&preq, blkif, WRITE); if (err) { pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n", preq.sector_number, preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); goto fail_response; } blkif->st_ds_req++; xen_blkif_get(blkif); secure = (blkif->vbd.discard_secure && (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? 
BLKDEV_DISCARD_SECURE : 0; err = blkdev_issue_discard(bdev, req->u.discard.sector_number, req->u.discard.nr_sectors, GFP_KERNEL, secure); fail_response: if (err == -EOPNOTSUPP) { pr_debug(DRV_PFX "discard op failed, not supported\n"); status = BLKIF_RSP_EOPNOTSUPP; } else if (err) status = BLKIF_RSP_ERROR; make_response(blkif, req->u.discard.id, req->operation, status); xen_blkif_put(blkif); return err; } static int dispatch_other_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req) { free_req(blkif, pending_req); make_response(blkif, req->u.other.id, req->operation, BLKIF_RSP_EOPNOTSUPP); return -EIO; } static void xen_blk_drain_io(struct xen_blkif *blkif) { atomic_set(&blkif->drain, 1); do { /* The initial value is one, and one refcnt taken at the * start of the xen_blkif_schedule thread. */ if (atomic_read(&blkif->refcnt) <= 2) break; wait_for_completion_interruptible_timeout( &blkif->drain_complete, HZ); if (!atomic_read(&blkif->drain)) break; } while (!kthread_should_stop()); atomic_set(&blkif->drain, 0); } /* * Completion callback on the bio's. Called as bh->b_end_io() */ static void __end_block_io_op(struct pending_req *pending_req, int error) { /* An error fails the entire request. */ if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && (error == -EOPNOTSUPP)) { pr_debug(DRV_PFX "flush diskcache op failed, not supported\n"); xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && (error == -EOPNOTSUPP)) { pr_debug(DRV_PFX "write barrier op failed, not supported\n"); xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if (error) { pr_debug(DRV_PFX "Buffer not up-to-date at end of operation," " error=%d\n", error); pending_req->status = BLKIF_RSP_ERROR; } /* * If all of the bio's have completed it is time to unmap * the grant references associated with 'request' and provide * the proper response on the ring. */ if (atomic_dec_and_test(&pending_req->pendcnt)) { xen_blkbk_unmap(pending_req->blkif, pending_req->segments, pending_req->nr_pages); make_response(pending_req->blkif, pending_req->id, pending_req->operation, pending_req->status); xen_blkif_put(pending_req->blkif); if (atomic_read(&pending_req->blkif->refcnt) <= 2) { if (atomic_read(&pending_req->blkif->drain)) complete(&pending_req->blkif->drain_complete); } free_req(pending_req->blkif, pending_req); } } /* * bio callback. */ static void end_block_io_op(struct bio *bio, int error) { __end_block_io_op(bio->bi_private, error); bio_put(bio); } /* * Function to copy the from the ring buffer the 'struct blkif_request' * (which has the sectors we want, number of them, grant references, etc), * and transmute it to the block API to hand it over to the proper block disk. */ static int __do_block_io_op(struct xen_blkif *blkif) { union blkif_back_rings *blk_rings = &blkif->blk_rings; struct blkif_request req; struct pending_req *pending_req; RING_IDX rc, rp; int more_to_do = 0; rc = blk_rings->common.req_cons; rp = blk_rings->common.sring->req_prod; rmb(); /* Ensure we see queued requests up to 'rp'. 
*/ while (rc != rp) { if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) break; if (kthread_should_stop()) { more_to_do = 1; break; } pending_req = alloc_req(blkif); if (NULL == pending_req) { blkif->st_oo_req++; more_to_do = 1; break; } switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req)); break; case BLKIF_PROTOCOL_X86_32: blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc)); break; case BLKIF_PROTOCOL_X86_64: blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc)); break; default: BUG(); } blk_rings->common.req_cons = ++rc; /* before make_response() */ /* Apply all sanity checks to /private copy/ of request. */ barrier(); switch (req.operation) { case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_INDIRECT: if (dispatch_rw_block_io(blkif, &req, pending_req)) goto done; break; case BLKIF_OP_DISCARD: free_req(blkif, pending_req); if (dispatch_discard_io(blkif, &req)) goto done; break; default: if (dispatch_other_io(blkif, &req, pending_req)) goto done; break; } /* Yield point for this unbounded loop. */ cond_resched(); } done: return more_to_do; } static int do_block_io_op(struct xen_blkif *blkif) { union blkif_back_rings *blk_rings = &blkif->blk_rings; int more_to_do; do { more_to_do = __do_block_io_op(blkif); if (more_to_do) break; RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do); } while (more_to_do); return more_to_do; } /* * Transmutation of the 'struct blkif_request' to a proper 'struct bio' * and call the 'submit_bio' to pass it to the underlying storage. */ static int dispatch_rw_block_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req) { struct phys_req preq; struct seg_buf *seg = pending_req->seg; unsigned int nseg; struct bio *bio = NULL; struct bio **biolist = pending_req->biolist; int i, nbio = 0; int operation; struct blk_plug plug; bool drain = false; struct grant_page **pages = pending_req->segments; unsigned short req_operation; req_operation = req->operation == BLKIF_OP_INDIRECT ? req->u.indirect.indirect_op : req->operation; if ((req->operation == BLKIF_OP_INDIRECT) && (req_operation != BLKIF_OP_READ) && (req_operation != BLKIF_OP_WRITE)) { pr_debug(DRV_PFX "Invalid indirect operation (%u)\n", req_operation); goto fail_response; } switch (req_operation) { case BLKIF_OP_READ: blkif->st_rd_req++; operation = READ; break; case BLKIF_OP_WRITE: blkif->st_wr_req++; operation = WRITE_ODIRECT; break; case BLKIF_OP_WRITE_BARRIER: drain = true; case BLKIF_OP_FLUSH_DISKCACHE: blkif->st_f_req++; operation = WRITE_FLUSH; break; default: operation = 0; /* make gcc happy */ goto fail_response; break; } /* Check that the number of segments is sane. */ nseg = req->operation == BLKIF_OP_INDIRECT ? req->u.indirect.nr_segments : req->u.rw.nr_segments; if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || unlikely((req->operation != BLKIF_OP_INDIRECT) && (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || unlikely((req->operation == BLKIF_OP_INDIRECT) && (nseg > MAX_INDIRECT_SEGMENTS))) { pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", nseg); /* Haven't submitted any bio's yet. 
*/ goto fail_response; } preq.nr_sects = 0; pending_req->blkif = blkif; pending_req->id = req->u.rw.id; pending_req->operation = req_operation; pending_req->status = BLKIF_RSP_OKAY; pending_req->nr_pages = nseg; if (req->operation != BLKIF_OP_INDIRECT) { preq.dev = req->u.rw.handle; preq.sector_number = req->u.rw.sector_number; for (i = 0; i < nseg; i++) { pages[i]->gref = req->u.rw.seg[i].gref; seg[i].nsec = req->u.rw.seg[i].last_sect - req->u.rw.seg[i].first_sect + 1; seg[i].offset = (req->u.rw.seg[i].first_sect << 9); if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) goto fail_response; preq.nr_sects += seg[i].nsec; } } else { preq.dev = req->u.indirect.handle; preq.sector_number = req->u.indirect.sector_number; if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq)) goto fail_response; } if (xen_vbd_translate(&preq, blkif, operation) != 0) { pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", operation == READ ? "read" : "write", preq.sector_number, preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); goto fail_response; } /* * This check _MUST_ be done after xen_vbd_translate as the preq.bdev * is set there. */ for (i = 0; i < nseg; i++) { if (((int)preq.sector_number|(int)seg[i].nsec) & ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) { pr_debug(DRV_PFX "Misaligned I/O request from domain %d", blkif->domid); goto fail_response; } } /* Wait on all outstanding I/O's and once that has been completed * issue the WRITE_FLUSH. */ if (drain) xen_blk_drain_io(pending_req->blkif); /* * If we have failed at this point, we need to undo the M2P override, * set gnttab_set_unmap_op on all of the grant references and perform * the hypercall to unmap the grants - that is all done in * xen_blkbk_unmap. */ if (xen_blkbk_map_seg(pending_req)) goto fail_flush; /* * This corresponding xen_blkif_put is done in __end_block_io_op, or * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. */ xen_blkif_get(blkif); for (i = 0; i < nseg; i++) { while ((bio == NULL) || (bio_add_page(bio, pages[i]->page, seg[i].nsec << 9, seg[i].offset) == 0)) { bio = bio_alloc(GFP_KERNEL, nseg-i); if (unlikely(bio == NULL)) goto fail_put_bio; biolist[nbio++] = bio; bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_sector = preq.sector_number; } preq.sector_number += seg[i].nsec; } /* This will be hit if the operation was a flush or discard. */ if (!bio) { BUG_ON(operation != WRITE_FLUSH); bio = bio_alloc(GFP_KERNEL, 0); if (unlikely(bio == NULL)) goto fail_put_bio; biolist[nbio++] = bio; bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; } atomic_set(&pending_req->pendcnt, nbio); blk_start_plug(&plug); for (i = 0; i < nbio; i++) submit_bio(operation, biolist[i]); /* Let the I/Os go.. */ blk_finish_plug(&plug); if (operation == READ) blkif->st_rd_sect += preq.nr_sects; else if (operation & WRITE) blkif->st_wr_sect += preq.nr_sects; return 0; fail_flush: xen_blkbk_unmap(blkif, pending_req->segments, pending_req->nr_pages); fail_response: /* Haven't submitted any bio's yet. 
*/ make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR); free_req(blkif, pending_req); msleep(1); /* back off a bit */ return -EIO; fail_put_bio: for (i = 0; i < nbio; i++) bio_put(biolist[i]); atomic_set(&pending_req->pendcnt, 1); __end_block_io_op(pending_req, -EINVAL); msleep(1); /* back off a bit */ return -EIO; } /* * Put a response on the ring on how the operation fared. */ static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st) { struct blkif_response resp; unsigned long flags; union blkif_back_rings *blk_rings = &blkif->blk_rings; int notify; resp.id = id; resp.operation = op; resp.status = st; spin_lock_irqsave(&blkif->blk_ring_lock, flags); /* Place on the response ring for the relevant domain. */ switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), &resp, sizeof(resp)); break; case BLKIF_PROTOCOL_X86_32: memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), &resp, sizeof(resp)); break; case BLKIF_PROTOCOL_X86_64: memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), &resp, sizeof(resp)); break; default: BUG(); } blk_rings->common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); if (notify) notify_remote_via_irq(blkif->irq); } static int __init xen_blkif_init(void) { int rc = 0; if (!xen_domain()) return -ENODEV; rc = xen_blkif_interface_init(); if (rc) goto failed_init; rc = xen_blkif_xenbus_init(); if (rc) goto failed_init; failed_init: return rc; } module_init(xen_blkif_init); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:vbd");
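The request loop in __do_block_io_op() above copies each request out of the shared ring (memcpy of RING_GET_REQUEST, then barrier()) before any field is checked, so the frontend cannot rewrite a field between validation and use. Below is a minimal user-space sketch of that snapshot-then-validate pattern; every name in it (demo_req, DEMO_DEV_SECTORS, handle_request) is invented for illustration and nothing here is part of the xen-blkback API.

/*
 * Minimal sketch of the "validate a private copy" pattern used by
 * __do_block_io_op(): snapshot the request out of shared memory first,
 * then do every check on the snapshot. All names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_req {			/* stand-in for a ring request */
	uint64_t sector;
	uint32_t nr_sects;
};

#define DEMO_DEV_SECTORS 1024ULL

/* 'shared' models a ring slot the other domain can rewrite at any time. */
static int handle_request(volatile struct demo_req *shared)
{
	struct demo_req req;

	/* Snapshot before validating, mirroring memcpy(&req, RING_GET_REQUEST(...)). */
	memcpy(&req, (const void *)shared, sizeof(req));
	__asm__ __volatile__("" ::: "memory");	/* GCC-style compiler barrier, like barrier() */

	/* All checks operate on the private copy only. */
	if (req.nr_sects == 0 ||
	    req.sector >= DEMO_DEV_SECTORS ||
	    req.nr_sects > DEMO_DEV_SECTORS - req.sector) {
		fprintf(stderr, "rejecting request [%llu,+%u)\n",
			(unsigned long long)req.sector, (unsigned)req.nr_sects);
		return -1;
	}

	printf("accepted request [%llu,+%u)\n",
	       (unsigned long long)req.sector, (unsigned)req.nr_sects);
	return 0;
}

int main(void)
{
	struct demo_req slot = { .sector = 8, .nr_sects = 16 };

	handle_request(&slot);			/* accepted */
	slot.sector = DEMO_DEV_SECTORS;		/* out of range */
	handle_request(&slot);			/* rejected */
	return 0;
}

The inline-asm statement stands in for the kernel's barrier(); the real loop additionally bounds the consumer index with RING_REQUEST_CONS_OVERFLOW() before touching the ring slot.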
static int dispatch_discard_io(struct xen_blkif *blkif, struct blkif_request *req) { int err = 0; int status = BLKIF_RSP_OKAY; struct block_device *bdev = blkif->vbd.bdev; unsigned long secure; blkif->st_ds_req++; xen_blkif_get(blkif); secure = (blkif->vbd.discard_secure && (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? BLKDEV_DISCARD_SECURE : 0; err = blkdev_issue_discard(bdev, req->u.discard.sector_number, req->u.discard.nr_sectors, GFP_KERNEL, secure); if (err == -EOPNOTSUPP) { pr_debug(DRV_PFX "discard op failed, not supported\n"); status = BLKIF_RSP_EOPNOTSUPP; } else if (err) status = BLKIF_RSP_ERROR; make_response(blkif, req->u.discard.id, req->operation, status); xen_blkif_put(blkif); return err; }
static int dispatch_discard_io(struct xen_blkif *blkif, struct blkif_request *req) { int err = 0; int status = BLKIF_RSP_OKAY; struct block_device *bdev = blkif->vbd.bdev; unsigned long secure; struct phys_req preq; preq.sector_number = req->u.discard.sector_number; preq.nr_sects = req->u.discard.nr_sectors; err = xen_vbd_translate(&preq, blkif, WRITE); if (err) { pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n", preq.sector_number, preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); goto fail_response; } blkif->st_ds_req++; xen_blkif_get(blkif); secure = (blkif->vbd.discard_secure && (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? BLKDEV_DISCARD_SECURE : 0; err = blkdev_issue_discard(bdev, req->u.discard.sector_number, req->u.discard.nr_sectors, GFP_KERNEL, secure); fail_response: if (err == -EOPNOTSUPP) { pr_debug(DRV_PFX "discard op failed, not supported\n"); status = BLKIF_RSP_EOPNOTSUPP; } else if (err) status = BLKIF_RSP_ERROR; make_response(blkif, req->u.discard.id, req->operation, status); xen_blkif_put(blkif); return err; }
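The two versions of dispatch_discard_io() above differ only in the added xen_vbd_translate() step: the original passed the guest-supplied sector range straight to blkdev_issue_discard(), while the patched version first checks the range against the virtual block device and jumps to fail_response when it does not fit. The following is a small, self-contained sketch of that added bounds check; demo_vbd, demo_translate and demo_dispatch_discard are made-up names and the device size is an assumption, so this only mirrors the shape of the fix, not the kernel helpers themselves.

/*
 * Hedged sketch of the bounds check the fix places in front of the discard:
 * a stand-in for xen_vbd_translate() rejects ranges outside the virtual
 * device before anything is issued. Names and sizes are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_vbd { uint64_t nr_sectors; };	/* stand-in for the vbd */

static int demo_translate(const struct demo_vbd *vbd,
			  uint64_t sector, uint64_t nr_sects)
{
	/* Overflow-safe: sector + nr_sects could wrap if added directly. */
	if (sector >= vbd->nr_sectors || nr_sects > vbd->nr_sectors - sector)
		return -1;			/* "access denied" */
	return 0;
}

static int demo_dispatch_discard(const struct demo_vbd *vbd,
				 uint64_t sector, uint64_t nr_sects)
{
	int err = demo_translate(vbd, sector, nr_sects);

	if (err) {
		fprintf(stderr, "access denied: DISCARD [%llu->%llu]\n",
			(unsigned long long)sector,
			(unsigned long long)(sector + nr_sects));
		goto fail_response;		/* mirrors the patched control flow */
	}

	printf("discarding [%llu,+%llu)\n",
	       (unsigned long long)sector, (unsigned long long)nr_sects);

fail_response:
	/* In the driver this is where the BLKIF response goes on the ring. */
	return err;
}

int main(void)
{
	struct demo_vbd vbd = { .nr_sectors = 2048 };

	demo_dispatch_discard(&vbd, 0, 128);	/* in range */
	demo_dispatch_discard(&vbd, 4096, 1);	/* rejected */
	return 0;
}

Writing the check as nr_sects > nr_sectors - sector, after sector >= nr_sectors has already been ruled out, keeps the comparison overflow-safe even for hostile 64-bit inputs.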
{'added': [(879, '\tstruct phys_req preq;'), (880, ''), (881, '\tpreq.sector_number = req->u.discard.sector_number;'), (882, '\tpreq.nr_sects = req->u.discard.nr_sectors;'), (884, '\terr = xen_vbd_translate(&preq, blkif, WRITE);'), (885, '\tif (err) {'), (886, '\t\tpr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\\n",'), (887, '\t\t\tpreq.sector_number,'), (888, '\t\t\tpreq.sector_number + preq.nr_sects, blkif->vbd.pdevice);'), (889, '\t\tgoto fail_response;'), (890, '\t}'), (901, 'fail_response:')], 'deleted': [(890, '')]}
12
1
978
6140
https://github.com/torvalds/linux
CVE-2013-2140
['CWE-20']
insn-eval.c
get_desc
/* * Utility functions for x86 operand and address decoding * * Copyright (C) Intel Corporation 2017 */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/mmu_context.h> #include <asm/desc_defs.h> #include <asm/desc.h> #include <asm/inat.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> #include <asm/vm86.h> #undef pr_fmt #define pr_fmt(fmt) "insn: " fmt enum reg_type { REG_TYPE_RM = 0, REG_TYPE_INDEX, REG_TYPE_BASE, }; /** * is_string_insn() - Determine if instruction is a string instruction * @insn: Instruction containing the opcode to inspect * * Returns: * * true if the instruction, determined by the opcode, is any of the * string instructions as defined in the Intel Software Development manual. * False otherwise. */ static bool is_string_insn(struct insn *insn) { insn_get_opcode(insn); /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; switch (insn->opcode.bytes[0]) { case 0x6c ... 0x6f: /* INS, OUTS */ case 0xa4 ... 0xa7: /* MOVS, CMPS */ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ return true; default: return false; } } /** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * * Inspect the instruction prefixes in @insn and find segment overrides, if any. * * Returns: * * A constant identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override * prefixes were found. * * -EINVAL in case of error. */ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ for (i = 0; i < insn->prefixes.nbytes; i++) { insn_attr_t attr; attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_SS): idx = INAT_SEG_REG_SS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_DS): idx = INAT_SEG_REG_DS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_ES): idx = INAT_SEG_REG_ES; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_FS): idx = INAT_SEG_REG_FS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_GS): idx = INAT_SEG_REG_GS; num_overrides++; break; /* No default action needed. */ } } /* More than one segment override prefix leads to undefined behavior. */ if (num_overrides > 1) return -EINVAL; return idx; } /** * check_seg_overrides() - check if segment override prefixes are allowed * @insn: Valid instruction with segment override prefixes * @regoff: Operand offset, in pt_regs, for which the check is performed * * For a particular register used in register-indirect addressing, determine if * segment override prefixes can be used. Specifically, no overrides are allowed * for rDI if used with a string instruction. * * Returns: * * True if segment override prefixes can be used with the register indicated * in @regoff. False if otherwise. */ static bool check_seg_overrides(struct insn *insn, int regoff) { if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn)) return false; return true; } /** * resolve_default_seg() - resolve default segment register index for an operand * @insn: Instruction with opcode and address size. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @off: Operand offset, in pt_regs, for which resolution is needed * * Resolve the default segment register index associated with the instruction * operand register indicated by @off. Such index is resolved based on defaults * described in the Intel Software Development Manual. * * Returns: * * If in protected mode, a constant identifying the segment register to use, * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE. * * -EINVAL in case of error. */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 * of the Intel Software Development Manual Vol. 1: * * + DS for all references involving r[ABCD]X, and rSI. * + If used in a string instruction, ES for rDI. Otherwise, DS. * + AX, CX and DX are not valid register operands in 16-bit address * encodings but are valid for 32-bit and 64-bit encodings. * + -EDOM is reserved to identify for cases in which no register * is used (i.e., displacement-only addressing). Use DS. * + SS for rSP or rBP. * + CS for rIP. */ switch (off) { case offsetof(struct pt_regs, ax): case offsetof(struct pt_regs, cx): case offsetof(struct pt_regs, dx): /* Need insn to verify address size. */ if (insn->addr_bytes == 2) return -EINVAL; /* fall through */ case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): return INAT_SEG_REG_DS; case offsetof(struct pt_regs, di): if (is_string_insn(insn)) return INAT_SEG_REG_ES; return INAT_SEG_REG_DS; case offsetof(struct pt_regs, bp): case offsetof(struct pt_regs, sp): return INAT_SEG_REG_SS; case offsetof(struct pt_regs, ip): return INAT_SEG_REG_CS; default: return -EINVAL; } } /** * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to deterimine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. * * The segment register associated to an operand used in register-indirect * addressing depends on: * * a) Whether running in long mode (in such a case segments are ignored, except * if FS or GS are used). * * b) Whether segment override prefixes can be used. Certain instructions and * registers do not allow override prefixes. * * c) Whether segment overrides prefixes are found in the instruction prefixes. * * d) If there are not segment override prefixes or they cannot be used, the * default segment register associated with the operand register is used. * * The function checks first if segment override prefixes can be used with the * operand indicated by @regoff. If allowed, obtain such overridden segment * register index. Lastly, if not prefixes were found or cannot be used, resolve * the segment register index to use based on the defaults described in the * Intel documentation. In long mode, all segment register indexes will be * ignored, except if overrides were found for FS or GS. All these operations * are done using helper functions. * * The operand register, @regoff, is represented as the offset from the base of * pt_regs. * * As stated, the main use of this function is to determine the segment register * index based on the instruction, its operands and prefixes. Hence, @insn * must be valid. 
However, if @regoff indicates rIP, we don't need to inspect * @insn at all as in this case CS is used in all cases. This case is checked * before proceeding further. * * Please note that this function does not return the value in the segment * register (i.e., the segment selector) but our defined index. The segment * selector needs to be obtained using get_segment_selector() and passing the * segment register index resolved by this function. * * Returns: * * An index identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. * * -EINVAL in case of error. */ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) { int idx; /* * In the unlikely event of having to resolve the segment register * index for rIP, do it first. Segment override prefixes should not * be used. Hence, it is not necessary to inspect the instruction, * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; } if (!insn) return -EINVAL; if (!check_seg_overrides(insn, regoff)) return resolve_default_seg(insn, regs, regoff); idx = get_seg_reg_override_idx(insn); if (idx < 0) return idx; if (idx == INAT_SEG_REG_DEFAULT) return resolve_default_seg(insn, regs, regoff); /* * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ if (user_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; } return idx; } /** * get_segment_selector() - obtain segment selector * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Segment register index to use * * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU * registers. This done for only for completeness as in CONFIG_X86_64 segment * registers are ignored. * * Returns: * * Value of the segment selector, including null when running in * long mode. * * -EINVAL on error. 
*/ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) { #ifdef CONFIG_X86_64 unsigned short sel; switch (seg_reg_idx) { case INAT_SEG_REG_IGNORE: return 0; case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: savesegment(ds, sel); return sel; case INAT_SEG_REG_ES: savesegment(es, sel); return sel; case INAT_SEG_REG_FS: savesegment(fs, sel); return sel; case INAT_SEG_REG_GS: savesegment(gs, sel); return sel; default: return -EINVAL; } #else /* CONFIG_X86_32 */ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; if (v8086_mode(regs)) { switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return vm86regs->ds; case INAT_SEG_REG_ES: return vm86regs->es; case INAT_SEG_REG_FS: return vm86regs->fs; case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } } switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return (unsigned short)(regs->ds & 0xffff); case INAT_SEG_REG_ES: return (unsigned short)(regs->es & 0xffff); case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: /* * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. * The macro below takes care of both cases. */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } #endif /* CONFIG_X86_64 */ } static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. 
*/ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; } /** * get_reg_offset_16() - Obtain offset of register indicated by instruction * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register * @offs2: Offset of the second opeand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The * @offs1 and @offs2 will be written with the offset of the two registers * indicated by the instruction. In cases where any of the registers is not * referenced by the instruction, the value will be set to -EDOM. * * Returns: * * 0 on success, -EINVAL on error. */ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, int *offs1, int *offs2) { /* * 16-bit addressing can use one or two registers. Specifics of * encodings are given in Table 2-1. "16-Bit Addressing Forms with the * ModR/M Byte" of the Intel Software Development Manual. */ static const int regoff1[] = { offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bx), }; static const int regoff2[] = { offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), -EDOM, -EDOM, -EDOM, -EDOM, }; if (!offs1 || !offs2) return -EINVAL; /* Operand is a register, use the generic function. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) { *offs1 = insn_get_modrm_rm_off(insn, regs); *offs2 = -EDOM; return 0; } *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; /* * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first * register offset is invalild. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && (X86_MODRM_RM(insn->modrm.value) == 6)) *offs1 = -EDOM; return 0; } /** * get_desc() - Obtain pointer to a segment descriptor * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. * Both global and local descriptor tables are supported. * * Returns: * * Pointer to segment descriptor on success. * * NULL on error. */ static struct desc_struct *get_desc(unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc = NULL; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) desc = &ldt->entries[sel]; mutex_unlock(&current->active_mm->context.lock); return desc; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. 
Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return NULL; return (struct desc_struct *)(gdt_desc.address + desc_base); } /** * insn_get_seg_base() - Obtain base address of segment descriptor. * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the base address of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, base address of the segment. Zero in long mode, * except when FS or GS are used. In virtual-8086 mode, the segment * selector shifted 4 bits to the right. * * -1L in case of error. */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; desc = get_desc(sel); if (!desc) return -1L; return get_desc_base(desc); } /** * get_seg_limit() - Obtain the limit of a segment descriptor * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the limit of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, the limit of the segment descriptor in bytes. * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, * limit is returned as -1L to imply a limit-less segment. * * Zero is returned on error. */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; unsigned long limit; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return 0; if (user_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; desc = get_desc(sel); if (!desc) return 0; /* * If the granularity bit is set, the limit is given in multiples * of 4096. This also means that the 12 least significant bits are * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ limit = get_desc_limit(desc); if (desc->g) limit = (limit << 12) + 0xfff; return limit; } /** * insn_get_code_seg_params() - Obtain code segment parameters * @regs: Structure with register values as seen when entering kernel mode * * Obtain address and operand sizes of the code segment. It is obtained from the * selector contained in the CS register in regs. 
In protected mode, the default * address is determined by inspecting the L and D bits of the segment * descriptor. In virtual-8086 mode, the default is always two bytes for both * address and operand sizes. * * Returns: * * An int containing ORed-in default parameters on success. * * -EINVAL on error. */ int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct *desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; desc = get_desc(sel); if (!desc) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc->type & BIT(3))) return -EINVAL; switch ((desc->l << 1) | desc->d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } } /** * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte * @insn: Instruction containing the ModRM byte * @regs: Register values as seen when entering kernel mode * * Returns: * * The register indicated by the r/m part of the ModRM byte. The * register is obtained as an offset from the base of pt_regs. In specific * cases, the returned value can be -EDOM to indicate that the particular value * of ModRM does not refer to a register and shall be ignored. */ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) { return get_reg_offset(insn, regs, REG_TYPE_RM); } /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor * @base: Obtained segment base * @limit: Obtained segment limit * * Obtain the base address and limit of the segment associated with the operand * @regoff and, if any or allowed, override prefixes in @insn. This function is * different from insn_get_seg_base() as the latter does not resolve the segment * associated with the instruction operand. If a limit is not needed (e.g., * when running in long mode), @limit can be NULL. * * Returns: * * 0 on success. @base and @limit will contain the base address and of the * resolved segment, respectively. * * -EINVAL on error. */ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, int regoff, unsigned long *base, unsigned long *limit) { int seg_reg_idx; if (!base) return -EINVAL; seg_reg_idx = resolve_seg_reg(insn, regs, regoff); if (seg_reg_idx < 0) return seg_reg_idx; *base = insn_get_seg_base(regs, seg_reg_idx); if (*base == -1L) return -EINVAL; if (!limit) return 0; *limit = get_seg_limit(regs, seg_reg_idx); if (!(*limit)) return -EINVAL; return 0; } /** * get_eff_addr_reg() - Obtain effective address from register operand * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, with the effective address * @eff_addr: Obtained effective address * * Obtain the effective address stored in the register operand as indicated by * the ModRM byte. This function is to be used only with register addressing * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The * register operand, as an offset from the base of pt_regs, is saved in @regoff; * such offset can then be used to resolve the segment associated with the * operand. This function can be used with any of the supported address sizes * in x86. * * Returns: * * 0 on success. @eff_addr will have the effective address stored in the * operand indicated by ModRM. @regoff will have such operand as an offset from * the base of pt_regs. * * -EINVAL on error. */ static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); if (*regoff < 0) return -EINVAL; /* Ignore bytes that are outside the address size. */ if (insn->addr_bytes == 2) *eff_addr = regs_get_register(regs, *regoff) & 0xffff; else if (insn->addr_bytes == 4) *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff; else /* 64-bit address */ *eff_addr = regs_get_register(regs, *regoff); return 0; } /** * get_eff_addr_modrm() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the ModRM byte of @insn. After * identifying the registers involved in the register-indirect memory reference, * its value is obtained from the operands in @regs. The computed address is * stored @eff_addr. Also, the register operand that indicates the associated * segment is stored in @regoff, this parameter can later be used to determine * such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); /* * -EDOM means that we must ignore the address_offset. In such a case, * in 64-bit mode the effective address relative to the rIP of the * following instruction. */ if (*regoff == -EDOM) { if (user_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; } else if (*regoff < 0) { return -EINVAL; } else { tmp = regs_get_register(regs, *regoff); } if (insn->addr_bytes == 4) { int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = tmp + insn->displacement.value; } return 0; } /** * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the 16-bit effective address referenced by the ModRM byte of @insn. * After identifying the registers involved in the register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates * the associated segment is stored in @regoff, this parameter can later be used * to determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, int *regoff, short *eff_addr) { int addr_offset1, addr_offset2, ret; short addr1 = 0, addr2 = 0, displacement; if (insn->addr_bytes != 2) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2); if (ret < 0) return -EINVAL; /* * Don't fail on invalid offset values. They might be invalid because * they cannot be used for this particular value of ModRM. Instead, use * them in the computation only if they contain a valid value. */ if (addr_offset1 != -EDOM) addr1 = regs_get_register(regs, addr_offset1) & 0xffff; if (addr_offset2 != -EDOM) addr2 = regs_get_register(regs, addr_offset2) & 0xffff; displacement = insn->displacement.value & 0xffff; *eff_addr = addr1 + addr2 + displacement; /* * The first operand register could indicate to use of either SS or DS * registers to obtain the segment selector. The second operand * register can only indicate the use of DS. Thus, the first operand * will be used to obtain the segment selector. */ *regoff = addr_offset1; return 0; } /** * get_eff_addr_sib() - Obtain referenced effective address via SIB * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the SIB byte of @insn. After * identifying the registers involved in the indexed, register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates the * associated segment is stored in @regoff, this parameter can later be used to * determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; insn_get_sib(insn); if (!insn->sib.nbytes) return -EINVAL; *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); /* * Negative values in the base and index offset means an error when * decoding the SIB byte. 
Except -EDOM, which means that the registers * should not be used in the address computation. */ if (*base_offset == -EDOM) base = 0; else if (*base_offset < 0) return -EINVAL; else base = regs_get_register(regs, *base_offset); if (indx_offset == -EDOM) indx = 0; else if (indx_offset < 0) return -EINVAL; else indx = regs_get_register(regs, indx_offset); if (insn->addr_bytes == 4) { int addr32, base32, idx32; base32 = base & 0xffffffff; idx32 = indx & 0xffffffff; addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value)); addr32 += insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value)); *eff_addr += insn->displacement.value; } return 0; } /** * get_addr_ref_16() - Obtain the 16-bit address referred by instruction * @insn: Instruction containing ModRM byte and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 16-bit address encodings. Obtain the memory * address referred by the instruction's ModRM and displacement bytes. Also, the * segment used as base is determined by either any segment override prefixes in * @insn or the default segment of the registers involved in the address * computation. In protected mode, segment limits are enforced. * * Returns: * * Linear address referenced by the instruction operands on success. * * -1L on error. */ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int ret, regoff; short eff_addr; long tmp; insn_get_modrm(insn); insn_get_displacement(insn); if (insn->addr_bytes != 2) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr); if (ret) goto out; } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * Before computing the linear address, make sure the effective address * is within the limits of the segment. In virtual-8086 mode, segment * limits are not enforced. In such a case, the segment limit is -1L to * reflect this fact. */ if ((unsigned long)(eff_addr & 0xffff) > seg_limit) goto out; linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_32() - Obtain a 32-bit linear address * @insn: Instruction with ModRM, SIB bytes and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 32-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. If in protected * mode, segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. 
*/ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int eff_addr, regoff; long tmp; int ret; if (insn->addr_bytes != 4) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * In protected mode, before computing the linear address, make sure * the effective address is within the limits of the segment. * 32-bit addresses can be used in long and virtual-8086 modes if an * address override prefix is used. In such cases, segment limits are * not enforced. When in virtual-8086 mode, the segment limit is -1L * to reflect this situation. * * After computed, the effective address is treated as an unsigned * quantity. */ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* * Even though 32-bit address encodings are allowed in virtual-8086 * mode, the address range is still limited to [0x-0xffff]. */ if (v8086_mode(regs) && (eff_addr & ~0xffff)) goto out; /* * Data type long could be 64 bits in size. Ensure that our 32-bit * effective address is not sign-extended when computing the linear * address. */ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_64() - Obtain a 64-bit linear address * @insn: Instruction struct with ModRM and SIB bytes and displacement * @regs: Structure with register values as seen when entering kernel mode * * This function is to be used with 64-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. */ #ifndef CONFIG_X86_64 static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { return (void __user *)-1L; } #else static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base; int regoff, ret; long eff_addr; if (insn->addr_bytes != 8) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr); if (ret) goto out; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL); if (ret) goto out; linear_addr = (unsigned long)eff_addr + seg_base; out: return (void __user *)linear_addr; } #endif /* CONFIG_X86_64 */ /** * insn_get_addr_ref() - Obtain the linear address referred by instruction * @insn: Instruction structure containing ModRM byte and displacement * @regs: Structure with register values as seen when entering kernel mode * * Obtain the linear address referred by the instruction's ModRM, SIB and * displacement bytes, and segment base, as applicable. In protected mode, * segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. 
* * -1L on error. */ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) { if (!insn || !regs) return (void __user *)-1L; switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); case 4: return get_addr_ref_32(insn, regs); case 8: return get_addr_ref_64(insn, regs); default: return (void __user *)-1L; } }
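In get_desc() above, the LDT branch takes context.lock, computes a pointer into ldt->entries[], releases the lock, and only then returns that pointer for the caller to dereference; once the mutex is dropped, nothing prevents the LDT from being replaced and freed before the descriptor is read. One common remedy is to copy the entry into caller-owned storage while the lock is still held. The pthread sketch below contrasts the two shapes; demo_table, get_entry_ptr and get_entry_copy are invented names, and the example makes no claim about how the kernel code itself was eventually reworked.

/*
 * Sketch of "copy the entry out while the lock is held" instead of
 * returning a pointer into lock-protected storage. Purely illustrative;
 * build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_entry { uint64_t raw; };

struct demo_table {
	pthread_mutex_t lock;
	struct demo_entry *entries;	/* may be reallocated/freed under lock */
	unsigned int nr_entries;
};

/* Unsafe shape: a pointer into 'entries' escapes the critical section. */
static struct demo_entry *get_entry_ptr(struct demo_table *t, unsigned int idx)
{
	struct demo_entry *e = NULL;

	pthread_mutex_lock(&t->lock);
	if (idx < t->nr_entries)
		e = &t->entries[idx];
	pthread_mutex_unlock(&t->lock);
	return e;			/* may dangle once 'entries' is swapped */
}

/* Safer shape: the caller gets a private copy made under the lock. */
static int get_entry_copy(struct demo_table *t, unsigned int idx,
			  struct demo_entry *out)
{
	int ret = -1;

	pthread_mutex_lock(&t->lock);
	if (idx < t->nr_entries) {
		*out = t->entries[idx];
		ret = 0;
	}
	pthread_mutex_unlock(&t->lock);
	return ret;
}

int main(void)
{
	struct demo_table t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.entries = calloc(4, sizeof(struct demo_entry)),
		.nr_entries = 4,
	};
	struct demo_entry copy;

	if (!t.entries)
		return 1;

	t.entries[2].raw = 0xabcd;
	if (!get_entry_copy(&t, 2, &copy))
		printf("entry 2 = %#llx\n", (unsigned long long)copy.raw);

	(void)get_entry_ptr;		/* kept only to contrast the unsafe shape */
	free(t.entries);
	return 0;
}

Because get_entry_copy() hands back the descriptor by value, the caller never holds a pointer whose validity depends on a lock it no longer owns.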
/* * Utility functions for x86 operand and address decoding * * Copyright (C) Intel Corporation 2017 */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/mmu_context.h> #include <asm/desc_defs.h> #include <asm/desc.h> #include <asm/inat.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> #include <asm/vm86.h> #undef pr_fmt #define pr_fmt(fmt) "insn: " fmt enum reg_type { REG_TYPE_RM = 0, REG_TYPE_INDEX, REG_TYPE_BASE, }; /** * is_string_insn() - Determine if instruction is a string instruction * @insn: Instruction containing the opcode to inspect * * Returns: * * true if the instruction, determined by the opcode, is any of the * string instructions as defined in the Intel Software Development manual. * False otherwise. */ static bool is_string_insn(struct insn *insn) { insn_get_opcode(insn); /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; switch (insn->opcode.bytes[0]) { case 0x6c ... 0x6f: /* INS, OUTS */ case 0xa4 ... 0xa7: /* MOVS, CMPS */ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ return true; default: return false; } } /** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * * Inspect the instruction prefixes in @insn and find segment overrides, if any. * * Returns: * * A constant identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override * prefixes were found. * * -EINVAL in case of error. */ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ for (i = 0; i < insn->prefixes.nbytes; i++) { insn_attr_t attr; attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_SS): idx = INAT_SEG_REG_SS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_DS): idx = INAT_SEG_REG_DS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_ES): idx = INAT_SEG_REG_ES; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_FS): idx = INAT_SEG_REG_FS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_GS): idx = INAT_SEG_REG_GS; num_overrides++; break; /* No default action needed. */ } } /* More than one segment override prefix leads to undefined behavior. */ if (num_overrides > 1) return -EINVAL; return idx; } /** * check_seg_overrides() - check if segment override prefixes are allowed * @insn: Valid instruction with segment override prefixes * @regoff: Operand offset, in pt_regs, for which the check is performed * * For a particular register used in register-indirect addressing, determine if * segment override prefixes can be used. Specifically, no overrides are allowed * for rDI if used with a string instruction. * * Returns: * * True if segment override prefixes can be used with the register indicated * in @regoff. False if otherwise. */ static bool check_seg_overrides(struct insn *insn, int regoff) { if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn)) return false; return true; } /** * resolve_default_seg() - resolve default segment register index for an operand * @insn: Instruction with opcode and address size. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @off: Operand offset, in pt_regs, for which resolution is needed * * Resolve the default segment register index associated with the instruction * operand register indicated by @off. Such index is resolved based on defaults * described in the Intel Software Development Manual. * * Returns: * * If in protected mode, a constant identifying the segment register to use, * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE. * * -EINVAL in case of error. */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 * of the Intel Software Development Manual Vol. 1: * * + DS for all references involving r[ABCD]X, and rSI. * + If used in a string instruction, ES for rDI. Otherwise, DS. * + AX, CX and DX are not valid register operands in 16-bit address * encodings but are valid for 32-bit and 64-bit encodings. * + -EDOM is reserved to identify for cases in which no register * is used (i.e., displacement-only addressing). Use DS. * + SS for rSP or rBP. * + CS for rIP. */ switch (off) { case offsetof(struct pt_regs, ax): case offsetof(struct pt_regs, cx): case offsetof(struct pt_regs, dx): /* Need insn to verify address size. */ if (insn->addr_bytes == 2) return -EINVAL; /* fall through */ case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): return INAT_SEG_REG_DS; case offsetof(struct pt_regs, di): if (is_string_insn(insn)) return INAT_SEG_REG_ES; return INAT_SEG_REG_DS; case offsetof(struct pt_regs, bp): case offsetof(struct pt_regs, sp): return INAT_SEG_REG_SS; case offsetof(struct pt_regs, ip): return INAT_SEG_REG_CS; default: return -EINVAL; } } /** * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to deterimine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. * * The segment register associated to an operand used in register-indirect * addressing depends on: * * a) Whether running in long mode (in such a case segments are ignored, except * if FS or GS are used). * * b) Whether segment override prefixes can be used. Certain instructions and * registers do not allow override prefixes. * * c) Whether segment overrides prefixes are found in the instruction prefixes. * * d) If there are not segment override prefixes or they cannot be used, the * default segment register associated with the operand register is used. * * The function checks first if segment override prefixes can be used with the * operand indicated by @regoff. If allowed, obtain such overridden segment * register index. Lastly, if not prefixes were found or cannot be used, resolve * the segment register index to use based on the defaults described in the * Intel documentation. In long mode, all segment register indexes will be * ignored, except if overrides were found for FS or GS. All these operations * are done using helper functions. * * The operand register, @regoff, is represented as the offset from the base of * pt_regs. * * As stated, the main use of this function is to determine the segment register * index based on the instruction, its operands and prefixes. Hence, @insn * must be valid. 
However, if @regoff indicates rIP, we don't need to inspect * @insn at all as in this case CS is used in all cases. This case is checked * before proceeding further. * * Please note that this function does not return the value in the segment * register (i.e., the segment selector) but our defined index. The segment * selector needs to be obtained using get_segment_selector() and passing the * segment register index resolved by this function. * * Returns: * * An index identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. * * -EINVAL in case of error. */ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) { int idx; /* * In the unlikely event of having to resolve the segment register * index for rIP, do it first. Segment override prefixes should not * be used. Hence, it is not necessary to inspect the instruction, * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; } if (!insn) return -EINVAL; if (!check_seg_overrides(insn, regoff)) return resolve_default_seg(insn, regs, regoff); idx = get_seg_reg_override_idx(insn); if (idx < 0) return idx; if (idx == INAT_SEG_REG_DEFAULT) return resolve_default_seg(insn, regs, regoff); /* * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ if (user_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; } return idx; } /** * get_segment_selector() - obtain segment selector * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Segment register index to use * * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU * registers. This done for only for completeness as in CONFIG_X86_64 segment * registers are ignored. * * Returns: * * Value of the segment selector, including null when running in * long mode. * * -EINVAL on error. 
*/ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) { #ifdef CONFIG_X86_64 unsigned short sel; switch (seg_reg_idx) { case INAT_SEG_REG_IGNORE: return 0; case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: savesegment(ds, sel); return sel; case INAT_SEG_REG_ES: savesegment(es, sel); return sel; case INAT_SEG_REG_FS: savesegment(fs, sel); return sel; case INAT_SEG_REG_GS: savesegment(gs, sel); return sel; default: return -EINVAL; } #else /* CONFIG_X86_32 */ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; if (v8086_mode(regs)) { switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return vm86regs->ds; case INAT_SEG_REG_ES: return vm86regs->es; case INAT_SEG_REG_FS: return vm86regs->fs; case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } } switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return (unsigned short)(regs->ds & 0xffff); case INAT_SEG_REG_ES: return (unsigned short)(regs->es & 0xffff); case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: /* * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. * The macro below takes care of both cases. */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } #endif /* CONFIG_X86_64 */ } static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. 
*/ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; } /** * get_reg_offset_16() - Obtain offset of register indicated by instruction * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register * @offs2: Offset of the second opeand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The * @offs1 and @offs2 will be written with the offset of the two registers * indicated by the instruction. In cases where any of the registers is not * referenced by the instruction, the value will be set to -EDOM. * * Returns: * * 0 on success, -EINVAL on error. */ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, int *offs1, int *offs2) { /* * 16-bit addressing can use one or two registers. Specifics of * encodings are given in Table 2-1. "16-Bit Addressing Forms with the * ModR/M Byte" of the Intel Software Development Manual. */ static const int regoff1[] = { offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bx), }; static const int regoff2[] = { offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), -EDOM, -EDOM, -EDOM, -EDOM, }; if (!offs1 || !offs2) return -EINVAL; /* Operand is a register, use the generic function. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) { *offs1 = insn_get_modrm_rm_off(insn, regs); *offs2 = -EDOM; return 0; } *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; /* * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first * register offset is invalild. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && (X86_MODRM_RM(insn->modrm.value) == 6)) *offs1 = -EDOM; return 0; } /** * get_desc() - Obtain contents of a segment descriptor * @out: Segment descriptor contents on success * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. * Both global and local descriptor tables are supported. * * Returns: * * True on success, false on failure. * * NULL on error. */ static bool get_desc(struct desc_struct *out, unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { bool success = false; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) { *out = ldt->entries[sel]; success = true; } mutex_unlock(&current->active_mm->context.lock); return success; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. 
Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return false; *out = *(struct desc_struct *)(gdt_desc.address + desc_base); return true; } /** * insn_get_seg_base() - Obtain base address of segment descriptor. * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the base address of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, base address of the segment. Zero in long mode, * except when FS or GS are used. In virtual-8086 mode, the segment * selector shifted 4 bits to the right. * * -1L in case of error. */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; if (!get_desc(&desc, sel)) return -1L; return get_desc_base(&desc); } /** * get_seg_limit() - Obtain the limit of a segment descriptor * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the limit of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, the limit of the segment descriptor in bytes. * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, * limit is returned as -1L to imply a limit-less segment. * * Zero is returned on error. */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; unsigned long limit; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return 0; if (user_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; if (!get_desc(&desc, sel)) return 0; /* * If the granularity bit is set, the limit is given in multiples * of 4096. This also means that the 12 least significant bits are * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ limit = get_desc_limit(&desc); if (desc.g) limit = (limit << 12) + 0xfff; return limit; } /** * insn_get_code_seg_params() - Obtain code segment parameters * @regs: Structure with register values as seen when entering kernel mode * * Obtain address and operand sizes of the code segment. 
It is obtained from the * selector contained in the CS register in regs. In protected mode, the default * address is determined by inspecting the L and D bits of the segment * descriptor. In virtual-8086 mode, the default is always two bytes for both * address and operand sizes. * * Returns: * * An int containing ORed-in default parameters on success. * * -EINVAL on error. */ int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; if (!get_desc(&desc, sel)) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc.type & BIT(3))) return -EINVAL; switch ((desc.l << 1) | desc.d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } } /** * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte * @insn: Instruction containing the ModRM byte * @regs: Register values as seen when entering kernel mode * * Returns: * * The register indicated by the r/m part of the ModRM byte. The * register is obtained as an offset from the base of pt_regs. In specific * cases, the returned value can be -EDOM to indicate that the particular value * of ModRM does not refer to a register and shall be ignored. */ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) { return get_reg_offset(insn, regs, REG_TYPE_RM); } /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor * @base: Obtained segment base * @limit: Obtained segment limit * * Obtain the base address and limit of the segment associated with the operand * @regoff and, if any or allowed, override prefixes in @insn. This function is * different from insn_get_seg_base() as the latter does not resolve the segment * associated with the instruction operand. If a limit is not needed (e.g., * when running in long mode), @limit can be NULL. * * Returns: * * 0 on success. @base and @limit will contain the base address and of the * resolved segment, respectively. * * -EINVAL on error. */ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, int regoff, unsigned long *base, unsigned long *limit) { int seg_reg_idx; if (!base) return -EINVAL; seg_reg_idx = resolve_seg_reg(insn, regs, regoff); if (seg_reg_idx < 0) return seg_reg_idx; *base = insn_get_seg_base(regs, seg_reg_idx); if (*base == -1L) return -EINVAL; if (!limit) return 0; *limit = get_seg_limit(regs, seg_reg_idx); if (!(*limit)) return -EINVAL; return 0; } /** * get_eff_addr_reg() - Obtain effective address from register operand * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, with the effective address * @eff_addr: Obtained effective address * * Obtain the effective address stored in the register operand as indicated by * the ModRM byte. This function is to be used only with register addressing * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The * register operand, as an offset from the base of pt_regs, is saved in @regoff; * such offset can then be used to resolve the segment associated with the * operand. This function can be used with any of the supported address sizes * in x86. * * Returns: * * 0 on success. @eff_addr will have the effective address stored in the * operand indicated by ModRM. @regoff will have such operand as an offset from * the base of pt_regs. * * -EINVAL on error. */ static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); if (*regoff < 0) return -EINVAL; /* Ignore bytes that are outside the address size. */ if (insn->addr_bytes == 2) *eff_addr = regs_get_register(regs, *regoff) & 0xffff; else if (insn->addr_bytes == 4) *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff; else /* 64-bit address */ *eff_addr = regs_get_register(regs, *regoff); return 0; } /** * get_eff_addr_modrm() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the ModRM byte of @insn. After * identifying the registers involved in the register-indirect memory reference, * its value is obtained from the operands in @regs. The computed address is * stored @eff_addr. Also, the register operand that indicates the associated * segment is stored in @regoff, this parameter can later be used to determine * such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); /* * -EDOM means that we must ignore the address_offset. In such a case, * in 64-bit mode the effective address relative to the rIP of the * following instruction. */ if (*regoff == -EDOM) { if (user_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; } else if (*regoff < 0) { return -EINVAL; } else { tmp = regs_get_register(regs, *regoff); } if (insn->addr_bytes == 4) { int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = tmp + insn->displacement.value; } return 0; } /** * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the 16-bit effective address referenced by the ModRM byte of @insn. * After identifying the registers involved in the register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates * the associated segment is stored in @regoff, this parameter can later be used * to determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, int *regoff, short *eff_addr) { int addr_offset1, addr_offset2, ret; short addr1 = 0, addr2 = 0, displacement; if (insn->addr_bytes != 2) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2); if (ret < 0) return -EINVAL; /* * Don't fail on invalid offset values. They might be invalid because * they cannot be used for this particular value of ModRM. Instead, use * them in the computation only if they contain a valid value. */ if (addr_offset1 != -EDOM) addr1 = regs_get_register(regs, addr_offset1) & 0xffff; if (addr_offset2 != -EDOM) addr2 = regs_get_register(regs, addr_offset2) & 0xffff; displacement = insn->displacement.value & 0xffff; *eff_addr = addr1 + addr2 + displacement; /* * The first operand register could indicate to use of either SS or DS * registers to obtain the segment selector. The second operand * register can only indicate the use of DS. Thus, the first operand * will be used to obtain the segment selector. */ *regoff = addr_offset1; return 0; } /** * get_eff_addr_sib() - Obtain referenced effective address via SIB * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the SIB byte of @insn. After * identifying the registers involved in the indexed, register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates the * associated segment is stored in @regoff, this parameter can later be used to * determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; insn_get_sib(insn); if (!insn->sib.nbytes) return -EINVAL; *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); /* * Negative values in the base and index offset means an error when * decoding the SIB byte. 
Except -EDOM, which means that the registers * should not be used in the address computation. */ if (*base_offset == -EDOM) base = 0; else if (*base_offset < 0) return -EINVAL; else base = regs_get_register(regs, *base_offset); if (indx_offset == -EDOM) indx = 0; else if (indx_offset < 0) return -EINVAL; else indx = regs_get_register(regs, indx_offset); if (insn->addr_bytes == 4) { int addr32, base32, idx32; base32 = base & 0xffffffff; idx32 = indx & 0xffffffff; addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value)); addr32 += insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value)); *eff_addr += insn->displacement.value; } return 0; } /** * get_addr_ref_16() - Obtain the 16-bit address referred by instruction * @insn: Instruction containing ModRM byte and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 16-bit address encodings. Obtain the memory * address referred by the instruction's ModRM and displacement bytes. Also, the * segment used as base is determined by either any segment override prefixes in * @insn or the default segment of the registers involved in the address * computation. In protected mode, segment limits are enforced. * * Returns: * * Linear address referenced by the instruction operands on success. * * -1L on error. */ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int ret, regoff; short eff_addr; long tmp; insn_get_modrm(insn); insn_get_displacement(insn); if (insn->addr_bytes != 2) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr); if (ret) goto out; } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * Before computing the linear address, make sure the effective address * is within the limits of the segment. In virtual-8086 mode, segment * limits are not enforced. In such a case, the segment limit is -1L to * reflect this fact. */ if ((unsigned long)(eff_addr & 0xffff) > seg_limit) goto out; linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_32() - Obtain a 32-bit linear address * @insn: Instruction with ModRM, SIB bytes and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 32-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. If in protected * mode, segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. 
*/ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int eff_addr, regoff; long tmp; int ret; if (insn->addr_bytes != 4) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * In protected mode, before computing the linear address, make sure * the effective address is within the limits of the segment. * 32-bit addresses can be used in long and virtual-8086 modes if an * address override prefix is used. In such cases, segment limits are * not enforced. When in virtual-8086 mode, the segment limit is -1L * to reflect this situation. * * After computed, the effective address is treated as an unsigned * quantity. */ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* * Even though 32-bit address encodings are allowed in virtual-8086 * mode, the address range is still limited to [0x-0xffff]. */ if (v8086_mode(regs) && (eff_addr & ~0xffff)) goto out; /* * Data type long could be 64 bits in size. Ensure that our 32-bit * effective address is not sign-extended when computing the linear * address. */ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_64() - Obtain a 64-bit linear address * @insn: Instruction struct with ModRM and SIB bytes and displacement * @regs: Structure with register values as seen when entering kernel mode * * This function is to be used with 64-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. */ #ifndef CONFIG_X86_64 static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { return (void __user *)-1L; } #else static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base; int regoff, ret; long eff_addr; if (insn->addr_bytes != 8) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr); if (ret) goto out; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL); if (ret) goto out; linear_addr = (unsigned long)eff_addr + seg_base; out: return (void __user *)linear_addr; } #endif /* CONFIG_X86_64 */ /** * insn_get_addr_ref() - Obtain the linear address referred by instruction * @insn: Instruction structure containing ModRM byte and displacement * @regs: Structure with register values as seen when entering kernel mode * * Obtain the linear address referred by the instruction's ModRM, SIB and * displacement bytes, and segment base, as applicable. In protected mode, * segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. 
* * -1L on error. */ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) { if (!insn || !regs) return (void __user *)-1L; switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); case 4: return get_addr_ref_32(insn, regs); case 8: return get_addr_ref_64(insn, regs); default: return (void __user *)-1L; } }
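The computation that all of the get_addr_ref_*() helpers above share is linear address = segment base + effective address, with the effective address masked to the address size and, where applicable, the result clipped to the mode's address space. A minimal, self-contained sketch of the virtual-8086 16-bit case follows; it is plain user-space C for illustration, not kernel code, and the function name is made up.

#include <stdio.h>

/* In virtual-8086 mode the segment base is the selector shifted left
 * by 4 bits and the linear address is limited to 20 bits (1 MiB). */
static unsigned long v8086_linear_addr(unsigned short sel, unsigned short eff_addr)
{
	unsigned long seg_base = (unsigned long)sel << 4;
	unsigned long linear = seg_base + eff_addr;

	return linear & 0xfffff;
}

int main(void)
{
	/* 0xffff:0x0010 wraps inside the 1 MiB space: 0xffff0 + 0x10 -> 0x00000 */
	printf("%#lx\n", v8086_linear_addr(0xffff, 0x0010));
	return 0;
}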
static struct desc_struct *get_desc(unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc = NULL; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) desc = &ldt->entries[sel]; mutex_unlock(&current->active_mm->context.lock); return desc; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return NULL; return (struct desc_struct *)(gdt_desc.address + desc_base); }
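The func_before version of get_desc() above releases current->active_mm->context.lock and then returns a pointer into ldt->entries[], so a concurrent modify_ldt() can free or replace the LDT while the caller is still dereferencing that pointer; this is the use-after-free/race behind CVE-2019-13233 (CWE-416, CWE-362). The reduced sketch below shows the same unsafe shape with hypothetical names and plain C/pthreads instead of kernel primitives.

#include <pthread.h>
#include <stddef.h>

struct entry { unsigned long base, limit; };

struct table {
	pthread_mutex_t lock;
	struct entry   *entries;	/* may be freed/replaced by another thread */
	unsigned int    nr_entries;
};

/* Unsafe: the pointer is only stable while the lock is held, yet it is
 * handed to the caller after the unlock and can dangle. */
static struct entry *lookup_unsafe(struct table *t, unsigned int idx)
{
	struct entry *e = NULL;

	pthread_mutex_lock(&t->lock);
	if (idx < t->nr_entries)
		e = &t->entries[idx];
	pthread_mutex_unlock(&t->lock);

	return e;
}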
static bool get_desc(struct desc_struct *out, unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { bool success = false; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) { *out = ldt->entries[sel]; success = true; } mutex_unlock(&current->active_mm->context.lock); return success; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return false; *out = *(struct desc_struct *)(gdt_desc.address + desc_base); return true; }
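The func_after version resolves this by copying the descriptor into caller-provided storage while the lock is still held and returning only a success flag, so no pointer into the LDT ever escapes the critical section. The same copy-out-under-lock shape, reusing the hypothetical struct table/struct entry types from the sketch above:

#include <stdbool.h>

/* Safe variant: snapshot the entry under the lock; nothing that can
 * dangle is returned to the caller. */
static bool lookup_copy(struct table *t, unsigned int idx, struct entry *out)
{
	bool ok = false;

	pthread_mutex_lock(&t->lock);
	if (idx < t->nr_entries) {
		*out = t->entries[idx];
		ok = true;
	}
	pthread_mutex_unlock(&t->lock);

	return ok;
}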
{'added': [(560, ' * get_desc() - Obtain contents of a segment descriptor'), (561, ' * @out:\tSegment descriptor contents on success'), (569, ' * True on success, false on failure.'), (573, 'static bool get_desc(struct desc_struct *out, unsigned short sel)'), (580, '\t\tbool success = false;'), (588, '\t\tif (ldt && sel < ldt->nr_entries) {'), (589, '\t\t\t*out = ldt->entries[sel];'), (590, '\t\t\tsuccess = true;'), (591, '\t\t}'), (595, '\t\treturn success;'), (610, '\t\treturn false;'), (612, '\t*out = *(struct desc_struct *)(gdt_desc.address + desc_base);'), (613, '\treturn true;'), (635, '\tstruct desc_struct desc;'), (673, '\tif (!get_desc(&desc, sel))'), (676, '\treturn get_desc_base(&desc);'), (698, '\tstruct desc_struct desc;'), (712, '\tif (!get_desc(&desc, sel))'), (721, '\tlimit = get_desc_limit(&desc);'), (722, '\tif (desc.g)'), (746, '\tstruct desc_struct desc;'), (757, '\tif (!get_desc(&desc, sel))'), (765, '\tif (!(desc.type & BIT(3)))'), (768, '\tswitch ((desc.l << 1) | desc.d) {')], 'deleted': [(560, ' * get_desc() - Obtain pointer to a segment descriptor'), (568, ' * Pointer to segment descriptor on success.'), (572, 'static struct desc_struct *get_desc(unsigned short sel)'), (579, '\t\tstruct desc_struct *desc = NULL;'), (587, '\t\tif (ldt && sel < ldt->nr_entries)'), (588, '\t\t\tdesc = &ldt->entries[sel];'), (592, '\t\treturn desc;'), (607, '\t\treturn NULL;'), (609, '\treturn (struct desc_struct *)(gdt_desc.address + desc_base);'), (631, '\tstruct desc_struct *desc;'), (669, '\tdesc = get_desc(sel);'), (670, '\tif (!desc)'), (673, '\treturn get_desc_base(desc);'), (695, '\tstruct desc_struct *desc;'), (709, '\tdesc = get_desc(sel);'), (710, '\tif (!desc)'), (719, '\tlimit = get_desc_limit(desc);'), (720, '\tif (desc->g)'), (744, '\tstruct desc_struct *desc;'), (755, '\tdesc = get_desc(sel);'), (756, '\tif (!desc)'), (764, '\tif (!(desc->type & BIT(3)))'), (767, '\tswitch ((desc->l << 1) | desc->d) {')]}
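The 'added' entries in the diff also show the caller-side half of the fix: each call site now keeps a struct desc_struct on its own stack and branches on the boolean return instead of holding a pointer. Reassembled from the added lines and the surrounding context in code_after (insn_get_seg_base() shown; get_seg_limit() and insn_get_code_seg_params() follow the same pattern):

	struct desc_struct desc;

	...

	if (!get_desc(&desc, sel))
		return -1L;

	return get_desc_base(&desc);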
24
23
634
3848
https://github.com/torvalds/linux
CVE-2019-13233
['CWE-416', 'CWE-362']
insn-eval.c
get_seg_limit
/* * Utility functions for x86 operand and address decoding * * Copyright (C) Intel Corporation 2017 */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/mmu_context.h> #include <asm/desc_defs.h> #include <asm/desc.h> #include <asm/inat.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> #include <asm/vm86.h> #undef pr_fmt #define pr_fmt(fmt) "insn: " fmt enum reg_type { REG_TYPE_RM = 0, REG_TYPE_INDEX, REG_TYPE_BASE, }; /** * is_string_insn() - Determine if instruction is a string instruction * @insn: Instruction containing the opcode to inspect * * Returns: * * true if the instruction, determined by the opcode, is any of the * string instructions as defined in the Intel Software Development manual. * False otherwise. */ static bool is_string_insn(struct insn *insn) { insn_get_opcode(insn); /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; switch (insn->opcode.bytes[0]) { case 0x6c ... 0x6f: /* INS, OUTS */ case 0xa4 ... 0xa7: /* MOVS, CMPS */ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ return true; default: return false; } } /** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * * Inspect the instruction prefixes in @insn and find segment overrides, if any. * * Returns: * * A constant identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override * prefixes were found. * * -EINVAL in case of error. */ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ for (i = 0; i < insn->prefixes.nbytes; i++) { insn_attr_t attr; attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_SS): idx = INAT_SEG_REG_SS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_DS): idx = INAT_SEG_REG_DS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_ES): idx = INAT_SEG_REG_ES; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_FS): idx = INAT_SEG_REG_FS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_GS): idx = INAT_SEG_REG_GS; num_overrides++; break; /* No default action needed. */ } } /* More than one segment override prefix leads to undefined behavior. */ if (num_overrides > 1) return -EINVAL; return idx; } /** * check_seg_overrides() - check if segment override prefixes are allowed * @insn: Valid instruction with segment override prefixes * @regoff: Operand offset, in pt_regs, for which the check is performed * * For a particular register used in register-indirect addressing, determine if * segment override prefixes can be used. Specifically, no overrides are allowed * for rDI if used with a string instruction. * * Returns: * * True if segment override prefixes can be used with the register indicated * in @regoff. False if otherwise. */ static bool check_seg_overrides(struct insn *insn, int regoff) { if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn)) return false; return true; } /** * resolve_default_seg() - resolve default segment register index for an operand * @insn: Instruction with opcode and address size. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @off: Operand offset, in pt_regs, for which resolution is needed * * Resolve the default segment register index associated with the instruction * operand register indicated by @off. Such index is resolved based on defaults * described in the Intel Software Development Manual. * * Returns: * * If in protected mode, a constant identifying the segment register to use, * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE. * * -EINVAL in case of error. */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 * of the Intel Software Development Manual Vol. 1: * * + DS for all references involving r[ABCD]X, and rSI. * + If used in a string instruction, ES for rDI. Otherwise, DS. * + AX, CX and DX are not valid register operands in 16-bit address * encodings but are valid for 32-bit and 64-bit encodings. * + -EDOM is reserved to identify for cases in which no register * is used (i.e., displacement-only addressing). Use DS. * + SS for rSP or rBP. * + CS for rIP. */ switch (off) { case offsetof(struct pt_regs, ax): case offsetof(struct pt_regs, cx): case offsetof(struct pt_regs, dx): /* Need insn to verify address size. */ if (insn->addr_bytes == 2) return -EINVAL; /* fall through */ case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): return INAT_SEG_REG_DS; case offsetof(struct pt_regs, di): if (is_string_insn(insn)) return INAT_SEG_REG_ES; return INAT_SEG_REG_DS; case offsetof(struct pt_regs, bp): case offsetof(struct pt_regs, sp): return INAT_SEG_REG_SS; case offsetof(struct pt_regs, ip): return INAT_SEG_REG_CS; default: return -EINVAL; } } /** * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to deterimine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. * * The segment register associated to an operand used in register-indirect * addressing depends on: * * a) Whether running in long mode (in such a case segments are ignored, except * if FS or GS are used). * * b) Whether segment override prefixes can be used. Certain instructions and * registers do not allow override prefixes. * * c) Whether segment overrides prefixes are found in the instruction prefixes. * * d) If there are not segment override prefixes or they cannot be used, the * default segment register associated with the operand register is used. * * The function checks first if segment override prefixes can be used with the * operand indicated by @regoff. If allowed, obtain such overridden segment * register index. Lastly, if not prefixes were found or cannot be used, resolve * the segment register index to use based on the defaults described in the * Intel documentation. In long mode, all segment register indexes will be * ignored, except if overrides were found for FS or GS. All these operations * are done using helper functions. * * The operand register, @regoff, is represented as the offset from the base of * pt_regs. * * As stated, the main use of this function is to determine the segment register * index based on the instruction, its operands and prefixes. Hence, @insn * must be valid. 
However, if @regoff indicates rIP, we don't need to inspect * @insn at all as in this case CS is used in all cases. This case is checked * before proceeding further. * * Please note that this function does not return the value in the segment * register (i.e., the segment selector) but our defined index. The segment * selector needs to be obtained using get_segment_selector() and passing the * segment register index resolved by this function. * * Returns: * * An index identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. * * -EINVAL in case of error. */ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) { int idx; /* * In the unlikely event of having to resolve the segment register * index for rIP, do it first. Segment override prefixes should not * be used. Hence, it is not necessary to inspect the instruction, * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; } if (!insn) return -EINVAL; if (!check_seg_overrides(insn, regoff)) return resolve_default_seg(insn, regs, regoff); idx = get_seg_reg_override_idx(insn); if (idx < 0) return idx; if (idx == INAT_SEG_REG_DEFAULT) return resolve_default_seg(insn, regs, regoff); /* * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ if (user_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; } return idx; } /** * get_segment_selector() - obtain segment selector * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Segment register index to use * * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU * registers. This done for only for completeness as in CONFIG_X86_64 segment * registers are ignored. * * Returns: * * Value of the segment selector, including null when running in * long mode. * * -EINVAL on error. 
*/ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) { #ifdef CONFIG_X86_64 unsigned short sel; switch (seg_reg_idx) { case INAT_SEG_REG_IGNORE: return 0; case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: savesegment(ds, sel); return sel; case INAT_SEG_REG_ES: savesegment(es, sel); return sel; case INAT_SEG_REG_FS: savesegment(fs, sel); return sel; case INAT_SEG_REG_GS: savesegment(gs, sel); return sel; default: return -EINVAL; } #else /* CONFIG_X86_32 */ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; if (v8086_mode(regs)) { switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return vm86regs->ds; case INAT_SEG_REG_ES: return vm86regs->es; case INAT_SEG_REG_FS: return vm86regs->fs; case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } } switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return (unsigned short)(regs->ds & 0xffff); case INAT_SEG_REG_ES: return (unsigned short)(regs->es & 0xffff); case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: /* * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. * The macro below takes care of both cases. */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } #endif /* CONFIG_X86_64 */ } static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. 
*/ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; } /** * get_reg_offset_16() - Obtain offset of register indicated by instruction * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register * @offs2: Offset of the second opeand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The * @offs1 and @offs2 will be written with the offset of the two registers * indicated by the instruction. In cases where any of the registers is not * referenced by the instruction, the value will be set to -EDOM. * * Returns: * * 0 on success, -EINVAL on error. */ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, int *offs1, int *offs2) { /* * 16-bit addressing can use one or two registers. Specifics of * encodings are given in Table 2-1. "16-Bit Addressing Forms with the * ModR/M Byte" of the Intel Software Development Manual. */ static const int regoff1[] = { offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bx), }; static const int regoff2[] = { offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), -EDOM, -EDOM, -EDOM, -EDOM, }; if (!offs1 || !offs2) return -EINVAL; /* Operand is a register, use the generic function. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) { *offs1 = insn_get_modrm_rm_off(insn, regs); *offs2 = -EDOM; return 0; } *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; /* * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first * register offset is invalild. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && (X86_MODRM_RM(insn->modrm.value) == 6)) *offs1 = -EDOM; return 0; } /** * get_desc() - Obtain pointer to a segment descriptor * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. * Both global and local descriptor tables are supported. * * Returns: * * Pointer to segment descriptor on success. * * NULL on error. */ static struct desc_struct *get_desc(unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc = NULL; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) desc = &ldt->entries[sel]; mutex_unlock(&current->active_mm->context.lock); return desc; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. 
Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return NULL; return (struct desc_struct *)(gdt_desc.address + desc_base); } /** * insn_get_seg_base() - Obtain base address of segment descriptor. * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the base address of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, base address of the segment. Zero in long mode, * except when FS or GS are used. In virtual-8086 mode, the segment * selector shifted 4 bits to the right. * * -1L in case of error. */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; desc = get_desc(sel); if (!desc) return -1L; return get_desc_base(desc); } /** * get_seg_limit() - Obtain the limit of a segment descriptor * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the limit of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, the limit of the segment descriptor in bytes. * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, * limit is returned as -1L to imply a limit-less segment. * * Zero is returned on error. */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; unsigned long limit; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return 0; if (user_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; desc = get_desc(sel); if (!desc) return 0; /* * If the granularity bit is set, the limit is given in multiples * of 4096. This also means that the 12 least significant bits are * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ limit = get_desc_limit(desc); if (desc->g) limit = (limit << 12) + 0xfff; return limit; } /** * insn_get_code_seg_params() - Obtain code segment parameters * @regs: Structure with register values as seen when entering kernel mode * * Obtain address and operand sizes of the code segment. It is obtained from the * selector contained in the CS register in regs. 
In protected mode, the default * address is determined by inspecting the L and D bits of the segment * descriptor. In virtual-8086 mode, the default is always two bytes for both * address and operand sizes. * * Returns: * * An int containing ORed-in default parameters on success. * * -EINVAL on error. */ int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct *desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; desc = get_desc(sel); if (!desc) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc->type & BIT(3))) return -EINVAL; switch ((desc->l << 1) | desc->d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } } /** * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte * @insn: Instruction containing the ModRM byte * @regs: Register values as seen when entering kernel mode * * Returns: * * The register indicated by the r/m part of the ModRM byte. The * register is obtained as an offset from the base of pt_regs. In specific * cases, the returned value can be -EDOM to indicate that the particular value * of ModRM does not refer to a register and shall be ignored. */ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) { return get_reg_offset(insn, regs, REG_TYPE_RM); } /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor * @base: Obtained segment base * @limit: Obtained segment limit * * Obtain the base address and limit of the segment associated with the operand * @regoff and, if any or allowed, override prefixes in @insn. This function is * different from insn_get_seg_base() as the latter does not resolve the segment * associated with the instruction operand. If a limit is not needed (e.g., * when running in long mode), @limit can be NULL. * * Returns: * * 0 on success. @base and @limit will contain the base address and of the * resolved segment, respectively. * * -EINVAL on error. */ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, int regoff, unsigned long *base, unsigned long *limit) { int seg_reg_idx; if (!base) return -EINVAL; seg_reg_idx = resolve_seg_reg(insn, regs, regoff); if (seg_reg_idx < 0) return seg_reg_idx; *base = insn_get_seg_base(regs, seg_reg_idx); if (*base == -1L) return -EINVAL; if (!limit) return 0; *limit = get_seg_limit(regs, seg_reg_idx); if (!(*limit)) return -EINVAL; return 0; } /** * get_eff_addr_reg() - Obtain effective address from register operand * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, with the effective address * @eff_addr: Obtained effective address * * Obtain the effective address stored in the register operand as indicated by * the ModRM byte. This function is to be used only with register addressing * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The * register operand, as an offset from the base of pt_regs, is saved in @regoff; * such offset can then be used to resolve the segment associated with the * operand. This function can be used with any of the supported address sizes * in x86. * * Returns: * * 0 on success. @eff_addr will have the effective address stored in the * operand indicated by ModRM. @regoff will have such operand as an offset from * the base of pt_regs. * * -EINVAL on error. */ static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); if (*regoff < 0) return -EINVAL; /* Ignore bytes that are outside the address size. */ if (insn->addr_bytes == 2) *eff_addr = regs_get_register(regs, *regoff) & 0xffff; else if (insn->addr_bytes == 4) *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff; else /* 64-bit address */ *eff_addr = regs_get_register(regs, *regoff); return 0; } /** * get_eff_addr_modrm() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the ModRM byte of @insn. After * identifying the registers involved in the register-indirect memory reference, * its value is obtained from the operands in @regs. The computed address is * stored @eff_addr. Also, the register operand that indicates the associated * segment is stored in @regoff, this parameter can later be used to determine * such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); /* * -EDOM means that we must ignore the address_offset. In such a case, * in 64-bit mode the effective address relative to the rIP of the * following instruction. */ if (*regoff == -EDOM) { if (user_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; } else if (*regoff < 0) { return -EINVAL; } else { tmp = regs_get_register(regs, *regoff); } if (insn->addr_bytes == 4) { int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = tmp + insn->displacement.value; } return 0; } /** * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the 16-bit effective address referenced by the ModRM byte of @insn. * After identifying the registers involved in the register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates * the associated segment is stored in @regoff, this parameter can later be used * to determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, int *regoff, short *eff_addr) { int addr_offset1, addr_offset2, ret; short addr1 = 0, addr2 = 0, displacement; if (insn->addr_bytes != 2) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2); if (ret < 0) return -EINVAL; /* * Don't fail on invalid offset values. They might be invalid because * they cannot be used for this particular value of ModRM. Instead, use * them in the computation only if they contain a valid value. */ if (addr_offset1 != -EDOM) addr1 = regs_get_register(regs, addr_offset1) & 0xffff; if (addr_offset2 != -EDOM) addr2 = regs_get_register(regs, addr_offset2) & 0xffff; displacement = insn->displacement.value & 0xffff; *eff_addr = addr1 + addr2 + displacement; /* * The first operand register could indicate to use of either SS or DS * registers to obtain the segment selector. The second operand * register can only indicate the use of DS. Thus, the first operand * will be used to obtain the segment selector. */ *regoff = addr_offset1; return 0; } /** * get_eff_addr_sib() - Obtain referenced effective address via SIB * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the SIB byte of @insn. After * identifying the registers involved in the indexed, register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates the * associated segment is stored in @regoff, this parameter can later be used to * determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; insn_get_sib(insn); if (!insn->sib.nbytes) return -EINVAL; *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); /* * Negative values in the base and index offset means an error when * decoding the SIB byte. 
Except -EDOM, which means that the registers * should not be used in the address computation. */ if (*base_offset == -EDOM) base = 0; else if (*base_offset < 0) return -EINVAL; else base = regs_get_register(regs, *base_offset); if (indx_offset == -EDOM) indx = 0; else if (indx_offset < 0) return -EINVAL; else indx = regs_get_register(regs, indx_offset); if (insn->addr_bytes == 4) { int addr32, base32, idx32; base32 = base & 0xffffffff; idx32 = indx & 0xffffffff; addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value)); addr32 += insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value)); *eff_addr += insn->displacement.value; } return 0; } /** * get_addr_ref_16() - Obtain the 16-bit address referred by instruction * @insn: Instruction containing ModRM byte and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 16-bit address encodings. Obtain the memory * address referred by the instruction's ModRM and displacement bytes. Also, the * segment used as base is determined by either any segment override prefixes in * @insn or the default segment of the registers involved in the address * computation. In protected mode, segment limits are enforced. * * Returns: * * Linear address referenced by the instruction operands on success. * * -1L on error. */ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int ret, regoff; short eff_addr; long tmp; insn_get_modrm(insn); insn_get_displacement(insn); if (insn->addr_bytes != 2) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr); if (ret) goto out; } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * Before computing the linear address, make sure the effective address * is within the limits of the segment. In virtual-8086 mode, segment * limits are not enforced. In such a case, the segment limit is -1L to * reflect this fact. */ if ((unsigned long)(eff_addr & 0xffff) > seg_limit) goto out; linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_32() - Obtain a 32-bit linear address * @insn: Instruction with ModRM, SIB bytes and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 32-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. If in protected * mode, segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. 
*/ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int eff_addr, regoff; long tmp; int ret; if (insn->addr_bytes != 4) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * In protected mode, before computing the linear address, make sure * the effective address is within the limits of the segment. * 32-bit addresses can be used in long and virtual-8086 modes if an * address override prefix is used. In such cases, segment limits are * not enforced. When in virtual-8086 mode, the segment limit is -1L * to reflect this situation. * * After computed, the effective address is treated as an unsigned * quantity. */ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* * Even though 32-bit address encodings are allowed in virtual-8086 * mode, the address range is still limited to [0x-0xffff]. */ if (v8086_mode(regs) && (eff_addr & ~0xffff)) goto out; /* * Data type long could be 64 bits in size. Ensure that our 32-bit * effective address is not sign-extended when computing the linear * address. */ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_64() - Obtain a 64-bit linear address * @insn: Instruction struct with ModRM and SIB bytes and displacement * @regs: Structure with register values as seen when entering kernel mode * * This function is to be used with 64-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. */ #ifndef CONFIG_X86_64 static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { return (void __user *)-1L; } #else static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base; int regoff, ret; long eff_addr; if (insn->addr_bytes != 8) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr); if (ret) goto out; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL); if (ret) goto out; linear_addr = (unsigned long)eff_addr + seg_base; out: return (void __user *)linear_addr; } #endif /* CONFIG_X86_64 */ /** * insn_get_addr_ref() - Obtain the linear address referred by instruction * @insn: Instruction structure containing ModRM byte and displacement * @regs: Structure with register values as seen when entering kernel mode * * Obtain the linear address referred by the instruction's ModRM, SIB and * displacement bytes, and segment base, as applicable. In protected mode, * segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. 
* * -1L on error. */ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) { if (!insn || !regs) return (void __user *)-1L; switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); case 4: return get_addr_ref_32(insn, regs); case 8: return get_addr_ref_64(insn, regs); default: return (void __user *)-1L; } }
/* * Utility functions for x86 operand and address decoding * * Copyright (C) Intel Corporation 2017 */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/mmu_context.h> #include <asm/desc_defs.h> #include <asm/desc.h> #include <asm/inat.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> #include <asm/vm86.h> #undef pr_fmt #define pr_fmt(fmt) "insn: " fmt enum reg_type { REG_TYPE_RM = 0, REG_TYPE_INDEX, REG_TYPE_BASE, }; /** * is_string_insn() - Determine if instruction is a string instruction * @insn: Instruction containing the opcode to inspect * * Returns: * * true if the instruction, determined by the opcode, is any of the * string instructions as defined in the Intel Software Development manual. * False otherwise. */ static bool is_string_insn(struct insn *insn) { insn_get_opcode(insn); /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; switch (insn->opcode.bytes[0]) { case 0x6c ... 0x6f: /* INS, OUTS */ case 0xa4 ... 0xa7: /* MOVS, CMPS */ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ return true; default: return false; } } /** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * * Inspect the instruction prefixes in @insn and find segment overrides, if any. * * Returns: * * A constant identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override * prefixes were found. * * -EINVAL in case of error. */ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ for (i = 0; i < insn->prefixes.nbytes; i++) { insn_attr_t attr; attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_SS): idx = INAT_SEG_REG_SS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_DS): idx = INAT_SEG_REG_DS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_ES): idx = INAT_SEG_REG_ES; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_FS): idx = INAT_SEG_REG_FS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_GS): idx = INAT_SEG_REG_GS; num_overrides++; break; /* No default action needed. */ } } /* More than one segment override prefix leads to undefined behavior. */ if (num_overrides > 1) return -EINVAL; return idx; } /** * check_seg_overrides() - check if segment override prefixes are allowed * @insn: Valid instruction with segment override prefixes * @regoff: Operand offset, in pt_regs, for which the check is performed * * For a particular register used in register-indirect addressing, determine if * segment override prefixes can be used. Specifically, no overrides are allowed * for rDI if used with a string instruction. * * Returns: * * True if segment override prefixes can be used with the register indicated * in @regoff. False if otherwise. */ static bool check_seg_overrides(struct insn *insn, int regoff) { if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn)) return false; return true; } /** * resolve_default_seg() - resolve default segment register index for an operand * @insn: Instruction with opcode and address size. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @off: Operand offset, in pt_regs, for which resolution is needed * * Resolve the default segment register index associated with the instruction * operand register indicated by @off. Such index is resolved based on defaults * described in the Intel Software Development Manual. * * Returns: * * If in protected mode, a constant identifying the segment register to use, * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE. * * -EINVAL in case of error. */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 * of the Intel Software Development Manual Vol. 1: * * + DS for all references involving r[ABCD]X, and rSI. * + If used in a string instruction, ES for rDI. Otherwise, DS. * + AX, CX and DX are not valid register operands in 16-bit address * encodings but are valid for 32-bit and 64-bit encodings. * + -EDOM is reserved to identify for cases in which no register * is used (i.e., displacement-only addressing). Use DS. * + SS for rSP or rBP. * + CS for rIP. */ switch (off) { case offsetof(struct pt_regs, ax): case offsetof(struct pt_regs, cx): case offsetof(struct pt_regs, dx): /* Need insn to verify address size. */ if (insn->addr_bytes == 2) return -EINVAL; /* fall through */ case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): return INAT_SEG_REG_DS; case offsetof(struct pt_regs, di): if (is_string_insn(insn)) return INAT_SEG_REG_ES; return INAT_SEG_REG_DS; case offsetof(struct pt_regs, bp): case offsetof(struct pt_regs, sp): return INAT_SEG_REG_SS; case offsetof(struct pt_regs, ip): return INAT_SEG_REG_CS; default: return -EINVAL; } } /** * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to deterimine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. * * The segment register associated to an operand used in register-indirect * addressing depends on: * * a) Whether running in long mode (in such a case segments are ignored, except * if FS or GS are used). * * b) Whether segment override prefixes can be used. Certain instructions and * registers do not allow override prefixes. * * c) Whether segment overrides prefixes are found in the instruction prefixes. * * d) If there are not segment override prefixes or they cannot be used, the * default segment register associated with the operand register is used. * * The function checks first if segment override prefixes can be used with the * operand indicated by @regoff. If allowed, obtain such overridden segment * register index. Lastly, if not prefixes were found or cannot be used, resolve * the segment register index to use based on the defaults described in the * Intel documentation. In long mode, all segment register indexes will be * ignored, except if overrides were found for FS or GS. All these operations * are done using helper functions. * * The operand register, @regoff, is represented as the offset from the base of * pt_regs. * * As stated, the main use of this function is to determine the segment register * index based on the instruction, its operands and prefixes. Hence, @insn * must be valid. 
However, if @regoff indicates rIP, we don't need to inspect * @insn at all as in this case CS is used in all cases. This case is checked * before proceeding further. * * Please note that this function does not return the value in the segment * register (i.e., the segment selector) but our defined index. The segment * selector needs to be obtained using get_segment_selector() and passing the * segment register index resolved by this function. * * Returns: * * An index identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. * * -EINVAL in case of error. */ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) { int idx; /* * In the unlikely event of having to resolve the segment register * index for rIP, do it first. Segment override prefixes should not * be used. Hence, it is not necessary to inspect the instruction, * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; } if (!insn) return -EINVAL; if (!check_seg_overrides(insn, regoff)) return resolve_default_seg(insn, regs, regoff); idx = get_seg_reg_override_idx(insn); if (idx < 0) return idx; if (idx == INAT_SEG_REG_DEFAULT) return resolve_default_seg(insn, regs, regoff); /* * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ if (user_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; } return idx; } /** * get_segment_selector() - obtain segment selector * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Segment register index to use * * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU * registers. This done for only for completeness as in CONFIG_X86_64 segment * registers are ignored. * * Returns: * * Value of the segment selector, including null when running in * long mode. * * -EINVAL on error. 
*/ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) { #ifdef CONFIG_X86_64 unsigned short sel; switch (seg_reg_idx) { case INAT_SEG_REG_IGNORE: return 0; case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: savesegment(ds, sel); return sel; case INAT_SEG_REG_ES: savesegment(es, sel); return sel; case INAT_SEG_REG_FS: savesegment(fs, sel); return sel; case INAT_SEG_REG_GS: savesegment(gs, sel); return sel; default: return -EINVAL; } #else /* CONFIG_X86_32 */ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; if (v8086_mode(regs)) { switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return vm86regs->ds; case INAT_SEG_REG_ES: return vm86regs->es; case INAT_SEG_REG_FS: return vm86regs->fs; case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } } switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return (unsigned short)(regs->ds & 0xffff); case INAT_SEG_REG_ES: return (unsigned short)(regs->es & 0xffff); case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: /* * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. * The macro below takes care of both cases. */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } #endif /* CONFIG_X86_64 */ } static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. 
*/ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; } /** * get_reg_offset_16() - Obtain offset of register indicated by instruction * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register * @offs2: Offset of the second opeand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The * @offs1 and @offs2 will be written with the offset of the two registers * indicated by the instruction. In cases where any of the registers is not * referenced by the instruction, the value will be set to -EDOM. * * Returns: * * 0 on success, -EINVAL on error. */ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, int *offs1, int *offs2) { /* * 16-bit addressing can use one or two registers. Specifics of * encodings are given in Table 2-1. "16-Bit Addressing Forms with the * ModR/M Byte" of the Intel Software Development Manual. */ static const int regoff1[] = { offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bx), }; static const int regoff2[] = { offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), -EDOM, -EDOM, -EDOM, -EDOM, }; if (!offs1 || !offs2) return -EINVAL; /* Operand is a register, use the generic function. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) { *offs1 = insn_get_modrm_rm_off(insn, regs); *offs2 = -EDOM; return 0; } *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; /* * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first * register offset is invalild. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && (X86_MODRM_RM(insn->modrm.value) == 6)) *offs1 = -EDOM; return 0; } /** * get_desc() - Obtain contents of a segment descriptor * @out: Segment descriptor contents on success * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. * Both global and local descriptor tables are supported. * * Returns: * * True on success, false on failure. * * NULL on error. */ static bool get_desc(struct desc_struct *out, unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { bool success = false; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) { *out = ldt->entries[sel]; success = true; } mutex_unlock(&current->active_mm->context.lock); return success; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. 
Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return false; *out = *(struct desc_struct *)(gdt_desc.address + desc_base); return true; } /** * insn_get_seg_base() - Obtain base address of segment descriptor. * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the base address of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, base address of the segment. Zero in long mode, * except when FS or GS are used. In virtual-8086 mode, the segment * selector shifted 4 bits to the right. * * -1L in case of error. */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; if (!get_desc(&desc, sel)) return -1L; return get_desc_base(&desc); } /** * get_seg_limit() - Obtain the limit of a segment descriptor * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the limit of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, the limit of the segment descriptor in bytes. * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, * limit is returned as -1L to imply a limit-less segment. * * Zero is returned on error. */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; unsigned long limit; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return 0; if (user_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; if (!get_desc(&desc, sel)) return 0; /* * If the granularity bit is set, the limit is given in multiples * of 4096. This also means that the 12 least significant bits are * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ limit = get_desc_limit(&desc); if (desc.g) limit = (limit << 12) + 0xfff; return limit; } /** * insn_get_code_seg_params() - Obtain code segment parameters * @regs: Structure with register values as seen when entering kernel mode * * Obtain address and operand sizes of the code segment. 
It is obtained from the * selector contained in the CS register in regs. In protected mode, the default * address is determined by inspecting the L and D bits of the segment * descriptor. In virtual-8086 mode, the default is always two bytes for both * address and operand sizes. * * Returns: * * An int containing ORed-in default parameters on success. * * -EINVAL on error. */ int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; if (!get_desc(&desc, sel)) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc.type & BIT(3))) return -EINVAL; switch ((desc.l << 1) | desc.d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } } /** * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte * @insn: Instruction containing the ModRM byte * @regs: Register values as seen when entering kernel mode * * Returns: * * The register indicated by the r/m part of the ModRM byte. The * register is obtained as an offset from the base of pt_regs. In specific * cases, the returned value can be -EDOM to indicate that the particular value * of ModRM does not refer to a register and shall be ignored. */ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) { return get_reg_offset(insn, regs, REG_TYPE_RM); } /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor * @base: Obtained segment base * @limit: Obtained segment limit * * Obtain the base address and limit of the segment associated with the operand * @regoff and, if any or allowed, override prefixes in @insn. This function is * different from insn_get_seg_base() as the latter does not resolve the segment * associated with the instruction operand. If a limit is not needed (e.g., * when running in long mode), @limit can be NULL. * * Returns: * * 0 on success. @base and @limit will contain the base address and of the * resolved segment, respectively. * * -EINVAL on error. */ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, int regoff, unsigned long *base, unsigned long *limit) { int seg_reg_idx; if (!base) return -EINVAL; seg_reg_idx = resolve_seg_reg(insn, regs, regoff); if (seg_reg_idx < 0) return seg_reg_idx; *base = insn_get_seg_base(regs, seg_reg_idx); if (*base == -1L) return -EINVAL; if (!limit) return 0; *limit = get_seg_limit(regs, seg_reg_idx); if (!(*limit)) return -EINVAL; return 0; } /** * get_eff_addr_reg() - Obtain effective address from register operand * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, with the effective address * @eff_addr: Obtained effective address * * Obtain the effective address stored in the register operand as indicated by * the ModRM byte. This function is to be used only with register addressing * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The * register operand, as an offset from the base of pt_regs, is saved in @regoff; * such offset can then be used to resolve the segment associated with the * operand. This function can be used with any of the supported address sizes * in x86. * * Returns: * * 0 on success. @eff_addr will have the effective address stored in the * operand indicated by ModRM. @regoff will have such operand as an offset from * the base of pt_regs. * * -EINVAL on error. */ static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); if (*regoff < 0) return -EINVAL; /* Ignore bytes that are outside the address size. */ if (insn->addr_bytes == 2) *eff_addr = regs_get_register(regs, *regoff) & 0xffff; else if (insn->addr_bytes == 4) *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff; else /* 64-bit address */ *eff_addr = regs_get_register(regs, *regoff); return 0; } /** * get_eff_addr_modrm() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the ModRM byte of @insn. After * identifying the registers involved in the register-indirect memory reference, * its value is obtained from the operands in @regs. The computed address is * stored @eff_addr. Also, the register operand that indicates the associated * segment is stored in @regoff, this parameter can later be used to determine * such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); /* * -EDOM means that we must ignore the address_offset. In such a case, * in 64-bit mode the effective address relative to the rIP of the * following instruction. */ if (*regoff == -EDOM) { if (user_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; } else if (*regoff < 0) { return -EINVAL; } else { tmp = regs_get_register(regs, *regoff); } if (insn->addr_bytes == 4) { int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = tmp + insn->displacement.value; } return 0; } /** * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the 16-bit effective address referenced by the ModRM byte of @insn. * After identifying the registers involved in the register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates * the associated segment is stored in @regoff, this parameter can later be used * to determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, int *regoff, short *eff_addr) { int addr_offset1, addr_offset2, ret; short addr1 = 0, addr2 = 0, displacement; if (insn->addr_bytes != 2) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2); if (ret < 0) return -EINVAL; /* * Don't fail on invalid offset values. They might be invalid because * they cannot be used for this particular value of ModRM. Instead, use * them in the computation only if they contain a valid value. */ if (addr_offset1 != -EDOM) addr1 = regs_get_register(regs, addr_offset1) & 0xffff; if (addr_offset2 != -EDOM) addr2 = regs_get_register(regs, addr_offset2) & 0xffff; displacement = insn->displacement.value & 0xffff; *eff_addr = addr1 + addr2 + displacement; /* * The first operand register could indicate to use of either SS or DS * registers to obtain the segment selector. The second operand * register can only indicate the use of DS. Thus, the first operand * will be used to obtain the segment selector. */ *regoff = addr_offset1; return 0; } /** * get_eff_addr_sib() - Obtain referenced effective address via SIB * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the SIB byte of @insn. After * identifying the registers involved in the indexed, register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates the * associated segment is stored in @regoff, this parameter can later be used to * determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; insn_get_sib(insn); if (!insn->sib.nbytes) return -EINVAL; *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); /* * Negative values in the base and index offset means an error when * decoding the SIB byte. 
Except -EDOM, which means that the registers * should not be used in the address computation. */ if (*base_offset == -EDOM) base = 0; else if (*base_offset < 0) return -EINVAL; else base = regs_get_register(regs, *base_offset); if (indx_offset == -EDOM) indx = 0; else if (indx_offset < 0) return -EINVAL; else indx = regs_get_register(regs, indx_offset); if (insn->addr_bytes == 4) { int addr32, base32, idx32; base32 = base & 0xffffffff; idx32 = indx & 0xffffffff; addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value)); addr32 += insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value)); *eff_addr += insn->displacement.value; } return 0; } /** * get_addr_ref_16() - Obtain the 16-bit address referred by instruction * @insn: Instruction containing ModRM byte and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 16-bit address encodings. Obtain the memory * address referred by the instruction's ModRM and displacement bytes. Also, the * segment used as base is determined by either any segment override prefixes in * @insn or the default segment of the registers involved in the address * computation. In protected mode, segment limits are enforced. * * Returns: * * Linear address referenced by the instruction operands on success. * * -1L on error. */ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int ret, regoff; short eff_addr; long tmp; insn_get_modrm(insn); insn_get_displacement(insn); if (insn->addr_bytes != 2) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr); if (ret) goto out; } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * Before computing the linear address, make sure the effective address * is within the limits of the segment. In virtual-8086 mode, segment * limits are not enforced. In such a case, the segment limit is -1L to * reflect this fact. */ if ((unsigned long)(eff_addr & 0xffff) > seg_limit) goto out; linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_32() - Obtain a 32-bit linear address * @insn: Instruction with ModRM, SIB bytes and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 32-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. If in protected * mode, segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. 
*/ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int eff_addr, regoff; long tmp; int ret; if (insn->addr_bytes != 4) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * In protected mode, before computing the linear address, make sure * the effective address is within the limits of the segment. * 32-bit addresses can be used in long and virtual-8086 modes if an * address override prefix is used. In such cases, segment limits are * not enforced. When in virtual-8086 mode, the segment limit is -1L * to reflect this situation. * * After computed, the effective address is treated as an unsigned * quantity. */ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* * Even though 32-bit address encodings are allowed in virtual-8086 * mode, the address range is still limited to [0x-0xffff]. */ if (v8086_mode(regs) && (eff_addr & ~0xffff)) goto out; /* * Data type long could be 64 bits in size. Ensure that our 32-bit * effective address is not sign-extended when computing the linear * address. */ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_64() - Obtain a 64-bit linear address * @insn: Instruction struct with ModRM and SIB bytes and displacement * @regs: Structure with register values as seen when entering kernel mode * * This function is to be used with 64-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. */ #ifndef CONFIG_X86_64 static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { return (void __user *)-1L; } #else static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base; int regoff, ret; long eff_addr; if (insn->addr_bytes != 8) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr); if (ret) goto out; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL); if (ret) goto out; linear_addr = (unsigned long)eff_addr + seg_base; out: return (void __user *)linear_addr; } #endif /* CONFIG_X86_64 */ /** * insn_get_addr_ref() - Obtain the linear address referred by instruction * @insn: Instruction structure containing ModRM byte and displacement * @regs: Structure with register values as seen when entering kernel mode * * Obtain the linear address referred by the instruction's ModRM, SIB and * displacement bytes, and segment base, as applicable. In protected mode, * segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. 
* * -1L on error. */ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) { if (!insn || !regs) return (void __user *)-1L; switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); case 4: return get_addr_ref_32(insn, regs); case 8: return get_addr_ref_64(insn, regs); default: return (void __user *)-1L; } }
static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
{
	struct desc_struct *desc;
	unsigned long limit;
	short sel;

	sel = get_segment_selector(regs, seg_reg_idx);
	if (sel < 0)
		return 0;

	if (user_64bit_mode(regs) || v8086_mode(regs))
		return -1L;

	if (!sel)
		return 0;

	desc = get_desc(sel);
	if (!desc)
		return 0;

	/*
	 * If the granularity bit is set, the limit is given in multiples
	 * of 4096. This also means that the 12 least significant bits are
	 * not tested when checking the segment limits. In practice,
	 * this means that the segment ends in (limit << 12) + 0xfff.
	 */
	limit = get_desc_limit(desc);
	if (desc->g)
		limit = (limit << 12) + 0xfff;

	return limit;
}
static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
{
	struct desc_struct desc;
	unsigned long limit;
	short sel;

	sel = get_segment_selector(regs, seg_reg_idx);
	if (sel < 0)
		return 0;

	if (user_64bit_mode(regs) || v8086_mode(regs))
		return -1L;

	if (!sel)
		return 0;

	if (!get_desc(&desc, sel))
		return 0;

	/*
	 * If the granularity bit is set, the limit is given in multiples
	 * of 4096. This also means that the 12 least significant bits are
	 * not tested when checking the segment limits. In practice,
	 * this means that the segment ends in (limit << 12) + 0xfff.
	 */
	limit = get_desc_limit(&desc);
	if (desc.g)
		limit = (limit << 12) + 0xfff;

	return limit;
}
{'added': [(560, ' * get_desc() - Obtain contents of a segment descriptor'), (561, ' * @out:\tSegment descriptor contents on success'), (569, ' * True on success, false on failure.'), (573, 'static bool get_desc(struct desc_struct *out, unsigned short sel)'), (580, '\t\tbool success = false;'), (588, '\t\tif (ldt && sel < ldt->nr_entries) {'), (589, '\t\t\t*out = ldt->entries[sel];'), (590, '\t\t\tsuccess = true;'), (591, '\t\t}'), (595, '\t\treturn success;'), (610, '\t\treturn false;'), (612, '\t*out = *(struct desc_struct *)(gdt_desc.address + desc_base);'), (613, '\treturn true;'), (635, '\tstruct desc_struct desc;'), (673, '\tif (!get_desc(&desc, sel))'), (676, '\treturn get_desc_base(&desc);'), (698, '\tstruct desc_struct desc;'), (712, '\tif (!get_desc(&desc, sel))'), (721, '\tlimit = get_desc_limit(&desc);'), (722, '\tif (desc.g)'), (746, '\tstruct desc_struct desc;'), (757, '\tif (!get_desc(&desc, sel))'), (765, '\tif (!(desc.type & BIT(3)))'), (768, '\tswitch ((desc.l << 1) | desc.d) {')], 'deleted': [(560, ' * get_desc() - Obtain pointer to a segment descriptor'), (568, ' * Pointer to segment descriptor on success.'), (572, 'static struct desc_struct *get_desc(unsigned short sel)'), (579, '\t\tstruct desc_struct *desc = NULL;'), (587, '\t\tif (ldt && sel < ldt->nr_entries)'), (588, '\t\t\tdesc = &ldt->entries[sel];'), (592, '\t\treturn desc;'), (607, '\t\treturn NULL;'), (609, '\treturn (struct desc_struct *)(gdt_desc.address + desc_base);'), (631, '\tstruct desc_struct *desc;'), (669, '\tdesc = get_desc(sel);'), (670, '\tif (!desc)'), (673, '\treturn get_desc_base(desc);'), (695, '\tstruct desc_struct *desc;'), (709, '\tdesc = get_desc(sel);'), (710, '\tif (!desc)'), (719, '\tlimit = get_desc_limit(desc);'), (720, '\tif (desc->g)'), (744, '\tstruct desc_struct *desc;'), (755, '\tdesc = get_desc(sel);'), (756, '\tif (!desc)'), (764, '\tif (!(desc->type & BIT(3)))'), (767, '\tswitch ((desc->l << 1) | desc->d) {')]}
24
23
634
3848
https://github.com/torvalds/linux
CVE-2019-13233
['CWE-416', 'CWE-362']
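The diff and vulnerability tags above (CVE-2019-13233, CWE-416/CWE-362) show get_desc() being changed from returning a pointer into the LDT to copying the descriptor into caller-owned storage while the context lock is held. The standalone sketch below illustrates that copy-out-under-lock pattern with simplified, made-up types and names; it is not the kernel's code.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct entry { unsigned long base, limit; };

struct table {
	pthread_mutex_t lock;
	struct entry *entries;
	unsigned int nr_entries;
};

/* Racy variant: the returned pointer may dangle once the lock is dropped. */
struct entry *lookup_unsafe(struct table *t, unsigned int idx)
{
	struct entry *e = NULL;

	pthread_mutex_lock(&t->lock);
	if (t->entries && idx < t->nr_entries)
		e = &t->entries[idx];
	pthread_mutex_unlock(&t->lock);

	return e;	/* use-after-free if the table is freed or replaced concurrently */
}

/* Fixed variant: copy the entry out while the lock is still held. */
bool lookup_copy(struct table *t, unsigned int idx, struct entry *out)
{
	bool ok = false;

	pthread_mutex_lock(&t->lock);
	if (t->entries && idx < t->nr_entries) {
		*out = t->entries[idx];
		ok = true;
	}
	pthread_mutex_unlock(&t->lock);

	return ok;
}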
insn-eval.c
insn_get_code_seg_params
/* * Utility functions for x86 operand and address decoding * * Copyright (C) Intel Corporation 2017 */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/mmu_context.h> #include <asm/desc_defs.h> #include <asm/desc.h> #include <asm/inat.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> #include <asm/vm86.h> #undef pr_fmt #define pr_fmt(fmt) "insn: " fmt enum reg_type { REG_TYPE_RM = 0, REG_TYPE_INDEX, REG_TYPE_BASE, }; /** * is_string_insn() - Determine if instruction is a string instruction * @insn: Instruction containing the opcode to inspect * * Returns: * * true if the instruction, determined by the opcode, is any of the * string instructions as defined in the Intel Software Development manual. * False otherwise. */ static bool is_string_insn(struct insn *insn) { insn_get_opcode(insn); /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; switch (insn->opcode.bytes[0]) { case 0x6c ... 0x6f: /* INS, OUTS */ case 0xa4 ... 0xa7: /* MOVS, CMPS */ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ return true; default: return false; } } /** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * * Inspect the instruction prefixes in @insn and find segment overrides, if any. * * Returns: * * A constant identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override * prefixes were found. * * -EINVAL in case of error. */ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ for (i = 0; i < insn->prefixes.nbytes; i++) { insn_attr_t attr; attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_SS): idx = INAT_SEG_REG_SS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_DS): idx = INAT_SEG_REG_DS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_ES): idx = INAT_SEG_REG_ES; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_FS): idx = INAT_SEG_REG_FS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_GS): idx = INAT_SEG_REG_GS; num_overrides++; break; /* No default action needed. */ } } /* More than one segment override prefix leads to undefined behavior. */ if (num_overrides > 1) return -EINVAL; return idx; } /** * check_seg_overrides() - check if segment override prefixes are allowed * @insn: Valid instruction with segment override prefixes * @regoff: Operand offset, in pt_regs, for which the check is performed * * For a particular register used in register-indirect addressing, determine if * segment override prefixes can be used. Specifically, no overrides are allowed * for rDI if used with a string instruction. * * Returns: * * True if segment override prefixes can be used with the register indicated * in @regoff. False if otherwise. */ static bool check_seg_overrides(struct insn *insn, int regoff) { if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn)) return false; return true; } /** * resolve_default_seg() - resolve default segment register index for an operand * @insn: Instruction with opcode and address size. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @off: Operand offset, in pt_regs, for which resolution is needed * * Resolve the default segment register index associated with the instruction * operand register indicated by @off. Such index is resolved based on defaults * described in the Intel Software Development Manual. * * Returns: * * If in protected mode, a constant identifying the segment register to use, * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE. * * -EINVAL in case of error. */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 * of the Intel Software Development Manual Vol. 1: * * + DS for all references involving r[ABCD]X, and rSI. * + If used in a string instruction, ES for rDI. Otherwise, DS. * + AX, CX and DX are not valid register operands in 16-bit address * encodings but are valid for 32-bit and 64-bit encodings. * + -EDOM is reserved to identify for cases in which no register * is used (i.e., displacement-only addressing). Use DS. * + SS for rSP or rBP. * + CS for rIP. */ switch (off) { case offsetof(struct pt_regs, ax): case offsetof(struct pt_regs, cx): case offsetof(struct pt_regs, dx): /* Need insn to verify address size. */ if (insn->addr_bytes == 2) return -EINVAL; /* fall through */ case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): return INAT_SEG_REG_DS; case offsetof(struct pt_regs, di): if (is_string_insn(insn)) return INAT_SEG_REG_ES; return INAT_SEG_REG_DS; case offsetof(struct pt_regs, bp): case offsetof(struct pt_regs, sp): return INAT_SEG_REG_SS; case offsetof(struct pt_regs, ip): return INAT_SEG_REG_CS; default: return -EINVAL; } } /** * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to deterimine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. * * The segment register associated to an operand used in register-indirect * addressing depends on: * * a) Whether running in long mode (in such a case segments are ignored, except * if FS or GS are used). * * b) Whether segment override prefixes can be used. Certain instructions and * registers do not allow override prefixes. * * c) Whether segment overrides prefixes are found in the instruction prefixes. * * d) If there are not segment override prefixes or they cannot be used, the * default segment register associated with the operand register is used. * * The function checks first if segment override prefixes can be used with the * operand indicated by @regoff. If allowed, obtain such overridden segment * register index. Lastly, if not prefixes were found or cannot be used, resolve * the segment register index to use based on the defaults described in the * Intel documentation. In long mode, all segment register indexes will be * ignored, except if overrides were found for FS or GS. All these operations * are done using helper functions. * * The operand register, @regoff, is represented as the offset from the base of * pt_regs. * * As stated, the main use of this function is to determine the segment register * index based on the instruction, its operands and prefixes. Hence, @insn * must be valid. 
However, if @regoff indicates rIP, we don't need to inspect * @insn at all as in this case CS is used in all cases. This case is checked * before proceeding further. * * Please note that this function does not return the value in the segment * register (i.e., the segment selector) but our defined index. The segment * selector needs to be obtained using get_segment_selector() and passing the * segment register index resolved by this function. * * Returns: * * An index identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. * * -EINVAL in case of error. */ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) { int idx; /* * In the unlikely event of having to resolve the segment register * index for rIP, do it first. Segment override prefixes should not * be used. Hence, it is not necessary to inspect the instruction, * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; } if (!insn) return -EINVAL; if (!check_seg_overrides(insn, regoff)) return resolve_default_seg(insn, regs, regoff); idx = get_seg_reg_override_idx(insn); if (idx < 0) return idx; if (idx == INAT_SEG_REG_DEFAULT) return resolve_default_seg(insn, regs, regoff); /* * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ if (user_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; } return idx; } /** * get_segment_selector() - obtain segment selector * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Segment register index to use * * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU * registers. This done for only for completeness as in CONFIG_X86_64 segment * registers are ignored. * * Returns: * * Value of the segment selector, including null when running in * long mode. * * -EINVAL on error. 
*/ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) { #ifdef CONFIG_X86_64 unsigned short sel; switch (seg_reg_idx) { case INAT_SEG_REG_IGNORE: return 0; case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: savesegment(ds, sel); return sel; case INAT_SEG_REG_ES: savesegment(es, sel); return sel; case INAT_SEG_REG_FS: savesegment(fs, sel); return sel; case INAT_SEG_REG_GS: savesegment(gs, sel); return sel; default: return -EINVAL; } #else /* CONFIG_X86_32 */ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; if (v8086_mode(regs)) { switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return vm86regs->ds; case INAT_SEG_REG_ES: return vm86regs->es; case INAT_SEG_REG_FS: return vm86regs->fs; case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } } switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return (unsigned short)(regs->ds & 0xffff); case INAT_SEG_REG_ES: return (unsigned short)(regs->es & 0xffff); case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: /* * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. * The macro below takes care of both cases. */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } #endif /* CONFIG_X86_64 */ } static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. 
*/ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; } /** * get_reg_offset_16() - Obtain offset of register indicated by instruction * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register * @offs2: Offset of the second opeand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The * @offs1 and @offs2 will be written with the offset of the two registers * indicated by the instruction. In cases where any of the registers is not * referenced by the instruction, the value will be set to -EDOM. * * Returns: * * 0 on success, -EINVAL on error. */ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, int *offs1, int *offs2) { /* * 16-bit addressing can use one or two registers. Specifics of * encodings are given in Table 2-1. "16-Bit Addressing Forms with the * ModR/M Byte" of the Intel Software Development Manual. */ static const int regoff1[] = { offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bx), }; static const int regoff2[] = { offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), -EDOM, -EDOM, -EDOM, -EDOM, }; if (!offs1 || !offs2) return -EINVAL; /* Operand is a register, use the generic function. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) { *offs1 = insn_get_modrm_rm_off(insn, regs); *offs2 = -EDOM; return 0; } *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; /* * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first * register offset is invalild. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && (X86_MODRM_RM(insn->modrm.value) == 6)) *offs1 = -EDOM; return 0; } /** * get_desc() - Obtain pointer to a segment descriptor * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. * Both global and local descriptor tables are supported. * * Returns: * * Pointer to segment descriptor on success. * * NULL on error. */ static struct desc_struct *get_desc(unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc = NULL; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) desc = &ldt->entries[sel]; mutex_unlock(&current->active_mm->context.lock); return desc; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. 
Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return NULL; return (struct desc_struct *)(gdt_desc.address + desc_base); } /** * insn_get_seg_base() - Obtain base address of segment descriptor. * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the base address of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, base address of the segment. Zero in long mode, * except when FS or GS are used. In virtual-8086 mode, the segment * selector shifted 4 bits to the right. * * -1L in case of error. */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; desc = get_desc(sel); if (!desc) return -1L; return get_desc_base(desc); } /** * get_seg_limit() - Obtain the limit of a segment descriptor * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the limit of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, the limit of the segment descriptor in bytes. * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, * limit is returned as -1L to imply a limit-less segment. * * Zero is returned on error. */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; unsigned long limit; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return 0; if (user_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; desc = get_desc(sel); if (!desc) return 0; /* * If the granularity bit is set, the limit is given in multiples * of 4096. This also means that the 12 least significant bits are * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ limit = get_desc_limit(desc); if (desc->g) limit = (limit << 12) + 0xfff; return limit; } /** * insn_get_code_seg_params() - Obtain code segment parameters * @regs: Structure with register values as seen when entering kernel mode * * Obtain address and operand sizes of the code segment. It is obtained from the * selector contained in the CS register in regs. 
In protected mode, the default * address is determined by inspecting the L and D bits of the segment * descriptor. In virtual-8086 mode, the default is always two bytes for both * address and operand sizes. * * Returns: * * An int containing ORed-in default parameters on success. * * -EINVAL on error. */ int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct *desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; desc = get_desc(sel); if (!desc) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc->type & BIT(3))) return -EINVAL; switch ((desc->l << 1) | desc->d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } } /** * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte * @insn: Instruction containing the ModRM byte * @regs: Register values as seen when entering kernel mode * * Returns: * * The register indicated by the r/m part of the ModRM byte. The * register is obtained as an offset from the base of pt_regs. In specific * cases, the returned value can be -EDOM to indicate that the particular value * of ModRM does not refer to a register and shall be ignored. */ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) { return get_reg_offset(insn, regs, REG_TYPE_RM); } /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor * @base: Obtained segment base * @limit: Obtained segment limit * * Obtain the base address and limit of the segment associated with the operand * @regoff and, if any or allowed, override prefixes in @insn. This function is * different from insn_get_seg_base() as the latter does not resolve the segment * associated with the instruction operand. If a limit is not needed (e.g., * when running in long mode), @limit can be NULL. * * Returns: * * 0 on success. @base and @limit will contain the base address and of the * resolved segment, respectively. * * -EINVAL on error. */ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, int regoff, unsigned long *base, unsigned long *limit) { int seg_reg_idx; if (!base) return -EINVAL; seg_reg_idx = resolve_seg_reg(insn, regs, regoff); if (seg_reg_idx < 0) return seg_reg_idx; *base = insn_get_seg_base(regs, seg_reg_idx); if (*base == -1L) return -EINVAL; if (!limit) return 0; *limit = get_seg_limit(regs, seg_reg_idx); if (!(*limit)) return -EINVAL; return 0; } /** * get_eff_addr_reg() - Obtain effective address from register operand * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, with the effective address * @eff_addr: Obtained effective address * * Obtain the effective address stored in the register operand as indicated by * the ModRM byte. This function is to be used only with register addressing * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The * register operand, as an offset from the base of pt_regs, is saved in @regoff; * such offset can then be used to resolve the segment associated with the * operand. This function can be used with any of the supported address sizes * in x86. * * Returns: * * 0 on success. @eff_addr will have the effective address stored in the * operand indicated by ModRM. @regoff will have such operand as an offset from * the base of pt_regs. * * -EINVAL on error. */ static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); if (*regoff < 0) return -EINVAL; /* Ignore bytes that are outside the address size. */ if (insn->addr_bytes == 2) *eff_addr = regs_get_register(regs, *regoff) & 0xffff; else if (insn->addr_bytes == 4) *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff; else /* 64-bit address */ *eff_addr = regs_get_register(regs, *regoff); return 0; } /** * get_eff_addr_modrm() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the ModRM byte of @insn. After * identifying the registers involved in the register-indirect memory reference, * its value is obtained from the operands in @regs. The computed address is * stored @eff_addr. Also, the register operand that indicates the associated * segment is stored in @regoff, this parameter can later be used to determine * such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); /* * -EDOM means that we must ignore the address_offset. In such a case, * in 64-bit mode the effective address relative to the rIP of the * following instruction. */ if (*regoff == -EDOM) { if (user_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; } else if (*regoff < 0) { return -EINVAL; } else { tmp = regs_get_register(regs, *regoff); } if (insn->addr_bytes == 4) { int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = tmp + insn->displacement.value; } return 0; } /** * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the 16-bit effective address referenced by the ModRM byte of @insn. * After identifying the registers involved in the register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates * the associated segment is stored in @regoff, this parameter can later be used * to determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, int *regoff, short *eff_addr) { int addr_offset1, addr_offset2, ret; short addr1 = 0, addr2 = 0, displacement; if (insn->addr_bytes != 2) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2); if (ret < 0) return -EINVAL; /* * Don't fail on invalid offset values. They might be invalid because * they cannot be used for this particular value of ModRM. Instead, use * them in the computation only if they contain a valid value. */ if (addr_offset1 != -EDOM) addr1 = regs_get_register(regs, addr_offset1) & 0xffff; if (addr_offset2 != -EDOM) addr2 = regs_get_register(regs, addr_offset2) & 0xffff; displacement = insn->displacement.value & 0xffff; *eff_addr = addr1 + addr2 + displacement; /* * The first operand register could indicate to use of either SS or DS * registers to obtain the segment selector. The second operand * register can only indicate the use of DS. Thus, the first operand * will be used to obtain the segment selector. */ *regoff = addr_offset1; return 0; } /** * get_eff_addr_sib() - Obtain referenced effective address via SIB * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the SIB byte of @insn. After * identifying the registers involved in the indexed, register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates the * associated segment is stored in @regoff, this parameter can later be used to * determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; insn_get_sib(insn); if (!insn->sib.nbytes) return -EINVAL; *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); /* * Negative values in the base and index offset means an error when * decoding the SIB byte. 
Except -EDOM, which means that the registers * should not be used in the address computation. */ if (*base_offset == -EDOM) base = 0; else if (*base_offset < 0) return -EINVAL; else base = regs_get_register(regs, *base_offset); if (indx_offset == -EDOM) indx = 0; else if (indx_offset < 0) return -EINVAL; else indx = regs_get_register(regs, indx_offset); if (insn->addr_bytes == 4) { int addr32, base32, idx32; base32 = base & 0xffffffff; idx32 = indx & 0xffffffff; addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value)); addr32 += insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value)); *eff_addr += insn->displacement.value; } return 0; } /** * get_addr_ref_16() - Obtain the 16-bit address referred by instruction * @insn: Instruction containing ModRM byte and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 16-bit address encodings. Obtain the memory * address referred by the instruction's ModRM and displacement bytes. Also, the * segment used as base is determined by either any segment override prefixes in * @insn or the default segment of the registers involved in the address * computation. In protected mode, segment limits are enforced. * * Returns: * * Linear address referenced by the instruction operands on success. * * -1L on error. */ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int ret, regoff; short eff_addr; long tmp; insn_get_modrm(insn); insn_get_displacement(insn); if (insn->addr_bytes != 2) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr); if (ret) goto out; } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * Before computing the linear address, make sure the effective address * is within the limits of the segment. In virtual-8086 mode, segment * limits are not enforced. In such a case, the segment limit is -1L to * reflect this fact. */ if ((unsigned long)(eff_addr & 0xffff) > seg_limit) goto out; linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_32() - Obtain a 32-bit linear address * @insn: Instruction with ModRM, SIB bytes and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 32-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. If in protected * mode, segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. 
*/ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int eff_addr, regoff; long tmp; int ret; if (insn->addr_bytes != 4) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * In protected mode, before computing the linear address, make sure * the effective address is within the limits of the segment. * 32-bit addresses can be used in long and virtual-8086 modes if an * address override prefix is used. In such cases, segment limits are * not enforced. When in virtual-8086 mode, the segment limit is -1L * to reflect this situation. * * After computed, the effective address is treated as an unsigned * quantity. */ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* * Even though 32-bit address encodings are allowed in virtual-8086 * mode, the address range is still limited to [0x-0xffff]. */ if (v8086_mode(regs) && (eff_addr & ~0xffff)) goto out; /* * Data type long could be 64 bits in size. Ensure that our 32-bit * effective address is not sign-extended when computing the linear * address. */ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_64() - Obtain a 64-bit linear address * @insn: Instruction struct with ModRM and SIB bytes and displacement * @regs: Structure with register values as seen when entering kernel mode * * This function is to be used with 64-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. */ #ifndef CONFIG_X86_64 static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { return (void __user *)-1L; } #else static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base; int regoff, ret; long eff_addr; if (insn->addr_bytes != 8) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr); if (ret) goto out; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL); if (ret) goto out; linear_addr = (unsigned long)eff_addr + seg_base; out: return (void __user *)linear_addr; } #endif /* CONFIG_X86_64 */ /** * insn_get_addr_ref() - Obtain the linear address referred by instruction * @insn: Instruction structure containing ModRM byte and displacement * @regs: Structure with register values as seen when entering kernel mode * * Obtain the linear address referred by the instruction's ModRM, SIB and * displacement bytes, and segment base, as applicable. In protected mode, * segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. 
* * -1L on error. */ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) { if (!insn || !regs) return (void __user *)-1L; switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); case 4: return get_addr_ref_32(insn, regs); case 8: return get_addr_ref_64(insn, regs); default: return (void __user *)-1L; } }
/* * Utility functions for x86 operand and address decoding * * Copyright (C) Intel Corporation 2017 */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/mmu_context.h> #include <asm/desc_defs.h> #include <asm/desc.h> #include <asm/inat.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> #include <asm/vm86.h> #undef pr_fmt #define pr_fmt(fmt) "insn: " fmt enum reg_type { REG_TYPE_RM = 0, REG_TYPE_INDEX, REG_TYPE_BASE, }; /** * is_string_insn() - Determine if instruction is a string instruction * @insn: Instruction containing the opcode to inspect * * Returns: * * true if the instruction, determined by the opcode, is any of the * string instructions as defined in the Intel Software Development manual. * False otherwise. */ static bool is_string_insn(struct insn *insn) { insn_get_opcode(insn); /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; switch (insn->opcode.bytes[0]) { case 0x6c ... 0x6f: /* INS, OUTS */ case 0xa4 ... 0xa7: /* MOVS, CMPS */ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ return true; default: return false; } } /** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * * Inspect the instruction prefixes in @insn and find segment overrides, if any. * * Returns: * * A constant identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override * prefixes were found. * * -EINVAL in case of error. */ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ for (i = 0; i < insn->prefixes.nbytes; i++) { insn_attr_t attr; attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_SS): idx = INAT_SEG_REG_SS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_DS): idx = INAT_SEG_REG_DS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_ES): idx = INAT_SEG_REG_ES; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_FS): idx = INAT_SEG_REG_FS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_GS): idx = INAT_SEG_REG_GS; num_overrides++; break; /* No default action needed. */ } } /* More than one segment override prefix leads to undefined behavior. */ if (num_overrides > 1) return -EINVAL; return idx; } /** * check_seg_overrides() - check if segment override prefixes are allowed * @insn: Valid instruction with segment override prefixes * @regoff: Operand offset, in pt_regs, for which the check is performed * * For a particular register used in register-indirect addressing, determine if * segment override prefixes can be used. Specifically, no overrides are allowed * for rDI if used with a string instruction. * * Returns: * * True if segment override prefixes can be used with the register indicated * in @regoff. False if otherwise. */ static bool check_seg_overrides(struct insn *insn, int regoff) { if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn)) return false; return true; } /** * resolve_default_seg() - resolve default segment register index for an operand * @insn: Instruction with opcode and address size. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @off: Operand offset, in pt_regs, for which resolution is needed * * Resolve the default segment register index associated with the instruction * operand register indicated by @off. Such index is resolved based on defaults * described in the Intel Software Development Manual. * * Returns: * * If in protected mode, a constant identifying the segment register to use, * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE. * * -EINVAL in case of error. */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 * of the Intel Software Development Manual Vol. 1: * * + DS for all references involving r[ABCD]X, and rSI. * + If used in a string instruction, ES for rDI. Otherwise, DS. * + AX, CX and DX are not valid register operands in 16-bit address * encodings but are valid for 32-bit and 64-bit encodings. * + -EDOM is reserved to identify for cases in which no register * is used (i.e., displacement-only addressing). Use DS. * + SS for rSP or rBP. * + CS for rIP. */ switch (off) { case offsetof(struct pt_regs, ax): case offsetof(struct pt_regs, cx): case offsetof(struct pt_regs, dx): /* Need insn to verify address size. */ if (insn->addr_bytes == 2) return -EINVAL; /* fall through */ case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): return INAT_SEG_REG_DS; case offsetof(struct pt_regs, di): if (is_string_insn(insn)) return INAT_SEG_REG_ES; return INAT_SEG_REG_DS; case offsetof(struct pt_regs, bp): case offsetof(struct pt_regs, sp): return INAT_SEG_REG_SS; case offsetof(struct pt_regs, ip): return INAT_SEG_REG_CS; default: return -EINVAL; } } /** * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to deterimine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. * * The segment register associated to an operand used in register-indirect * addressing depends on: * * a) Whether running in long mode (in such a case segments are ignored, except * if FS or GS are used). * * b) Whether segment override prefixes can be used. Certain instructions and * registers do not allow override prefixes. * * c) Whether segment overrides prefixes are found in the instruction prefixes. * * d) If there are not segment override prefixes or they cannot be used, the * default segment register associated with the operand register is used. * * The function checks first if segment override prefixes can be used with the * operand indicated by @regoff. If allowed, obtain such overridden segment * register index. Lastly, if not prefixes were found or cannot be used, resolve * the segment register index to use based on the defaults described in the * Intel documentation. In long mode, all segment register indexes will be * ignored, except if overrides were found for FS or GS. All these operations * are done using helper functions. * * The operand register, @regoff, is represented as the offset from the base of * pt_regs. * * As stated, the main use of this function is to determine the segment register * index based on the instruction, its operands and prefixes. Hence, @insn * must be valid. 
However, if @regoff indicates rIP, we don't need to inspect * @insn at all as in this case CS is used in all cases. This case is checked * before proceeding further. * * Please note that this function does not return the value in the segment * register (i.e., the segment selector) but our defined index. The segment * selector needs to be obtained using get_segment_selector() and passing the * segment register index resolved by this function. * * Returns: * * An index identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. * * -EINVAL in case of error. */ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) { int idx; /* * In the unlikely event of having to resolve the segment register * index for rIP, do it first. Segment override prefixes should not * be used. Hence, it is not necessary to inspect the instruction, * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; } if (!insn) return -EINVAL; if (!check_seg_overrides(insn, regoff)) return resolve_default_seg(insn, regs, regoff); idx = get_seg_reg_override_idx(insn); if (idx < 0) return idx; if (idx == INAT_SEG_REG_DEFAULT) return resolve_default_seg(insn, regs, regoff); /* * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ if (user_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; } return idx; } /** * get_segment_selector() - obtain segment selector * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Segment register index to use * * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU * registers. This done for only for completeness as in CONFIG_X86_64 segment * registers are ignored. * * Returns: * * Value of the segment selector, including null when running in * long mode. * * -EINVAL on error. 
*/ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) { #ifdef CONFIG_X86_64 unsigned short sel; switch (seg_reg_idx) { case INAT_SEG_REG_IGNORE: return 0; case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: savesegment(ds, sel); return sel; case INAT_SEG_REG_ES: savesegment(es, sel); return sel; case INAT_SEG_REG_FS: savesegment(fs, sel); return sel; case INAT_SEG_REG_GS: savesegment(gs, sel); return sel; default: return -EINVAL; } #else /* CONFIG_X86_32 */ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; if (v8086_mode(regs)) { switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return vm86regs->ds; case INAT_SEG_REG_ES: return vm86regs->es; case INAT_SEG_REG_FS: return vm86regs->fs; case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } } switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return (unsigned short)(regs->ds & 0xffff); case INAT_SEG_REG_ES: return (unsigned short)(regs->es & 0xffff); case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: /* * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. * The macro below takes care of both cases. */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } #endif /* CONFIG_X86_64 */ } static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. 
*/ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; } /** * get_reg_offset_16() - Obtain offset of register indicated by instruction * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register * @offs2: Offset of the second opeand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The * @offs1 and @offs2 will be written with the offset of the two registers * indicated by the instruction. In cases where any of the registers is not * referenced by the instruction, the value will be set to -EDOM. * * Returns: * * 0 on success, -EINVAL on error. */ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, int *offs1, int *offs2) { /* * 16-bit addressing can use one or two registers. Specifics of * encodings are given in Table 2-1. "16-Bit Addressing Forms with the * ModR/M Byte" of the Intel Software Development Manual. */ static const int regoff1[] = { offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bx), }; static const int regoff2[] = { offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), -EDOM, -EDOM, -EDOM, -EDOM, }; if (!offs1 || !offs2) return -EINVAL; /* Operand is a register, use the generic function. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) { *offs1 = insn_get_modrm_rm_off(insn, regs); *offs2 = -EDOM; return 0; } *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; /* * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first * register offset is invalild. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && (X86_MODRM_RM(insn->modrm.value) == 6)) *offs1 = -EDOM; return 0; } /** * get_desc() - Obtain contents of a segment descriptor * @out: Segment descriptor contents on success * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. * Both global and local descriptor tables are supported. * * Returns: * * True on success, false on failure. * * NULL on error. */ static bool get_desc(struct desc_struct *out, unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { bool success = false; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) { *out = ldt->entries[sel]; success = true; } mutex_unlock(&current->active_mm->context.lock); return success; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. 
Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return false; *out = *(struct desc_struct *)(gdt_desc.address + desc_base); return true; } /** * insn_get_seg_base() - Obtain base address of segment descriptor. * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the base address of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, base address of the segment. Zero in long mode, * except when FS or GS are used. In virtual-8086 mode, the segment * selector shifted 4 bits to the right. * * -1L in case of error. */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; if (!get_desc(&desc, sel)) return -1L; return get_desc_base(&desc); } /** * get_seg_limit() - Obtain the limit of a segment descriptor * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the limit of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, the limit of the segment descriptor in bytes. * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, * limit is returned as -1L to imply a limit-less segment. * * Zero is returned on error. */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; unsigned long limit; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return 0; if (user_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; if (!get_desc(&desc, sel)) return 0; /* * If the granularity bit is set, the limit is given in multiples * of 4096. This also means that the 12 least significant bits are * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ limit = get_desc_limit(&desc); if (desc.g) limit = (limit << 12) + 0xfff; return limit; } /** * insn_get_code_seg_params() - Obtain code segment parameters * @regs: Structure with register values as seen when entering kernel mode * * Obtain address and operand sizes of the code segment. 
It is obtained from the * selector contained in the CS register in regs. In protected mode, the default * address is determined by inspecting the L and D bits of the segment * descriptor. In virtual-8086 mode, the default is always two bytes for both * address and operand sizes. * * Returns: * * An int containing ORed-in default parameters on success. * * -EINVAL on error. */ int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; if (!get_desc(&desc, sel)) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc.type & BIT(3))) return -EINVAL; switch ((desc.l << 1) | desc.d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } } /** * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte * @insn: Instruction containing the ModRM byte * @regs: Register values as seen when entering kernel mode * * Returns: * * The register indicated by the r/m part of the ModRM byte. The * register is obtained as an offset from the base of pt_regs. In specific * cases, the returned value can be -EDOM to indicate that the particular value * of ModRM does not refer to a register and shall be ignored. */ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) { return get_reg_offset(insn, regs, REG_TYPE_RM); } /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor * @base: Obtained segment base * @limit: Obtained segment limit * * Obtain the base address and limit of the segment associated with the operand * @regoff and, if any or allowed, override prefixes in @insn. This function is * different from insn_get_seg_base() as the latter does not resolve the segment * associated with the instruction operand. If a limit is not needed (e.g., * when running in long mode), @limit can be NULL. * * Returns: * * 0 on success. @base and @limit will contain the base address and of the * resolved segment, respectively. * * -EINVAL on error. */ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, int regoff, unsigned long *base, unsigned long *limit) { int seg_reg_idx; if (!base) return -EINVAL; seg_reg_idx = resolve_seg_reg(insn, regs, regoff); if (seg_reg_idx < 0) return seg_reg_idx; *base = insn_get_seg_base(regs, seg_reg_idx); if (*base == -1L) return -EINVAL; if (!limit) return 0; *limit = get_seg_limit(regs, seg_reg_idx); if (!(*limit)) return -EINVAL; return 0; } /** * get_eff_addr_reg() - Obtain effective address from register operand * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, with the effective address * @eff_addr: Obtained effective address * * Obtain the effective address stored in the register operand as indicated by * the ModRM byte. This function is to be used only with register addressing * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The * register operand, as an offset from the base of pt_regs, is saved in @regoff; * such offset can then be used to resolve the segment associated with the * operand. This function can be used with any of the supported address sizes * in x86. * * Returns: * * 0 on success. @eff_addr will have the effective address stored in the * operand indicated by ModRM. @regoff will have such operand as an offset from * the base of pt_regs. * * -EINVAL on error. */ static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); if (*regoff < 0) return -EINVAL; /* Ignore bytes that are outside the address size. */ if (insn->addr_bytes == 2) *eff_addr = regs_get_register(regs, *regoff) & 0xffff; else if (insn->addr_bytes == 4) *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff; else /* 64-bit address */ *eff_addr = regs_get_register(regs, *regoff); return 0; } /** * get_eff_addr_modrm() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the ModRM byte of @insn. After * identifying the registers involved in the register-indirect memory reference, * its value is obtained from the operands in @regs. The computed address is * stored @eff_addr. Also, the register operand that indicates the associated * segment is stored in @regoff, this parameter can later be used to determine * such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); /* * -EDOM means that we must ignore the address_offset. In such a case, * in 64-bit mode the effective address relative to the rIP of the * following instruction. */ if (*regoff == -EDOM) { if (user_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; } else if (*regoff < 0) { return -EINVAL; } else { tmp = regs_get_register(regs, *regoff); } if (insn->addr_bytes == 4) { int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = tmp + insn->displacement.value; } return 0; } /** * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the 16-bit effective address referenced by the ModRM byte of @insn. * After identifying the registers involved in the register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates * the associated segment is stored in @regoff, this parameter can later be used * to determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, int *regoff, short *eff_addr) { int addr_offset1, addr_offset2, ret; short addr1 = 0, addr2 = 0, displacement; if (insn->addr_bytes != 2) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2); if (ret < 0) return -EINVAL; /* * Don't fail on invalid offset values. They might be invalid because * they cannot be used for this particular value of ModRM. Instead, use * them in the computation only if they contain a valid value. */ if (addr_offset1 != -EDOM) addr1 = regs_get_register(regs, addr_offset1) & 0xffff; if (addr_offset2 != -EDOM) addr2 = regs_get_register(regs, addr_offset2) & 0xffff; displacement = insn->displacement.value & 0xffff; *eff_addr = addr1 + addr2 + displacement; /* * The first operand register could indicate to use of either SS or DS * registers to obtain the segment selector. The second operand * register can only indicate the use of DS. Thus, the first operand * will be used to obtain the segment selector. */ *regoff = addr_offset1; return 0; } /** * get_eff_addr_sib() - Obtain referenced effective address via SIB * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the SIB byte of @insn. After * identifying the registers involved in the indexed, register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates the * associated segment is stored in @regoff, this parameter can later be used to * determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; insn_get_sib(insn); if (!insn->sib.nbytes) return -EINVAL; *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); /* * Negative values in the base and index offset means an error when * decoding the SIB byte. 
Except -EDOM, which means that the registers * should not be used in the address computation. */ if (*base_offset == -EDOM) base = 0; else if (*base_offset < 0) return -EINVAL; else base = regs_get_register(regs, *base_offset); if (indx_offset == -EDOM) indx = 0; else if (indx_offset < 0) return -EINVAL; else indx = regs_get_register(regs, indx_offset); if (insn->addr_bytes == 4) { int addr32, base32, idx32; base32 = base & 0xffffffff; idx32 = indx & 0xffffffff; addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value)); addr32 += insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value)); *eff_addr += insn->displacement.value; } return 0; } /** * get_addr_ref_16() - Obtain the 16-bit address referred by instruction * @insn: Instruction containing ModRM byte and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 16-bit address encodings. Obtain the memory * address referred by the instruction's ModRM and displacement bytes. Also, the * segment used as base is determined by either any segment override prefixes in * @insn or the default segment of the registers involved in the address * computation. In protected mode, segment limits are enforced. * * Returns: * * Linear address referenced by the instruction operands on success. * * -1L on error. */ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int ret, regoff; short eff_addr; long tmp; insn_get_modrm(insn); insn_get_displacement(insn); if (insn->addr_bytes != 2) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr); if (ret) goto out; } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * Before computing the linear address, make sure the effective address * is within the limits of the segment. In virtual-8086 mode, segment * limits are not enforced. In such a case, the segment limit is -1L to * reflect this fact. */ if ((unsigned long)(eff_addr & 0xffff) > seg_limit) goto out; linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_32() - Obtain a 32-bit linear address * @insn: Instruction with ModRM, SIB bytes and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 32-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. If in protected * mode, segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. 
*/ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int eff_addr, regoff; long tmp; int ret; if (insn->addr_bytes != 4) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * In protected mode, before computing the linear address, make sure * the effective address is within the limits of the segment. * 32-bit addresses can be used in long and virtual-8086 modes if an * address override prefix is used. In such cases, segment limits are * not enforced. When in virtual-8086 mode, the segment limit is -1L * to reflect this situation. * * After computed, the effective address is treated as an unsigned * quantity. */ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* * Even though 32-bit address encodings are allowed in virtual-8086 * mode, the address range is still limited to [0x-0xffff]. */ if (v8086_mode(regs) && (eff_addr & ~0xffff)) goto out; /* * Data type long could be 64 bits in size. Ensure that our 32-bit * effective address is not sign-extended when computing the linear * address. */ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_64() - Obtain a 64-bit linear address * @insn: Instruction struct with ModRM and SIB bytes and displacement * @regs: Structure with register values as seen when entering kernel mode * * This function is to be used with 64-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. */ #ifndef CONFIG_X86_64 static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { return (void __user *)-1L; } #else static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base; int regoff, ret; long eff_addr; if (insn->addr_bytes != 8) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr); if (ret) goto out; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL); if (ret) goto out; linear_addr = (unsigned long)eff_addr + seg_base; out: return (void __user *)linear_addr; } #endif /* CONFIG_X86_64 */ /** * insn_get_addr_ref() - Obtain the linear address referred by instruction * @insn: Instruction structure containing ModRM byte and displacement * @regs: Structure with register values as seen when entering kernel mode * * Obtain the linear address referred by the instruction's ModRM, SIB and * displacement bytes, and segment base, as applicable. In protected mode, * segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. 
* * -1L on error. */ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) { if (!insn || !regs) return (void __user *)-1L; switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); case 4: return get_addr_ref_32(insn, regs); case 8: return get_addr_ref_64(insn, regs); default: return (void __user *)-1L; } }
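The address-decoding helpers in the listing above ultimately reduce a ModRM/SIB memory operand to the arithmetic base + index * 2^scale + displacement (see get_eff_addr_sib()), with the segment base added afterwards. Below is a minimal standalone sketch of that arithmetic in plain C; the struct and field names are hypothetical simplifications, not the kernel's struct insn, which derives these values from the ModRM/SIB bytes and pt_regs.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified operand description. The kernel instead decodes
 * these fields from the instruction's ModRM/SIB bytes and reads the register
 * values from pt_regs. */
struct sib_operand {
	uint64_t base;		/* value of the base register, 0 if absent */
	uint64_t index;		/* value of the index register, 0 if absent */
	unsigned int scale;	/* SIB.scale field: 0..3 */
	int32_t disp;		/* sign-extended displacement */
};

/* Mirrors the 64-bit branch of get_eff_addr_sib():
 * effective address = base + index * 2^scale + displacement. */
static uint64_t sib_effective_address(const struct sib_operand *op)
{
	return op->base + op->index * (1ULL << op->scale) + (int64_t)op->disp;
}

int main(void)
{
	/* e.g. the operand [rbx + rcx*4 - 8] with rbx = 0x1000, rcx = 0x10 */
	struct sib_operand op = { .base = 0x1000, .index = 0x10, .scale = 2, .disp = -8 };

	printf("effective address = %#llx\n",
	       (unsigned long long)sib_effective_address(&op));
	return 0;
}

For this example operand the program prints 0x1038, the same value the 64-bit branch of get_eff_addr_sib() would compute before the segment base is added.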
int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct *desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; desc = get_desc(sel); if (!desc) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc->type & BIT(3))) return -EINVAL; switch ((desc->l << 1) | desc->d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } }
int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; if (!get_desc(&desc, sel)) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc.type & BIT(3))) return -EINVAL; switch ((desc.l << 1) | desc.d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } }
{'added': [(560, ' * get_desc() - Obtain contents of a segment descriptor'), (561, ' * @out:\tSegment descriptor contents on success'), (569, ' * True on success, false on failure.'), (573, 'static bool get_desc(struct desc_struct *out, unsigned short sel)'), (580, '\t\tbool success = false;'), (588, '\t\tif (ldt && sel < ldt->nr_entries) {'), (589, '\t\t\t*out = ldt->entries[sel];'), (590, '\t\t\tsuccess = true;'), (591, '\t\t}'), (595, '\t\treturn success;'), (610, '\t\treturn false;'), (612, '\t*out = *(struct desc_struct *)(gdt_desc.address + desc_base);'), (613, '\treturn true;'), (635, '\tstruct desc_struct desc;'), (673, '\tif (!get_desc(&desc, sel))'), (676, '\treturn get_desc_base(&desc);'), (698, '\tstruct desc_struct desc;'), (712, '\tif (!get_desc(&desc, sel))'), (721, '\tlimit = get_desc_limit(&desc);'), (722, '\tif (desc.g)'), (746, '\tstruct desc_struct desc;'), (757, '\tif (!get_desc(&desc, sel))'), (765, '\tif (!(desc.type & BIT(3)))'), (768, '\tswitch ((desc.l << 1) | desc.d) {')], 'deleted': [(560, ' * get_desc() - Obtain pointer to a segment descriptor'), (568, ' * Pointer to segment descriptor on success.'), (572, 'static struct desc_struct *get_desc(unsigned short sel)'), (579, '\t\tstruct desc_struct *desc = NULL;'), (587, '\t\tif (ldt && sel < ldt->nr_entries)'), (588, '\t\t\tdesc = &ldt->entries[sel];'), (592, '\t\treturn desc;'), (607, '\t\treturn NULL;'), (609, '\treturn (struct desc_struct *)(gdt_desc.address + desc_base);'), (631, '\tstruct desc_struct *desc;'), (669, '\tdesc = get_desc(sel);'), (670, '\tif (!desc)'), (673, '\treturn get_desc_base(desc);'), (695, '\tstruct desc_struct *desc;'), (709, '\tdesc = get_desc(sel);'), (710, '\tif (!desc)'), (719, '\tlimit = get_desc_limit(desc);'), (720, '\tif (desc->g)'), (744, '\tstruct desc_struct *desc;'), (755, '\tdesc = get_desc(sel);'), (756, '\tif (!desc)'), (764, '\tif (!(desc->type & BIT(3)))'), (767, '\tswitch ((desc->l << 1) | desc->d) {')]}
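The diff above captures the essence of the CVE-2019-13233 fix: the removed get_desc() returned a pointer into the LDT and dropped current->active_mm->context.lock before the caller dereferenced it, so a concurrent LDT update (e.g. via modify_ldt()) could free or replace the table underneath (CWE-416 use-after-free reached through a CWE-362 race). The added version copies the descriptor into a caller-supplied struct desc_struct while the lock is still held. The sketch below is a condensed, hypothetical illustration of the two patterns outside the kernel, with a pthread mutex standing in for the mm context lock and invented names throughout.

#include <pthread.h>
#include <stdbool.h>

struct entry { unsigned long base, limit; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;		/* may be freed/replaced by another thread */
static unsigned int table_entries;

/* Racy pattern (as in the removed code): the returned pointer may dangle as
 * soon as the lock is dropped and another thread frees or swaps the table. */
static struct entry *lookup_unsafe(unsigned int idx)
{
	struct entry *e = NULL;

	pthread_mutex_lock(&table_lock);
	if (table && idx < table_entries)
		e = &table[idx];
	pthread_mutex_unlock(&table_lock);
	return e;		/* caller dereferences after the lock is gone */
}

/* Fixed pattern (as in the added code): snapshot the entry under the lock. */
static bool lookup_safe(struct entry *out, unsigned int idx)
{
	bool success = false;

	pthread_mutex_lock(&table_lock);
	if (table && idx < table_entries) {
		*out = table[idx];
		success = true;
	}
	pthread_mutex_unlock(&table_lock);
	return success;
}

int main(void)
{
	static struct entry one = { .base = 0x1000, .limit = 0xffff };
	struct entry snap;
	struct entry *stale;

	table = &one;
	table_entries = 1;

	stale = lookup_unsafe(0);	/* may dangle if another thread swaps 'table' */
	(void)stale;

	return lookup_safe(&snap, 0) ? 0 : 1;	/* work only on the private copy */
}

The callers then operate on their private copy, which is why insn_get_seg_base(), get_seg_limit() and insn_get_code_seg_params() now declare struct desc_struct desc on the stack and pass &desc into get_desc().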
24
23
634
3,848
https://github.com/torvalds/linux
CVE-2019-13233
['CWE-416', 'CWE-362']
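For the virtual-8086 paths in the surrounding listings (insn_get_seg_base() returning sel << 4, and get_addr_ref_16()/get_addr_ref_32() masking the result with 0xfffff), the linear address is simply the selector shifted left by four bits plus the 16-bit effective address, wrapped to 20 bits. A small self-contained sketch of that computation follows.

#include <stdint.h>
#include <stdio.h>

/* Virtual-8086 / real-mode style address: base = selector << 4,
 * linear = base + 16-bit effective address, truncated to 20 bits. */
static uint32_t vm86_linear_address(uint16_t selector, uint16_t eff_addr)
{
	uint32_t seg_base = (uint32_t)selector << 4;

	return (seg_base + eff_addr) & 0xfffff;
}

int main(void)
{
	/* Selector 0xffff with offset 0x10 exceeds 20 bits and wraps to 0. */
	printf("%#x\n", vm86_linear_address(0xffff, 0x0010));
	return 0;
}

The wrap in this example is the same truncation the "Limit linear address to 20 bits" masks perform in get_addr_ref_16() and get_addr_ref_32().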
insn-eval.c
insn_get_seg_base
/* * Utility functions for x86 operand and address decoding * * Copyright (C) Intel Corporation 2017 */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/mmu_context.h> #include <asm/desc_defs.h> #include <asm/desc.h> #include <asm/inat.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> #include <asm/vm86.h> #undef pr_fmt #define pr_fmt(fmt) "insn: " fmt enum reg_type { REG_TYPE_RM = 0, REG_TYPE_INDEX, REG_TYPE_BASE, }; /** * is_string_insn() - Determine if instruction is a string instruction * @insn: Instruction containing the opcode to inspect * * Returns: * * true if the instruction, determined by the opcode, is any of the * string instructions as defined in the Intel Software Development manual. * False otherwise. */ static bool is_string_insn(struct insn *insn) { insn_get_opcode(insn); /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; switch (insn->opcode.bytes[0]) { case 0x6c ... 0x6f: /* INS, OUTS */ case 0xa4 ... 0xa7: /* MOVS, CMPS */ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ return true; default: return false; } } /** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * * Inspect the instruction prefixes in @insn and find segment overrides, if any. * * Returns: * * A constant identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override * prefixes were found. * * -EINVAL in case of error. */ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ for (i = 0; i < insn->prefixes.nbytes; i++) { insn_attr_t attr; attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_SS): idx = INAT_SEG_REG_SS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_DS): idx = INAT_SEG_REG_DS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_ES): idx = INAT_SEG_REG_ES; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_FS): idx = INAT_SEG_REG_FS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_GS): idx = INAT_SEG_REG_GS; num_overrides++; break; /* No default action needed. */ } } /* More than one segment override prefix leads to undefined behavior. */ if (num_overrides > 1) return -EINVAL; return idx; } /** * check_seg_overrides() - check if segment override prefixes are allowed * @insn: Valid instruction with segment override prefixes * @regoff: Operand offset, in pt_regs, for which the check is performed * * For a particular register used in register-indirect addressing, determine if * segment override prefixes can be used. Specifically, no overrides are allowed * for rDI if used with a string instruction. * * Returns: * * True if segment override prefixes can be used with the register indicated * in @regoff. False if otherwise. */ static bool check_seg_overrides(struct insn *insn, int regoff) { if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn)) return false; return true; } /** * resolve_default_seg() - resolve default segment register index for an operand * @insn: Instruction with opcode and address size. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @off: Operand offset, in pt_regs, for which resolution is needed * * Resolve the default segment register index associated with the instruction * operand register indicated by @off. Such index is resolved based on defaults * described in the Intel Software Development Manual. * * Returns: * * If in protected mode, a constant identifying the segment register to use, * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE. * * -EINVAL in case of error. */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 * of the Intel Software Development Manual Vol. 1: * * + DS for all references involving r[ABCD]X, and rSI. * + If used in a string instruction, ES for rDI. Otherwise, DS. * + AX, CX and DX are not valid register operands in 16-bit address * encodings but are valid for 32-bit and 64-bit encodings. * + -EDOM is reserved to identify for cases in which no register * is used (i.e., displacement-only addressing). Use DS. * + SS for rSP or rBP. * + CS for rIP. */ switch (off) { case offsetof(struct pt_regs, ax): case offsetof(struct pt_regs, cx): case offsetof(struct pt_regs, dx): /* Need insn to verify address size. */ if (insn->addr_bytes == 2) return -EINVAL; /* fall through */ case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): return INAT_SEG_REG_DS; case offsetof(struct pt_regs, di): if (is_string_insn(insn)) return INAT_SEG_REG_ES; return INAT_SEG_REG_DS; case offsetof(struct pt_regs, bp): case offsetof(struct pt_regs, sp): return INAT_SEG_REG_SS; case offsetof(struct pt_regs, ip): return INAT_SEG_REG_CS; default: return -EINVAL; } } /** * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to deterimine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. * * The segment register associated to an operand used in register-indirect * addressing depends on: * * a) Whether running in long mode (in such a case segments are ignored, except * if FS or GS are used). * * b) Whether segment override prefixes can be used. Certain instructions and * registers do not allow override prefixes. * * c) Whether segment overrides prefixes are found in the instruction prefixes. * * d) If there are not segment override prefixes or they cannot be used, the * default segment register associated with the operand register is used. * * The function checks first if segment override prefixes can be used with the * operand indicated by @regoff. If allowed, obtain such overridden segment * register index. Lastly, if not prefixes were found or cannot be used, resolve * the segment register index to use based on the defaults described in the * Intel documentation. In long mode, all segment register indexes will be * ignored, except if overrides were found for FS or GS. All these operations * are done using helper functions. * * The operand register, @regoff, is represented as the offset from the base of * pt_regs. * * As stated, the main use of this function is to determine the segment register * index based on the instruction, its operands and prefixes. Hence, @insn * must be valid. 
However, if @regoff indicates rIP, we don't need to inspect * @insn at all as in this case CS is used in all cases. This case is checked * before proceeding further. * * Please note that this function does not return the value in the segment * register (i.e., the segment selector) but our defined index. The segment * selector needs to be obtained using get_segment_selector() and passing the * segment register index resolved by this function. * * Returns: * * An index identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. * * -EINVAL in case of error. */ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) { int idx; /* * In the unlikely event of having to resolve the segment register * index for rIP, do it first. Segment override prefixes should not * be used. Hence, it is not necessary to inspect the instruction, * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; } if (!insn) return -EINVAL; if (!check_seg_overrides(insn, regoff)) return resolve_default_seg(insn, regs, regoff); idx = get_seg_reg_override_idx(insn); if (idx < 0) return idx; if (idx == INAT_SEG_REG_DEFAULT) return resolve_default_seg(insn, regs, regoff); /* * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ if (user_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; } return idx; } /** * get_segment_selector() - obtain segment selector * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Segment register index to use * * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU * registers. This done for only for completeness as in CONFIG_X86_64 segment * registers are ignored. * * Returns: * * Value of the segment selector, including null when running in * long mode. * * -EINVAL on error. 
*/ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) { #ifdef CONFIG_X86_64 unsigned short sel; switch (seg_reg_idx) { case INAT_SEG_REG_IGNORE: return 0; case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: savesegment(ds, sel); return sel; case INAT_SEG_REG_ES: savesegment(es, sel); return sel; case INAT_SEG_REG_FS: savesegment(fs, sel); return sel; case INAT_SEG_REG_GS: savesegment(gs, sel); return sel; default: return -EINVAL; } #else /* CONFIG_X86_32 */ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; if (v8086_mode(regs)) { switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return vm86regs->ds; case INAT_SEG_REG_ES: return vm86regs->es; case INAT_SEG_REG_FS: return vm86regs->fs; case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } } switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return (unsigned short)(regs->ds & 0xffff); case INAT_SEG_REG_ES: return (unsigned short)(regs->es & 0xffff); case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: /* * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. * The macro below takes care of both cases. */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } #endif /* CONFIG_X86_64 */ } static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. 
*/ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; } /** * get_reg_offset_16() - Obtain offset of register indicated by instruction * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register * @offs2: Offset of the second opeand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The * @offs1 and @offs2 will be written with the offset of the two registers * indicated by the instruction. In cases where any of the registers is not * referenced by the instruction, the value will be set to -EDOM. * * Returns: * * 0 on success, -EINVAL on error. */ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, int *offs1, int *offs2) { /* * 16-bit addressing can use one or two registers. Specifics of * encodings are given in Table 2-1. "16-Bit Addressing Forms with the * ModR/M Byte" of the Intel Software Development Manual. */ static const int regoff1[] = { offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bx), }; static const int regoff2[] = { offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), -EDOM, -EDOM, -EDOM, -EDOM, }; if (!offs1 || !offs2) return -EINVAL; /* Operand is a register, use the generic function. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) { *offs1 = insn_get_modrm_rm_off(insn, regs); *offs2 = -EDOM; return 0; } *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; /* * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first * register offset is invalild. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && (X86_MODRM_RM(insn->modrm.value) == 6)) *offs1 = -EDOM; return 0; } /** * get_desc() - Obtain pointer to a segment descriptor * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. * Both global and local descriptor tables are supported. * * Returns: * * Pointer to segment descriptor on success. * * NULL on error. */ static struct desc_struct *get_desc(unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc = NULL; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) desc = &ldt->entries[sel]; mutex_unlock(&current->active_mm->context.lock); return desc; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. 
Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return NULL; return (struct desc_struct *)(gdt_desc.address + desc_base); } /** * insn_get_seg_base() - Obtain base address of segment descriptor. * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the base address of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, base address of the segment. Zero in long mode, * except when FS or GS are used. In virtual-8086 mode, the segment * selector shifted 4 bits to the right. * * -1L in case of error. */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; desc = get_desc(sel); if (!desc) return -1L; return get_desc_base(desc); } /** * get_seg_limit() - Obtain the limit of a segment descriptor * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the limit of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, the limit of the segment descriptor in bytes. * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, * limit is returned as -1L to imply a limit-less segment. * * Zero is returned on error. */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; unsigned long limit; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return 0; if (user_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; desc = get_desc(sel); if (!desc) return 0; /* * If the granularity bit is set, the limit is given in multiples * of 4096. This also means that the 12 least significant bits are * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ limit = get_desc_limit(desc); if (desc->g) limit = (limit << 12) + 0xfff; return limit; } /** * insn_get_code_seg_params() - Obtain code segment parameters * @regs: Structure with register values as seen when entering kernel mode * * Obtain address and operand sizes of the code segment. It is obtained from the * selector contained in the CS register in regs. 
In protected mode, the default * address is determined by inspecting the L and D bits of the segment * descriptor. In virtual-8086 mode, the default is always two bytes for both * address and operand sizes. * * Returns: * * An int containing ORed-in default parameters on success. * * -EINVAL on error. */ int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct *desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; desc = get_desc(sel); if (!desc) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc->type & BIT(3))) return -EINVAL; switch ((desc->l << 1) | desc->d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } } /** * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte * @insn: Instruction containing the ModRM byte * @regs: Register values as seen when entering kernel mode * * Returns: * * The register indicated by the r/m part of the ModRM byte. The * register is obtained as an offset from the base of pt_regs. In specific * cases, the returned value can be -EDOM to indicate that the particular value * of ModRM does not refer to a register and shall be ignored. */ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) { return get_reg_offset(insn, regs, REG_TYPE_RM); } /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor * @base: Obtained segment base * @limit: Obtained segment limit * * Obtain the base address and limit of the segment associated with the operand * @regoff and, if any or allowed, override prefixes in @insn. This function is * different from insn_get_seg_base() as the latter does not resolve the segment * associated with the instruction operand. If a limit is not needed (e.g., * when running in long mode), @limit can be NULL. * * Returns: * * 0 on success. @base and @limit will contain the base address and of the * resolved segment, respectively. * * -EINVAL on error. */ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, int regoff, unsigned long *base, unsigned long *limit) { int seg_reg_idx; if (!base) return -EINVAL; seg_reg_idx = resolve_seg_reg(insn, regs, regoff); if (seg_reg_idx < 0) return seg_reg_idx; *base = insn_get_seg_base(regs, seg_reg_idx); if (*base == -1L) return -EINVAL; if (!limit) return 0; *limit = get_seg_limit(regs, seg_reg_idx); if (!(*limit)) return -EINVAL; return 0; } /** * get_eff_addr_reg() - Obtain effective address from register operand * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, with the effective address * @eff_addr: Obtained effective address * * Obtain the effective address stored in the register operand as indicated by * the ModRM byte. This function is to be used only with register addressing * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The * register operand, as an offset from the base of pt_regs, is saved in @regoff; * such offset can then be used to resolve the segment associated with the * operand. This function can be used with any of the supported address sizes * in x86. * * Returns: * * 0 on success. @eff_addr will have the effective address stored in the * operand indicated by ModRM. @regoff will have such operand as an offset from * the base of pt_regs. * * -EINVAL on error. */ static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); if (*regoff < 0) return -EINVAL; /* Ignore bytes that are outside the address size. */ if (insn->addr_bytes == 2) *eff_addr = regs_get_register(regs, *regoff) & 0xffff; else if (insn->addr_bytes == 4) *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff; else /* 64-bit address */ *eff_addr = regs_get_register(regs, *regoff); return 0; } /** * get_eff_addr_modrm() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the ModRM byte of @insn. After * identifying the registers involved in the register-indirect memory reference, * its value is obtained from the operands in @regs. The computed address is * stored @eff_addr. Also, the register operand that indicates the associated * segment is stored in @regoff, this parameter can later be used to determine * such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); /* * -EDOM means that we must ignore the address_offset. In such a case, * in 64-bit mode the effective address relative to the rIP of the * following instruction. */ if (*regoff == -EDOM) { if (user_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; } else if (*regoff < 0) { return -EINVAL; } else { tmp = regs_get_register(regs, *regoff); } if (insn->addr_bytes == 4) { int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = tmp + insn->displacement.value; } return 0; } /** * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the 16-bit effective address referenced by the ModRM byte of @insn. * After identifying the registers involved in the register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates * the associated segment is stored in @regoff, this parameter can later be used * to determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, int *regoff, short *eff_addr) { int addr_offset1, addr_offset2, ret; short addr1 = 0, addr2 = 0, displacement; if (insn->addr_bytes != 2) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2); if (ret < 0) return -EINVAL; /* * Don't fail on invalid offset values. They might be invalid because * they cannot be used for this particular value of ModRM. Instead, use * them in the computation only if they contain a valid value. */ if (addr_offset1 != -EDOM) addr1 = regs_get_register(regs, addr_offset1) & 0xffff; if (addr_offset2 != -EDOM) addr2 = regs_get_register(regs, addr_offset2) & 0xffff; displacement = insn->displacement.value & 0xffff; *eff_addr = addr1 + addr2 + displacement; /* * The first operand register could indicate to use of either SS or DS * registers to obtain the segment selector. The second operand * register can only indicate the use of DS. Thus, the first operand * will be used to obtain the segment selector. */ *regoff = addr_offset1; return 0; } /** * get_eff_addr_sib() - Obtain referenced effective address via SIB * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the SIB byte of @insn. After * identifying the registers involved in the indexed, register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates the * associated segment is stored in @regoff, this parameter can later be used to * determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; insn_get_sib(insn); if (!insn->sib.nbytes) return -EINVAL; *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); /* * Negative values in the base and index offset means an error when * decoding the SIB byte. 
Except -EDOM, which means that the registers * should not be used in the address computation. */ if (*base_offset == -EDOM) base = 0; else if (*base_offset < 0) return -EINVAL; else base = regs_get_register(regs, *base_offset); if (indx_offset == -EDOM) indx = 0; else if (indx_offset < 0) return -EINVAL; else indx = regs_get_register(regs, indx_offset); if (insn->addr_bytes == 4) { int addr32, base32, idx32; base32 = base & 0xffffffff; idx32 = indx & 0xffffffff; addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value)); addr32 += insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value)); *eff_addr += insn->displacement.value; } return 0; } /** * get_addr_ref_16() - Obtain the 16-bit address referred by instruction * @insn: Instruction containing ModRM byte and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 16-bit address encodings. Obtain the memory * address referred by the instruction's ModRM and displacement bytes. Also, the * segment used as base is determined by either any segment override prefixes in * @insn or the default segment of the registers involved in the address * computation. In protected mode, segment limits are enforced. * * Returns: * * Linear address referenced by the instruction operands on success. * * -1L on error. */ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int ret, regoff; short eff_addr; long tmp; insn_get_modrm(insn); insn_get_displacement(insn); if (insn->addr_bytes != 2) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr); if (ret) goto out; } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * Before computing the linear address, make sure the effective address * is within the limits of the segment. In virtual-8086 mode, segment * limits are not enforced. In such a case, the segment limit is -1L to * reflect this fact. */ if ((unsigned long)(eff_addr & 0xffff) > seg_limit) goto out; linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_32() - Obtain a 32-bit linear address * @insn: Instruction with ModRM, SIB bytes and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 32-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. If in protected * mode, segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. 
*/ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int eff_addr, regoff; long tmp; int ret; if (insn->addr_bytes != 4) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * In protected mode, before computing the linear address, make sure * the effective address is within the limits of the segment. * 32-bit addresses can be used in long and virtual-8086 modes if an * address override prefix is used. In such cases, segment limits are * not enforced. When in virtual-8086 mode, the segment limit is -1L * to reflect this situation. * * After computed, the effective address is treated as an unsigned * quantity. */ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* * Even though 32-bit address encodings are allowed in virtual-8086 * mode, the address range is still limited to [0x-0xffff]. */ if (v8086_mode(regs) && (eff_addr & ~0xffff)) goto out; /* * Data type long could be 64 bits in size. Ensure that our 32-bit * effective address is not sign-extended when computing the linear * address. */ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_64() - Obtain a 64-bit linear address * @insn: Instruction struct with ModRM and SIB bytes and displacement * @regs: Structure with register values as seen when entering kernel mode * * This function is to be used with 64-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. */ #ifndef CONFIG_X86_64 static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { return (void __user *)-1L; } #else static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base; int regoff, ret; long eff_addr; if (insn->addr_bytes != 8) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr); if (ret) goto out; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL); if (ret) goto out; linear_addr = (unsigned long)eff_addr + seg_base; out: return (void __user *)linear_addr; } #endif /* CONFIG_X86_64 */ /** * insn_get_addr_ref() - Obtain the linear address referred by instruction * @insn: Instruction structure containing ModRM byte and displacement * @regs: Structure with register values as seen when entering kernel mode * * Obtain the linear address referred by the instruction's ModRM, SIB and * displacement bytes, and segment base, as applicable. In protected mode, * segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. 
* * -1L on error. */ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) { if (!insn || !regs) return (void __user *)-1L; switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); case 4: return get_addr_ref_32(insn, regs); case 8: return get_addr_ref_64(insn, regs); default: return (void __user *)-1L; } }
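get_seg_limit() in the listing above notes that when the descriptor's granularity bit is set, the stored 20-bit limit is counted in 4 KiB units, so the byte-granular limit becomes (limit << 12) + 0xfff. The following is a tiny worked example of that scaling, independent of the kernel's desc_struct layout.

#include <stdint.h>
#include <stdio.h>

/* Byte-granular segment limit from the raw 20-bit limit field and the G bit,
 * following the comment in get_seg_limit(). */
static uint32_t segment_limit_bytes(uint32_t raw_limit, int granularity)
{
	return granularity ? (raw_limit << 12) + 0xfff : raw_limit;
}

int main(void)
{
	/* Flat segment: raw limit 0xfffff with G=1 covers the full 4 GiB. */
	printf("%#x\n", segment_limit_bytes(0xfffff, 1));	/* 0xffffffff */
	/* Byte-granular 64 KiB segment: raw limit 0xffff with G=0. */
	printf("%#x\n", segment_limit_bytes(0xffff, 0));	/* 0xffff */
	return 0;
}

A raw limit of 0xfffff with G=1 therefore yields a limit of 0xffffffff, while the same field width with G=0 can only describe a segment of up to 64 KiB, which is why the 12 least significant bits are not tested when the granularity bit is set.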
/* * Utility functions for x86 operand and address decoding * * Copyright (C) Intel Corporation 2017 */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/mmu_context.h> #include <asm/desc_defs.h> #include <asm/desc.h> #include <asm/inat.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> #include <asm/vm86.h> #undef pr_fmt #define pr_fmt(fmt) "insn: " fmt enum reg_type { REG_TYPE_RM = 0, REG_TYPE_INDEX, REG_TYPE_BASE, }; /** * is_string_insn() - Determine if instruction is a string instruction * @insn: Instruction containing the opcode to inspect * * Returns: * * true if the instruction, determined by the opcode, is any of the * string instructions as defined in the Intel Software Development manual. * False otherwise. */ static bool is_string_insn(struct insn *insn) { insn_get_opcode(insn); /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; switch (insn->opcode.bytes[0]) { case 0x6c ... 0x6f: /* INS, OUTS */ case 0xa4 ... 0xa7: /* MOVS, CMPS */ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ return true; default: return false; } } /** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * * Inspect the instruction prefixes in @insn and find segment overrides, if any. * * Returns: * * A constant identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override * prefixes were found. * * -EINVAL in case of error. */ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ for (i = 0; i < insn->prefixes.nbytes; i++) { insn_attr_t attr; attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_SS): idx = INAT_SEG_REG_SS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_DS): idx = INAT_SEG_REG_DS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_ES): idx = INAT_SEG_REG_ES; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_FS): idx = INAT_SEG_REG_FS; num_overrides++; break; case INAT_MAKE_PREFIX(INAT_PFX_GS): idx = INAT_SEG_REG_GS; num_overrides++; break; /* No default action needed. */ } } /* More than one segment override prefix leads to undefined behavior. */ if (num_overrides > 1) return -EINVAL; return idx; } /** * check_seg_overrides() - check if segment override prefixes are allowed * @insn: Valid instruction with segment override prefixes * @regoff: Operand offset, in pt_regs, for which the check is performed * * For a particular register used in register-indirect addressing, determine if * segment override prefixes can be used. Specifically, no overrides are allowed * for rDI if used with a string instruction. * * Returns: * * True if segment override prefixes can be used with the register indicated * in @regoff. False if otherwise. */ static bool check_seg_overrides(struct insn *insn, int regoff) { if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn)) return false; return true; } /** * resolve_default_seg() - resolve default segment register index for an operand * @insn: Instruction with opcode and address size. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @off: Operand offset, in pt_regs, for which resolution is needed * * Resolve the default segment register index associated with the instruction * operand register indicated by @off. Such index is resolved based on defaults * described in the Intel Software Development Manual. * * Returns: * * If in protected mode, a constant identifying the segment register to use, * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE. * * -EINVAL in case of error. */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 * of the Intel Software Development Manual Vol. 1: * * + DS for all references involving r[ABCD]X, and rSI. * + If used in a string instruction, ES for rDI. Otherwise, DS. * + AX, CX and DX are not valid register operands in 16-bit address * encodings but are valid for 32-bit and 64-bit encodings. * + -EDOM is reserved to identify for cases in which no register * is used (i.e., displacement-only addressing). Use DS. * + SS for rSP or rBP. * + CS for rIP. */ switch (off) { case offsetof(struct pt_regs, ax): case offsetof(struct pt_regs, cx): case offsetof(struct pt_regs, dx): /* Need insn to verify address size. */ if (insn->addr_bytes == 2) return -EINVAL; /* fall through */ case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): return INAT_SEG_REG_DS; case offsetof(struct pt_regs, di): if (is_string_insn(insn)) return INAT_SEG_REG_ES; return INAT_SEG_REG_DS; case offsetof(struct pt_regs, bp): case offsetof(struct pt_regs, sp): return INAT_SEG_REG_SS; case offsetof(struct pt_regs, ip): return INAT_SEG_REG_CS; default: return -EINVAL; } } /** * resolve_seg_reg() - obtain segment register index * @insn: Instruction with operands * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to deterimine segment register * * Determine the segment register associated with the operands and, if * applicable, prefixes and the instruction pointed by @insn. * * The segment register associated to an operand used in register-indirect * addressing depends on: * * a) Whether running in long mode (in such a case segments are ignored, except * if FS or GS are used). * * b) Whether segment override prefixes can be used. Certain instructions and * registers do not allow override prefixes. * * c) Whether segment overrides prefixes are found in the instruction prefixes. * * d) If there are not segment override prefixes or they cannot be used, the * default segment register associated with the operand register is used. * * The function checks first if segment override prefixes can be used with the * operand indicated by @regoff. If allowed, obtain such overridden segment * register index. Lastly, if not prefixes were found or cannot be used, resolve * the segment register index to use based on the defaults described in the * Intel documentation. In long mode, all segment register indexes will be * ignored, except if overrides were found for FS or GS. All these operations * are done using helper functions. * * The operand register, @regoff, is represented as the offset from the base of * pt_regs. * * As stated, the main use of this function is to determine the segment register * index based on the instruction, its operands and prefixes. Hence, @insn * must be valid. 
However, if @regoff indicates rIP, we don't need to inspect * @insn at all as in this case CS is used in all cases. This case is checked * before proceeding further. * * Please note that this function does not return the value in the segment * register (i.e., the segment selector) but our defined index. The segment * selector needs to be obtained using get_segment_selector() and passing the * segment register index resolved by this function. * * Returns: * * An index identifying the segment register to use, among CS, SS, DS, * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. * * -EINVAL in case of error. */ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) { int idx; /* * In the unlikely event of having to resolve the segment register * index for rIP, do it first. Segment override prefixes should not * be used. Hence, it is not necessary to inspect the instruction, * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { if (user_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; } if (!insn) return -EINVAL; if (!check_seg_overrides(insn, regoff)) return resolve_default_seg(insn, regs, regoff); idx = get_seg_reg_override_idx(insn); if (idx < 0) return idx; if (idx == INAT_SEG_REG_DEFAULT) return resolve_default_seg(insn, regs, regoff); /* * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ if (user_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; } return idx; } /** * get_segment_selector() - obtain segment selector * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Segment register index to use * * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU * registers. This done for only for completeness as in CONFIG_X86_64 segment * registers are ignored. * * Returns: * * Value of the segment selector, including null when running in * long mode. * * -EINVAL on error. 
*/ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) { #ifdef CONFIG_X86_64 unsigned short sel; switch (seg_reg_idx) { case INAT_SEG_REG_IGNORE: return 0; case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: savesegment(ds, sel); return sel; case INAT_SEG_REG_ES: savesegment(es, sel); return sel; case INAT_SEG_REG_FS: savesegment(fs, sel); return sel; case INAT_SEG_REG_GS: savesegment(gs, sel); return sel; default: return -EINVAL; } #else /* CONFIG_X86_32 */ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; if (v8086_mode(regs)) { switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return vm86regs->ds; case INAT_SEG_REG_ES: return vm86regs->es; case INAT_SEG_REG_FS: return vm86regs->fs; case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } } switch (seg_reg_idx) { case INAT_SEG_REG_CS: return (unsigned short)(regs->cs & 0xffff); case INAT_SEG_REG_SS: return (unsigned short)(regs->ss & 0xffff); case INAT_SEG_REG_DS: return (unsigned short)(regs->ds & 0xffff); case INAT_SEG_REG_ES: return (unsigned short)(regs->es & 0xffff); case INAT_SEG_REG_FS: return (unsigned short)(regs->fs & 0xffff); case INAT_SEG_REG_GS: /* * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. * The macro below takes care of both cases. */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: /* fall through */ default: return -EINVAL; } #endif /* CONFIG_X86_64 */ } static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. 
*/ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; } /** * get_reg_offset_16() - Obtain offset of register indicated by instruction * @insn: Instruction containing ModRM byte * @regs: Register values as seen when entering kernel mode * @offs1: Offset of the first operand register * @offs2: Offset of the second opeand register, if applicable * * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte * in @insn. This function is to be used with 16-bit address encodings. The * @offs1 and @offs2 will be written with the offset of the two registers * indicated by the instruction. In cases where any of the registers is not * referenced by the instruction, the value will be set to -EDOM. * * Returns: * * 0 on success, -EINVAL on error. */ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, int *offs1, int *offs2) { /* * 16-bit addressing can use one or two registers. Specifics of * encodings are given in Table 2-1. "16-Bit Addressing Forms with the * ModR/M Byte" of the Intel Software Development Manual. */ static const int regoff1[] = { offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, bx), }; static const int regoff2[] = { offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), -EDOM, -EDOM, -EDOM, -EDOM, }; if (!offs1 || !offs2) return -EINVAL; /* Operand is a register, use the generic function. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) { *offs1 = insn_get_modrm_rm_off(insn, regs); *offs2 = -EDOM; return 0; } *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; /* * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- * only addressing. This means that no registers are involved in * computing the effective address. Thus, ensure that the first * register offset is invalild. The second register offset is already * invalid under the aforementioned conditions. */ if ((X86_MODRM_MOD(insn->modrm.value) == 0) && (X86_MODRM_RM(insn->modrm.value) == 6)) *offs1 = -EDOM; return 0; } /** * get_desc() - Obtain contents of a segment descriptor * @out: Segment descriptor contents on success * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. * Both global and local descriptor tables are supported. * * Returns: * * True on success, false on failure. * * NULL on error. */ static bool get_desc(struct desc_struct *out, unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { bool success = false; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; mutex_lock(&current->active_mm->context.lock); ldt = current->active_mm->context.ldt; if (ldt && sel < ldt->nr_entries) { *out = ldt->entries[sel]; success = true; } mutex_unlock(&current->active_mm->context.lock); return success; } #endif native_store_gdt(&gdt_desc); /* * Segment descriptors have a size of 8 bytes. 
Thus, the index is * multiplied by 8 to obtain the memory offset of the desired descriptor * from the base of the GDT. As bits [15:3] of the segment selector * contain the index, it can be regarded as multiplied by 8 already. * All that remains is to clear bits [2:0]. */ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) return false; *out = *(struct desc_struct *)(gdt_desc.address + desc_base); return true; } /** * insn_get_seg_base() - Obtain base address of segment descriptor. * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the base address of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, base address of the segment. Zero in long mode, * except when FS or GS are used. In virtual-8086 mode, the segment * selector shifted 4 bits to the right. * * -1L in case of error. */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; if (!get_desc(&desc, sel)) return -1L; return get_desc_base(&desc); } /** * get_seg_limit() - Obtain the limit of a segment descriptor * @regs: Register values as seen when entering kernel mode * @seg_reg_idx: Index of the segment register pointing to seg descriptor * * Obtain the limit of the segment as indicated by the segment descriptor * pointed by the segment selector. The segment selector is obtained from the * input segment register index @seg_reg_idx. * * Returns: * * In protected mode, the limit of the segment descriptor in bytes. * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, * limit is returned as -1L to imply a limit-less segment. * * Zero is returned on error. */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; unsigned long limit; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return 0; if (user_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; if (!get_desc(&desc, sel)) return 0; /* * If the granularity bit is set, the limit is given in multiples * of 4096. This also means that the 12 least significant bits are * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ limit = get_desc_limit(&desc); if (desc.g) limit = (limit << 12) + 0xfff; return limit; } /** * insn_get_code_seg_params() - Obtain code segment parameters * @regs: Structure with register values as seen when entering kernel mode * * Obtain address and operand sizes of the code segment. 
It is obtained from the * selector contained in the CS register in regs. In protected mode, the default * address is determined by inspecting the L and D bits of the segment * descriptor. In virtual-8086 mode, the default is always two bytes for both * address and operand sizes. * * Returns: * * An int containing ORed-in default parameters on success. * * -EINVAL on error. */ int insn_get_code_seg_params(struct pt_regs *regs) { struct desc_struct desc; short sel; if (v8086_mode(regs)) /* Address and operand size are both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); sel = get_segment_selector(regs, INAT_SEG_REG_CS); if (sel < 0) return sel; if (!get_desc(&desc, sel)) return -EINVAL; /* * The most significant byte of the Type field of the segment descriptor * determines whether a segment contains data or code. If this is a data * segment, return error. */ if (!(desc.type & BIT(3))) return -EINVAL; switch ((desc.l << 1) | desc.d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. */ return INSN_CODE_SEG_PARAMS(2, 2); case 1: /* * Legacy mode. CS.L=0, CS.D=1. Address and operand size are * both 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 4); case 2: /* * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; * operand size is 32-bit. */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ /* fall through */ default: return -EINVAL; } } /** * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte * @insn: Instruction containing the ModRM byte * @regs: Register values as seen when entering kernel mode * * Returns: * * The register indicated by the r/m part of the ModRM byte. The * register is obtained as an offset from the base of pt_regs. In specific * cases, the returned value can be -EDOM to indicate that the particular value * of ModRM does not refer to a register and shall be ignored. */ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) { return get_reg_offset(insn, regs, REG_TYPE_RM); } /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor * @base: Obtained segment base * @limit: Obtained segment limit * * Obtain the base address and limit of the segment associated with the operand * @regoff and, if any or allowed, override prefixes in @insn. This function is * different from insn_get_seg_base() as the latter does not resolve the segment * associated with the instruction operand. If a limit is not needed (e.g., * when running in long mode), @limit can be NULL. * * Returns: * * 0 on success. @base and @limit will contain the base address and of the * resolved segment, respectively. * * -EINVAL on error. */ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, int regoff, unsigned long *base, unsigned long *limit) { int seg_reg_idx; if (!base) return -EINVAL; seg_reg_idx = resolve_seg_reg(insn, regs, regoff); if (seg_reg_idx < 0) return seg_reg_idx; *base = insn_get_seg_base(regs, seg_reg_idx); if (*base == -1L) return -EINVAL; if (!limit) return 0; *limit = get_seg_limit(regs, seg_reg_idx); if (!(*limit)) return -EINVAL; return 0; } /** * get_eff_addr_reg() - Obtain effective address from register operand * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, with the effective address * @eff_addr: Obtained effective address * * Obtain the effective address stored in the register operand as indicated by * the ModRM byte. This function is to be used only with register addressing * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The * register operand, as an offset from the base of pt_regs, is saved in @regoff; * such offset can then be used to resolve the segment associated with the * operand. This function can be used with any of the supported address sizes * in x86. * * Returns: * * 0 on success. @eff_addr will have the effective address stored in the * operand indicated by ModRM. @regoff will have such operand as an offset from * the base of pt_regs. * * -EINVAL on error. */ static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); if (*regoff < 0) return -EINVAL; /* Ignore bytes that are outside the address size. */ if (insn->addr_bytes == 2) *eff_addr = regs_get_register(regs, *regoff) & 0xffff; else if (insn->addr_bytes == 4) *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff; else /* 64-bit address */ *eff_addr = regs_get_register(regs, *regoff); return 0; } /** * get_eff_addr_modrm() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the ModRM byte of @insn. After * identifying the registers involved in the register-indirect memory reference, * its value is obtained from the operands in @regs. The computed address is * stored @eff_addr. Also, the register operand that indicates the associated * segment is stored in @regoff, this parameter can later be used to determine * such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; *regoff = get_reg_offset(insn, regs, REG_TYPE_RM); /* * -EDOM means that we must ignore the address_offset. In such a case, * in 64-bit mode the effective address relative to the rIP of the * following instruction. */ if (*regoff == -EDOM) { if (user_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; } else if (*regoff < 0) { return -EINVAL; } else { tmp = regs_get_register(regs, *regoff); } if (insn->addr_bytes == 4) { int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = tmp + insn->displacement.value; } return 0; } /** * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM * @insn: Instruction. Must be valid. 
* @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the 16-bit effective address referenced by the ModRM byte of @insn. * After identifying the registers involved in the register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates * the associated segment is stored in @regoff, this parameter can later be used * to determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. @regoff * will have a register, as an offset from the base of pt_regs, that can be used * to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, int *regoff, short *eff_addr) { int addr_offset1, addr_offset2, ret; short addr1 = 0, addr2 = 0, displacement; if (insn->addr_bytes != 2) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2); if (ret < 0) return -EINVAL; /* * Don't fail on invalid offset values. They might be invalid because * they cannot be used for this particular value of ModRM. Instead, use * them in the computation only if they contain a valid value. */ if (addr_offset1 != -EDOM) addr1 = regs_get_register(regs, addr_offset1) & 0xffff; if (addr_offset2 != -EDOM) addr2 = regs_get_register(regs, addr_offset2) & 0xffff; displacement = insn->displacement.value & 0xffff; *eff_addr = addr1 + addr2 + displacement; /* * The first operand register could indicate to use of either SS or DS * registers to obtain the segment selector. The second operand * register can only indicate the use of DS. Thus, the first operand * will be used to obtain the segment selector. */ *regoff = addr_offset1; return 0; } /** * get_eff_addr_sib() - Obtain referenced effective address via SIB * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode * @regoff: Obtained operand offset, in pt_regs, associated with segment * @eff_addr: Obtained effective address * * Obtain the effective address referenced by the SIB byte of @insn. After * identifying the registers involved in the indexed, register-indirect memory * reference, its value is obtained from the operands in @regs. The computed * address is stored @eff_addr. Also, the register operand that indicates the * associated segment is stored in @regoff, this parameter can later be used to * determine such segment. * * Returns: * * 0 on success. @eff_addr will have the referenced effective address. * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * * -EINVAL on error. */ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; insn_get_sib(insn); if (!insn->sib.nbytes) return -EINVAL; *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); /* * Negative values in the base and index offset means an error when * decoding the SIB byte. 
Except -EDOM, which means that the registers * should not be used in the address computation. */ if (*base_offset == -EDOM) base = 0; else if (*base_offset < 0) return -EINVAL; else base = regs_get_register(regs, *base_offset); if (indx_offset == -EDOM) indx = 0; else if (indx_offset < 0) return -EINVAL; else indx = regs_get_register(regs, indx_offset); if (insn->addr_bytes == 4) { int addr32, base32, idx32; base32 = base & 0xffffffff; idx32 = indx & 0xffffffff; addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value)); addr32 += insn->displacement.value; *eff_addr = addr32 & 0xffffffff; } else { *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value)); *eff_addr += insn->displacement.value; } return 0; } /** * get_addr_ref_16() - Obtain the 16-bit address referred by instruction * @insn: Instruction containing ModRM byte and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 16-bit address encodings. Obtain the memory * address referred by the instruction's ModRM and displacement bytes. Also, the * segment used as base is determined by either any segment override prefixes in * @insn or the default segment of the registers involved in the address * computation. In protected mode, segment limits are enforced. * * Returns: * * Linear address referenced by the instruction operands on success. * * -1L on error. */ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int ret, regoff; short eff_addr; long tmp; insn_get_modrm(insn); insn_get_displacement(insn); if (insn->addr_bytes != 2) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr); if (ret) goto out; } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * Before computing the linear address, make sure the effective address * is within the limits of the segment. In virtual-8086 mode, segment * limits are not enforced. In such a case, the segment limit is -1L to * reflect this fact. */ if ((unsigned long)(eff_addr & 0xffff) > seg_limit) goto out; linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_32() - Obtain a 32-bit linear address * @insn: Instruction with ModRM, SIB bytes and displacement * @regs: Register values as seen when entering kernel mode * * This function is to be used with 32-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. If in protected * mode, segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. 
*/ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base, seg_limit; int eff_addr, regoff; long tmp; int ret; if (insn->addr_bytes != 4) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp); if (ret) goto out; eff_addr = tmp; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit); if (ret) goto out; /* * In protected mode, before computing the linear address, make sure * the effective address is within the limits of the segment. * 32-bit addresses can be used in long and virtual-8086 modes if an * address override prefix is used. In such cases, segment limits are * not enforced. When in virtual-8086 mode, the segment limit is -1L * to reflect this situation. * * After computed, the effective address is treated as an unsigned * quantity. */ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* * Even though 32-bit address encodings are allowed in virtual-8086 * mode, the address range is still limited to [0x-0xffff]. */ if (v8086_mode(regs) && (eff_addr & ~0xffff)) goto out; /* * Data type long could be 64 bits in size. Ensure that our 32-bit * effective address is not sign-extended when computing the linear * address. */ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base; /* Limit linear address to 20 bits */ if (v8086_mode(regs)) linear_addr &= 0xfffff; out: return (void __user *)linear_addr; } /** * get_addr_ref_64() - Obtain a 64-bit linear address * @insn: Instruction struct with ModRM and SIB bytes and displacement * @regs: Structure with register values as seen when entering kernel mode * * This function is to be used with 64-bit address encodings to obtain the * linear memory address referred by the instruction's ModRM, SIB, * displacement bytes and segment base address, as applicable. * * Returns: * * Linear address referenced by instruction and registers on success. * * -1L on error. */ #ifndef CONFIG_X86_64 static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { return (void __user *)-1L; } #else static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) { unsigned long linear_addr = -1L, seg_base; int regoff, ret; long eff_addr; if (insn->addr_bytes != 8) goto out; if (X86_MODRM_MOD(insn->modrm.value) == 3) { ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { if (insn->sib.nbytes) { ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr); if (ret) goto out; } else { ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr); if (ret) goto out; } } ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL); if (ret) goto out; linear_addr = (unsigned long)eff_addr + seg_base; out: return (void __user *)linear_addr; } #endif /* CONFIG_X86_64 */ /** * insn_get_addr_ref() - Obtain the linear address referred by instruction * @insn: Instruction structure containing ModRM byte and displacement * @regs: Structure with register values as seen when entering kernel mode * * Obtain the linear address referred by the instruction's ModRM, SIB and * displacement bytes, and segment base, as applicable. In protected mode, * segment limits are enforced. * * Returns: * * Linear address referenced by instruction and registers on success. 
* * -1L on error. */ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) { if (!insn || !regs) return (void __user *)-1L; switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); case 4: return get_addr_ref_32(insn, regs); case 8: return get_addr_ref_64(insn, regs); default: return (void __user *)-1L; } }
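The 16-bit and virtual-8086 paths in the code_after source above compute a linear address as a segment base plus a 16-bit effective address, where the base is the selector shifted left by four bits and the result is limited to 20 bits. Below is a minimal user-space sketch of that arithmetic, for illustration only; the helper name v8086_linear_addr() is hypothetical and is not part of insn-eval.c.

/*
 * Minimal user-space sketch (not kernel code) of the real-mode address
 * arithmetic described in the comments above: in virtual-8086 mode the
 * segment base is the selector shifted left by 4, the effective address
 * is truncated to 16 bits, and the resulting linear address is limited
 * to 20 bits. The helper name is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t v8086_linear_addr(uint16_t sel, uint16_t eff_addr)
{
	uint32_t seg_base = (uint32_t)sel << 4;   /* base = selector * 16 */
	uint32_t linear   = seg_base + eff_addr;  /* add 16-bit offset    */

	return linear & 0xfffff;                  /* wrap at 20 bits      */
}

int main(void)
{
	/* 0xffff:0xffff wraps to 0x0ffef once limited to 20 bits. */
	printf("0x%05x\n", (unsigned)v8086_linear_addr(0xffff, 0xffff));
	return 0;
}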
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct *desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; desc = get_desc(sel); if (!desc) return -1L; return get_desc_base(desc); }
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { struct desc_struct desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); if (sel < 0) return -1L; if (v8086_mode(regs)) /* * Base is simply the segment selector shifted 4 * bits to the right. */ return (unsigned long)(sel << 4); if (user_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) rdmsrl(MSR_FS_BASE, base); else if (seg_reg_idx == INAT_SEG_REG_GS) /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ rdmsrl(MSR_KERNEL_GS_BASE, base); else base = 0; return base; } /* In protected mode the segment selector cannot be null. */ if (!sel) return -1L; if (!get_desc(&desc, sel)) return -1L; return get_desc_base(&desc); }
{'added': [(560, ' * get_desc() - Obtain contents of a segment descriptor'), (561, ' * @out:\tSegment descriptor contents on success'), (569, ' * True on success, false on failure.'), (573, 'static bool get_desc(struct desc_struct *out, unsigned short sel)'), (580, '\t\tbool success = false;'), (588, '\t\tif (ldt && sel < ldt->nr_entries) {'), (589, '\t\t\t*out = ldt->entries[sel];'), (590, '\t\t\tsuccess = true;'), (591, '\t\t}'), (595, '\t\treturn success;'), (610, '\t\treturn false;'), (612, '\t*out = *(struct desc_struct *)(gdt_desc.address + desc_base);'), (613, '\treturn true;'), (635, '\tstruct desc_struct desc;'), (673, '\tif (!get_desc(&desc, sel))'), (676, '\treturn get_desc_base(&desc);'), (698, '\tstruct desc_struct desc;'), (712, '\tif (!get_desc(&desc, sel))'), (721, '\tlimit = get_desc_limit(&desc);'), (722, '\tif (desc.g)'), (746, '\tstruct desc_struct desc;'), (757, '\tif (!get_desc(&desc, sel))'), (765, '\tif (!(desc.type & BIT(3)))'), (768, '\tswitch ((desc.l << 1) | desc.d) {')], 'deleted': [(560, ' * get_desc() - Obtain pointer to a segment descriptor'), (568, ' * Pointer to segment descriptor on success.'), (572, 'static struct desc_struct *get_desc(unsigned short sel)'), (579, '\t\tstruct desc_struct *desc = NULL;'), (587, '\t\tif (ldt && sel < ldt->nr_entries)'), (588, '\t\t\tdesc = &ldt->entries[sel];'), (592, '\t\treturn desc;'), (607, '\t\treturn NULL;'), (609, '\treturn (struct desc_struct *)(gdt_desc.address + desc_base);'), (631, '\tstruct desc_struct *desc;'), (669, '\tdesc = get_desc(sel);'), (670, '\tif (!desc)'), (673, '\treturn get_desc_base(desc);'), (695, '\tstruct desc_struct *desc;'), (709, '\tdesc = get_desc(sel);'), (710, '\tif (!desc)'), (719, '\tlimit = get_desc_limit(desc);'), (720, '\tif (desc->g)'), (744, '\tstruct desc_struct *desc;'), (755, '\tdesc = get_desc(sel);'), (756, '\tif (!desc)'), (764, '\tif (!(desc->type & BIT(3)))'), (767, '\tswitch ((desc->l << 1) | desc->d) {')]}
24
23
634
3848
https://github.com/torvalds/linux
CVE-2019-13233
['CWE-416', 'CWE-362']
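The record above (CVE-2019-13233, CWE-416/CWE-362) changes get_desc() so that, instead of returning a pointer into the LDT that can be freed by a concurrent LDT update, it copies the descriptor into a caller-supplied struct while the mutex is held. The following is a generic, hedged sketch of that copy-out-under-lock pattern, not the kernel implementation; all names (entry_t, table, table_lock, lookup_entry) are hypothetical.

/*
 * Generic user-space sketch of the pattern applied in the diff above:
 * instead of returning a pointer into a table that another thread may
 * free (use-after-free / race), copy the entry out while the lock is
 * held. Types and names are hypothetical, not the kernel's.
 */
#include <pthread.h>
#include <stdbool.h>

typedef struct { unsigned long base, limit; } entry_t;

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static entry_t *table;          /* may be replaced/freed by other threads */
static unsigned int table_len;

/* Copy the selected entry into *out under the lock; never leak a pointer. */
static bool lookup_entry(entry_t *out, unsigned int idx)
{
	bool ok = false;

	pthread_mutex_lock(&table_lock);
	if (table && idx < table_len) {
		*out = table[idx];      /* snapshot while the table is stable */
		ok = true;
	}
	pthread_mutex_unlock(&table_lock);

	return ok;                      /* caller uses its private copy */
}

int main(void)
{
	entry_t entries[2] = { { 0x1000, 0xfff }, { 0x2000, 0xffff } };
	entry_t copy;

	table = entries;
	table_len = 2;

	return lookup_entry(&copy, 1) ? 0 : 1;
}

Because the caller works on its private copy after the lock is dropped, no reference into the shared table escapes the critical section, which is the property the fix restores.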
vim9compile.c
generate_loadvar
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * vim9compile.c: compiling a :def function */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) // When not generating protos this is included in proto.h #ifdef PROTO # include "vim9.h" #endif // Functions defined with :def are stored in this growarray. // They are never removed, so that they can be found by index. // Deleted functions have the df_deleted flag set. garray_T def_functions = {0, 0, sizeof(dfunc_T), 50, NULL}; static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted); /* * Lookup variable "name" in the local scope and return it in "lvar". * "lvar->lv_from_outer" is incremented accordingly. * If "lvar" is NULL only check if the variable can be found. * Return FAIL if not found. */ int lookup_local(char_u *name, size_t len, lvar_T *lvar, cctx_T *cctx) { int idx; lvar_T *lvp; if (len == 0) return FAIL; // Find local in current function scope. for (idx = 0; idx < cctx->ctx_locals.ga_len; ++idx) { lvp = ((lvar_T *)cctx->ctx_locals.ga_data) + idx; if (STRNCMP(name, lvp->lv_name, len) == 0 && STRLEN(lvp->lv_name) == len) { if (lvar != NULL) { *lvar = *lvp; lvar->lv_from_outer = 0; } return OK; } } // Find local in outer function scope. if (cctx->ctx_outer != NULL) { if (lookup_local(name, len, lvar, cctx->ctx_outer) == OK) { if (lvar != NULL) { cctx->ctx_outer_used = TRUE; ++lvar->lv_from_outer; } return OK; } } return FAIL; } /* * Lookup an argument in the current function and an enclosing function. * Returns the argument index in "idxp" * Returns the argument type in "type" * Sets "gen_load_outer" to TRUE if found in outer scope. * Returns OK when found, FAIL otherwise. */ int arg_exists( char_u *name, size_t len, int *idxp, type_T **type, int *gen_load_outer, cctx_T *cctx) { int idx; char_u *va_name; if (len == 0) return FAIL; for (idx = 0; idx < cctx->ctx_ufunc->uf_args_visible; ++idx) { char_u *arg = FUNCARG(cctx->ctx_ufunc, idx); if (STRNCMP(name, arg, len) == 0 && arg[len] == NUL) { if (idxp != NULL) { // Arguments are located above the frame pointer. One further // if there is a vararg argument *idxp = idx - (cctx->ctx_ufunc->uf_args.ga_len + STACK_FRAME_SIZE) + (cctx->ctx_ufunc->uf_va_name != NULL ? -1 : 0); if (cctx->ctx_ufunc->uf_arg_types != NULL) *type = cctx->ctx_ufunc->uf_arg_types[idx]; else *type = &t_any; } return OK; } } va_name = cctx->ctx_ufunc->uf_va_name; if (va_name != NULL && STRNCMP(name, va_name, len) == 0 && va_name[len] == NUL) { if (idxp != NULL) { // varargs is always the last argument *idxp = -STACK_FRAME_SIZE - 1; *type = cctx->ctx_ufunc->uf_va_type; } return OK; } if (cctx->ctx_outer != NULL) { // Lookup the name for an argument of the outer function. if (arg_exists(name, len, idxp, type, gen_load_outer, cctx->ctx_outer) == OK) { if (gen_load_outer != NULL) ++*gen_load_outer; return OK; } } return FAIL; } /* * Lookup a script-local variable in the current script, possibly defined in a * block that contains the function "cctx->ctx_ufunc". * "cctx" is NULL at the script level, "cstack" is NULL in a function. * If "len" is <= 0 "name" must be NUL terminated. * Return NULL when not found. 
*/ static sallvar_T * find_script_var(char_u *name, size_t len, cctx_T *cctx, cstack_T *cstack) { scriptitem_T *si = SCRIPT_ITEM(current_sctx.sc_sid); hashitem_T *hi; int cc; sallvar_T *sav; ufunc_T *ufunc; // Find the list of all script variables with the right name. if (len > 0) { cc = name[len]; name[len] = NUL; } hi = hash_find(&si->sn_all_vars.dv_hashtab, name); if (len > 0) name[len] = cc; if (HASHITEM_EMPTY(hi)) return NULL; sav = HI2SAV(hi); if (sav->sav_block_id == 0) // variable defined in the top script scope is always visible return sav; if (cctx == NULL) { // Not in a function scope, find variable with block ID equal to or // smaller than the current block id. Use "cstack" to go up the block // scopes. while (sav != NULL) { int idx; for (idx = cstack->cs_idx; idx >= 0; --idx) if (cstack->cs_block_id[idx] == sav->sav_block_id) break; if (idx >= 0) break; sav = sav->sav_next; } return sav; } // Go over the variables with this name and find one that was visible // from the function. ufunc = cctx->ctx_ufunc; while (sav != NULL) { int idx; // Go over the blocks that this function was defined in. If the // variable block ID matches it was visible to the function. for (idx = 0; idx < ufunc->uf_block_depth; ++idx) if (ufunc->uf_block_ids[idx] == sav->sav_block_id) return sav; sav = sav->sav_next; } // Not found, variable was not visible. return NULL; } /* * Return TRUE if the script context is Vim9 script. */ int script_is_vim9() { return SCRIPT_ITEM(current_sctx.sc_sid)->sn_version == SCRIPT_VERSION_VIM9; } /* * Lookup a variable (without s: prefix) in the current script. * "cctx" is NULL at the script level, "cstack" is NULL in a function. * Returns OK or FAIL. */ int script_var_exists(char_u *name, size_t len, cctx_T *cctx, cstack_T *cstack) { if (current_sctx.sc_sid <= 0) return FAIL; if (script_is_vim9()) { // Check script variables that were visible where the function was // defined. if (find_script_var(name, len, cctx, cstack) != NULL) return OK; } else { hashtab_T *ht = &SCRIPT_VARS(current_sctx.sc_sid); dictitem_T *di; int cc; // Check script variables that are currently visible cc = name[len]; name[len] = NUL; di = find_var_in_ht(ht, 0, name, TRUE); name[len] = cc; if (di != NULL) return OK; } return FAIL; } /* * Return TRUE if "name" is a local variable, argument, script variable or * imported. */ static int variable_exists(char_u *name, size_t len, cctx_T *cctx) { return (cctx != NULL && (lookup_local(name, len, NULL, cctx) == OK || arg_exists(name, len, NULL, NULL, NULL, cctx) == OK)) || script_var_exists(name, len, cctx, NULL) == OK || find_imported(name, len, FALSE) != NULL; } /* * Return TRUE if "name" is a local variable, argument, script variable, * imported or function. Or commands are being skipped, a declaration may have * been skipped then. */ static int item_exists(char_u *name, size_t len, int cmd UNUSED, cctx_T *cctx) { return variable_exists(name, len, cctx); } /* * Check if "p[len]" is already defined, either in script "import_sid" or in * compilation context "cctx". * "cctx" is NULL at the script level, "cstack" is NULL in a function. * Does not check the global namespace. * If "is_arg" is TRUE the error message is for an argument name. * Return FAIL and give an error if it defined. 
*/ int check_defined( char_u *p, size_t len, cctx_T *cctx, cstack_T *cstack, int is_arg) { int c = p[len]; ufunc_T *ufunc = NULL; // underscore argument is OK if (len == 1 && *p == '_') return OK; if (script_var_exists(p, len, cctx, cstack) == OK) { if (is_arg) semsg(_(e_argument_already_declared_in_script_str), p); else semsg(_(e_variable_already_declared_in_script_str), p); return FAIL; } p[len] = NUL; if ((cctx != NULL && (lookup_local(p, len, NULL, cctx) == OK || arg_exists(p, len, NULL, NULL, NULL, cctx) == OK)) || find_imported(p, len, FALSE) != NULL || (ufunc = find_func_even_dead(p, 0)) != NULL) { // A local or script-local function can shadow a global function. if (ufunc == NULL || ((ufunc->uf_flags & FC_DEAD) == 0 && (!func_is_global(ufunc) || (p[0] == 'g' && p[1] == ':')))) { if (is_arg) semsg(_(e_argument_name_shadows_existing_variable_str), p); else semsg(_(e_name_already_defined_str), p); p[len] = c; return FAIL; } } p[len] = c; return OK; } /* * Return TRUE if "actual" could be "expected" and a runtime typecheck is to be * used. Return FALSE if the types will never match. */ static int use_typecheck(type_T *actual, type_T *expected) { if (actual->tt_type == VAR_ANY || actual->tt_type == VAR_UNKNOWN || (actual->tt_type == VAR_FUNC && (expected->tt_type == VAR_FUNC || expected->tt_type == VAR_PARTIAL) && (actual->tt_member == &t_any || actual->tt_member == &t_unknown || actual->tt_argcount < 0) && (actual->tt_member == &t_unknown || (actual->tt_member == &t_void) == (expected->tt_member == &t_void)))) return TRUE; if ((actual->tt_type == VAR_LIST || actual->tt_type == VAR_DICT) && actual->tt_type == expected->tt_type) // This takes care of a nested list or dict. return use_typecheck(actual->tt_member, expected->tt_member); return FALSE; } /* * Check that * - "actual" matches "expected" type or * - "actual" is a type that can be "expected" type: add a runtime check; or * - return FAIL. * If "actual_is_const" is TRUE then the type won't change at runtime, do not * generate a TYPECHECK. */ int need_type_where( type_T *actual, type_T *expected, int offset, where_T where, cctx_T *cctx, int silent, int actual_is_const) { int ret; if (expected == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { // Using "0", "1" or the result of an expression with "&&" or "||" as a // boolean is OK but requires a conversion. generate_2BOOL(cctx, FALSE, offset); return OK; } ret = check_type_maybe(expected, actual, FALSE, where); if (ret == OK) return OK; // If actual a constant a runtime check makes no sense. If it's // null_function it is OK. if (actual_is_const && ret == MAYBE && actual == &t_func_unknown) return OK; // If the actual type can be the expected type add a runtime check. if (!actual_is_const && ret == MAYBE && use_typecheck(actual, expected)) { generate_TYPECHECK(cctx, expected, offset, where.wt_variable, where.wt_index); return OK; } if (!silent) type_mismatch_where(expected, actual, where); return FAIL; } int need_type( type_T *actual, type_T *expected, int offset, int arg_idx, cctx_T *cctx, int silent, int actual_is_const) { where_T where = WHERE_INIT; where.wt_index = arg_idx; return need_type_where(actual, expected, offset, where, cctx, silent, actual_is_const); } /* * Reserve space for a local variable. * Return the variable or NULL if it failed. 
*/ lvar_T * reserve_local( cctx_T *cctx, char_u *name, size_t len, int isConst, type_T *type) { lvar_T *lvar; dfunc_T *dfunc; if (arg_exists(name, len, NULL, NULL, NULL, cctx) == OK) { emsg_namelen(_(e_str_is_used_as_argument), name, (int)len); return NULL; } if (GA_GROW_FAILS(&cctx->ctx_locals, 1)) return NULL; lvar = ((lvar_T *)cctx->ctx_locals.ga_data) + cctx->ctx_locals.ga_len++; CLEAR_POINTER(lvar); // Every local variable uses the next entry on the stack. We could re-use // the last ones when leaving a scope, but then variables used in a closure // might get overwritten. To keep things simple do not re-use stack // entries. This is less efficient, but memory is cheap these days. dfunc = ((dfunc_T *)def_functions.ga_data) + cctx->ctx_ufunc->uf_dfunc_idx; lvar->lv_idx = dfunc->df_var_names.ga_len; lvar->lv_name = vim_strnsave(name, len == 0 ? STRLEN(name) : len); lvar->lv_const = isConst; lvar->lv_type = type; // Remember the name for debugging. if (GA_GROW_FAILS(&dfunc->df_var_names, 1)) return NULL; ((char_u **)dfunc->df_var_names.ga_data)[lvar->lv_idx] = vim_strsave(lvar->lv_name); ++dfunc->df_var_names.ga_len; return lvar; } /* * If "check_writable" is ASSIGN_CONST give an error if the variable was * defined with :final or :const, if "check_writable" is ASSIGN_FINAL give an * error if the variable was defined with :const. */ static int check_item_writable(svar_T *sv, int check_writable, char_u *name) { if ((check_writable == ASSIGN_CONST && sv->sv_const != 0) || (check_writable == ASSIGN_FINAL && sv->sv_const == ASSIGN_CONST)) { semsg(_(e_cannot_change_readonly_variable_str), name); return FAIL; } return OK; } /* * Find "name" in script-local items of script "sid". * Pass "check_writable" to check_item_writable(). * "cctx" is NULL at the script level, "cstack" is NULL in a function. * Returns the index in "sn_var_vals" if found. * If found but not in "sn_var_vals" returns -1. * If not found or the variable is not writable returns -2. */ int get_script_item_idx( int sid, char_u *name, int check_writable, cctx_T *cctx, cstack_T *cstack) { hashtab_T *ht; dictitem_T *di; scriptitem_T *si = SCRIPT_ITEM(sid); svar_T *sv; int idx; if (!SCRIPT_ID_VALID(sid)) return -1; if (sid == current_sctx.sc_sid) { sallvar_T *sav = find_script_var(name, 0, cctx, cstack); if (sav == NULL) return -2; idx = sav->sav_var_vals_idx; sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } // First look the name up in the hashtable. ht = &SCRIPT_VARS(sid); di = find_var_in_ht(ht, 0, name, TRUE); if (di == NULL) { if (si->sn_autoload_prefix != NULL) { hashitem_T *hi; // A variable exported from an autoload script is in the global // variables, we can find it in the all_vars table. hi = hash_find(&si->sn_all_vars.dv_hashtab, name); if (!HASHITEM_EMPTY(hi)) return HI2SAV(hi)->sav_var_vals_idx; } return -2; } // Now find the svar_T index in sn_var_vals. for (idx = 0; idx < si->sn_var_vals.ga_len; ++idx) { sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (sv->sv_tv == &di->di_tv) { if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } } return -1; } static imported_T * find_imported_in_script(char_u *name, size_t len, int sid) { scriptitem_T *si; int idx; if (!SCRIPT_ID_VALID(sid)) return NULL; si = SCRIPT_ITEM(sid); for (idx = 0; idx < si->sn_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)si->sn_imports.ga_data) + idx; if (len == 0 ? 
STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return NULL; } /* * Find "name" in imported items of the current script. * If "len" is 0 use any length that works. * If "load" is TRUE and the script was not loaded yet, load it now. */ imported_T * find_imported(char_u *name, size_t len, int load) { imported_T *ret; if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) return NULL; ret = find_imported_in_script(name, len, current_sctx.sc_sid); if (ret != NULL && load && (ret->imp_flags & IMP_FLAGS_AUTOLOAD)) { scid_T dummy; int save_emsg_off = emsg_off; // "emsg_off" will be set when evaluating an expression silently, but // we do want to know about errors in a script. Also because it then // aborts when an error is encountered. emsg_off = FALSE; // script found before but not loaded yet ret->imp_flags &= ~IMP_FLAGS_AUTOLOAD; (void)do_source(SCRIPT_ITEM(ret->imp_sid)->sn_name, FALSE, DOSO_NONE, &dummy); emsg_off = save_emsg_off; } return ret; } /* * Called when checking for a following operator at "arg". When the rest of * the line is empty or only a comment, peek the next line. If there is a next * line return a pointer to it and set "nextp". * Otherwise skip over white space. */ char_u * may_peek_next_line(cctx_T *cctx, char_u *arg, char_u **nextp) { char_u *p = skipwhite(arg); *nextp = NULL; if (*p == NUL || (VIM_ISWHITE(*arg) && vim9_comment_start(p))) { *nextp = peek_next_line_from_context(cctx); if (*nextp != NULL) return *nextp; } return p; } /* * Return a pointer to the next line that isn't empty or only contains a * comment. Skips over white space. * Returns NULL if there is none. */ char_u * peek_next_line_from_context(cctx_T *cctx) { int lnum = cctx->ctx_lnum; while (++lnum < cctx->ctx_ufunc->uf_lines.ga_len) { char_u *line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[lnum]; char_u *p; // ignore NULLs inserted for continuation lines if (line != NULL) { p = skipwhite(line); if (vim9_bad_comment(p)) return NULL; if (*p != NUL && !vim9_comment_start(p)) return p; } } return NULL; } /* * Get the next line of the function from "cctx". * Skips over empty lines. Skips over comment lines if "skip_comment" is TRUE. * Returns NULL when at the end. */ char_u * next_line_from_context(cctx_T *cctx, int skip_comment) { char_u *line; do { ++cctx->ctx_lnum; if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len) { line = NULL; break; } line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; cctx->ctx_line_start = line; SOURCING_LNUM = cctx->ctx_lnum + 1; } while (line == NULL || *skipwhite(line) == NUL || (skip_comment && vim9_comment_start(skipwhite(line)))); return line; } /* * Skip over white space at "whitep" and assign to "*arg". * If "*arg" is at the end of the line, advance to the next line. * Also when "whitep" points to white space and "*arg" is on a "#". * Return FAIL if beyond the last line, "*arg" is unmodified then. */ int may_get_next_line(char_u *whitep, char_u **arg, cctx_T *cctx) { *arg = skipwhite(whitep); if (vim9_bad_comment(*arg)) return FAIL; if (**arg == NUL || (VIM_ISWHITE(*whitep) && vim9_comment_start(*arg))) { char_u *next = next_line_from_context(cctx, TRUE); if (next == NULL) return FAIL; *arg = skipwhite(next); } return OK; } /* * Idem, and give an error when failed. 
*/ int may_get_next_line_error(char_u *whitep, char_u **arg, cctx_T *cctx) { if (may_get_next_line(whitep, arg, cctx) == FAIL) { SOURCING_LNUM = cctx->ctx_lnum + 1; emsg(_(e_line_incomplete)); return FAIL; } return OK; } /* * Get a line from the compilation context, compatible with exarg_T getline(). * Return a pointer to the line in allocated memory. * Return NULL for end-of-file or some error. */ static char_u * exarg_getline( int c UNUSED, void *cookie, int indent UNUSED, getline_opt_T options UNUSED) { cctx_T *cctx = (cctx_T *)cookie; char_u *p; for (;;) { if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len - 1) return NULL; ++cctx->ctx_lnum; p = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; // Comment lines result in NULL pointers, skip them. if (p != NULL) return vim_strsave(p); } } void fill_exarg_from_cctx(exarg_T *eap, cctx_T *cctx) { eap->getline = exarg_getline; eap->cookie = cctx; eap->skip = cctx->ctx_skip == SKIP_YES; } /* * Return TRUE if "ufunc" should be compiled, taking into account whether * "profile" indicates profiling is to be done. */ int func_needs_compiling(ufunc_T *ufunc, compiletype_T compile_type) { switch (ufunc->uf_def_status) { case UF_TO_BE_COMPILED: return TRUE; case UF_COMPILED: { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE return dfunc->df_instr_prof == NULL; #endif case CT_NONE: return dfunc->df_instr == NULL; case CT_DEBUG: return dfunc->df_instr_debug == NULL; } } case UF_NOT_COMPILED: case UF_COMPILE_ERROR: case UF_COMPILING: break; } return FALSE; } /* * Compile a nested :def command. */ static char_u * compile_nested_function(exarg_T *eap, cctx_T *cctx, garray_T *lines_to_free) { int is_global = *eap->arg == 'g' && eap->arg[1] == ':'; char_u *name_start = eap->arg; char_u *name_end = to_name_end(eap->arg, TRUE); int off; char_u *func_name; char_u *lambda_name; ufunc_T *ufunc; int r = FAIL; compiletype_T compile_type; isn_T *funcref_isn = NULL; lvar_T *lvar = NULL; if (eap->forceit) { emsg(_(e_cannot_use_bang_with_nested_def)); return NULL; } if (*name_start == '/') { name_end = skip_regexp(name_start + 1, '/', TRUE); if (*name_end == '/') ++name_end; set_nextcmd(eap, name_end); } if (name_end == name_start || *skipwhite(name_end) != '(') { if (!ends_excmd2(name_start, name_end)) { if (*skipwhite(name_end) == '.') semsg(_(e_cannot_define_dict_func_in_vim9_script_str), eap->cmd); else semsg(_(e_invalid_command_str), eap->cmd); return NULL; } // "def" or "def Name": list functions if (generate_DEF(cctx, name_start, name_end - name_start) == FAIL) return NULL; return eap->nextcmd == NULL ? (char_u *)"" : eap->nextcmd; } // Only g:Func() can use a namespace. if (name_start[1] == ':' && !is_global) { semsg(_(e_namespace_not_supported_str), name_start); return NULL; } if (cctx->ctx_skip != SKIP_YES && check_defined(name_start, name_end - name_start, cctx, NULL, FALSE) == FAIL) return NULL; if (!ASCII_ISUPPER(is_global ? name_start[2] : name_start[0])) { semsg(_(e_function_name_must_start_with_capital_str), name_start); return NULL; } eap->arg = name_end; fill_exarg_from_cctx(eap, cctx); eap->forceit = FALSE; // We use the special <Lamba>99 name, but it's not really a lambda. lambda_name = vim_strsave(get_lambda_name()); if (lambda_name == NULL) return NULL; // This may free the current line, make a copy of the name. off = is_global ? 
2 : 0; func_name = vim_strnsave(name_start + off, name_end - name_start - off); if (func_name == NULL) { r = FAIL; goto theend; } ufunc = define_function(eap, lambda_name, lines_to_free); if (ufunc == NULL) { r = eap->skip ? OK : FAIL; goto theend; } if (eap->nextcmd != NULL) { semsg(_(e_text_found_after_str_str), eap->cmdidx == CMD_def ? "enddef" : "endfunction", eap->nextcmd); r = FAIL; func_ptr_unref(ufunc); goto theend; } // copy over the block scope IDs before compiling if (!is_global && cctx->ctx_ufunc->uf_block_depth > 0) { int block_depth = cctx->ctx_ufunc->uf_block_depth; ufunc->uf_block_ids = ALLOC_MULT(int, block_depth); if (ufunc->uf_block_ids != NULL) { mch_memmove(ufunc->uf_block_ids, cctx->ctx_ufunc->uf_block_ids, sizeof(int) * block_depth); ufunc->uf_block_depth = block_depth; } } // Define the funcref before compiling, so that it is found by any // recursive call. if (is_global) { r = generate_NEWFUNC(cctx, lambda_name, func_name); func_name = NULL; lambda_name = NULL; } else { // Define a local variable for the function reference. lvar = reserve_local(cctx, func_name, name_end - name_start, TRUE, ufunc->uf_func_type); if (lvar == NULL) goto theend; if (generate_FUNCREF(cctx, ufunc, &funcref_isn) == FAIL) goto theend; r = generate_STORE(cctx, ISN_STORE, lvar->lv_idx, NULL); } compile_type = get_compile_type(ufunc); #ifdef FEAT_PROFILE // If the outer function is profiled, also compile the nested function for // profiling. if (cctx->ctx_compile_type == CT_PROFILE) compile_type = CT_PROFILE; #endif if (func_needs_compiling(ufunc, compile_type) && compile_def_function(ufunc, TRUE, compile_type, cctx) == FAIL) { func_ptr_unref(ufunc); if (lvar != NULL) // Now the local variable can't be used. *lvar->lv_name = '/'; // impossible value goto theend; } #ifdef FEAT_PROFILE // When the outer function is compiled for profiling, the nested function // may be called without profiling. Compile it here in the right context. if (compile_type == CT_PROFILE && func_needs_compiling(ufunc, CT_NONE)) compile_def_function(ufunc, FALSE, CT_NONE, cctx); #endif // If a FUNCREF instruction was generated, set the index after compiling. if (funcref_isn != NULL && ufunc->uf_def_status == UF_COMPILED) funcref_isn->isn_arg.funcref.fr_dfunc_idx = ufunc->uf_dfunc_idx; theend: vim_free(lambda_name); vim_free(func_name); return r == FAIL ? NULL : (char_u *)""; } /* * Compile one Vim expression {expr} in string "p". * "p" points to the opening "{". * Return a pointer to the character after "}", NULL for an error. */ char_u * compile_one_expr_in_str(char_u *p, cctx_T *cctx) { char_u *block_start; char_u *block_end; // Skip the opening {. block_start = skipwhite(p + 1); block_end = block_start; if (*block_start != NUL && skip_expr(&block_end, NULL) == FAIL) return NULL; block_end = skipwhite(block_end); // The block must be closed by a }. if (*block_end != '}') { semsg(_(e_missing_close_curly_str), p); return NULL; } if (compile_expr0(&block_start, cctx) == FAIL) return NULL; may_generate_2STRING(-1, TRUE, cctx); return block_end + 1; } /* * Compile a string "str" (either containing a literal string or a mix of * literal strings and Vim expressions of the form `{expr}`). This is used * when compiling a heredoc assignment to a variable or an interpolated string * in a Vim9 def function. Vim9 instructions are generated to push strings, * evaluate expressions, concatenate them and create a list of lines. When * "evalstr" is TRUE, Vim expressions in "str" are evaluated. 
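 *
 * Illustrative sketch (comment added in this copy, not in the original
 * source): for the body of an interpolated string such as
 *	"count is {n + 1}"
 * the loop below pushes the literal "count is " with generate_PUSHS(),
 * compiles "n + 1" via compile_one_expr_in_str() (which also converts the
 * result to a string), and then emits one concat instruction with
 * generate_CONCAT() for the two pieces.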
*/ int compile_all_expr_in_str(char_u *str, int evalstr, cctx_T *cctx) { char_u *p = str; char_u *val; int count = 0; if (cctx->ctx_skip == SKIP_YES) return OK; if (!evalstr || *str == NUL) { // Literal string, possibly empty. val = *str != NUL ? vim_strsave(str) : NULL; return generate_PUSHS(cctx, &val); } // Push all the string pieces to the stack, followed by a ISN_CONCAT. while (*p != NUL) { char_u *lit_start; int escaped_brace = FALSE; // Look for a block start. lit_start = p; while (*p != '{' && *p != '}' && *p != NUL) ++p; if (*p != NUL && *p == p[1]) { // Escaped brace, unescape and continue. // Include the brace in the literal string. ++p; escaped_brace = TRUE; } else if (*p == '}') { semsg(_(e_stray_closing_curly_str), str); return FAIL; } // Append the literal part. if (p != lit_start) { val = vim_strnsave(lit_start, (size_t)(p - lit_start)); if (generate_PUSHS(cctx, &val) == FAIL) return FAIL; ++count; } if (*p == NUL) break; if (escaped_brace) { // Skip the second brace. ++p; continue; } p = compile_one_expr_in_str(p, cctx); if (p == NULL) return FAIL; ++count; } // Small optimization, if there's only a single piece skip the ISN_CONCAT. if (count > 1) return generate_CONCAT(cctx, count); return OK; } /* * Return the length of an assignment operator, or zero if there isn't one. */ int assignment_len(char_u *p, int *heredoc) { if (*p == '=') { if (p[1] == '<' && p[2] == '<') { *heredoc = TRUE; return 3; } return 1; } if (vim_strchr((char_u *)"+-*/%", *p) != NULL && p[1] == '=') return 2; if (STRNCMP(p, "..=", 3) == 0) return 3; return 0; } /* * Generate the load instruction for "name". */ static void generate_loadvar( cctx_T *cctx, assign_dest_T dest, char_u *name, lvar_T *lvar, type_T *type) { switch (dest) { case dest_option: case dest_func_option: generate_LOAD(cctx, ISN_LOADOPT, 0, name, type); break; case dest_global: if (vim_strchr(name, AUTOLOAD_CHAR) == NULL) { if (name[2] == NUL) generate_instr_type(cctx, ISN_LOADGDICT, &t_dict_any); else generate_LOAD(cctx, ISN_LOADG, 0, name + 2, type); } else generate_LOAD(cctx, ISN_LOADAUTO, 0, name, type); break; case dest_buffer: generate_LOAD(cctx, ISN_LOADB, 0, name + 2, type); break; case dest_window: generate_LOAD(cctx, ISN_LOADW, 0, name + 2, type); break; case dest_tab: generate_LOAD(cctx, ISN_LOADT, 0, name + 2, type); break; case dest_script: compile_load_scriptvar(cctx, name + (name[1] == ':' ? 2 : 0), NULL, NULL); break; case dest_env: // Include $ in the name here generate_LOAD(cctx, ISN_LOADENV, 0, name, type); break; case dest_reg: generate_LOAD(cctx, ISN_LOADREG, name[1], NULL, &t_string); break; case dest_vimvar: generate_LOADV(cctx, name + 2); break; case dest_local: if (lvar->lv_from_outer > 0) generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type); else generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type); break; case dest_expr: // list or dict value should already be on the stack. break; } } /* * Skip over "[expr]" or ".member". * Does not check for any errors. 
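 * E.g. (added note): for "[i + 1].name" a pointer to ".name" is returned,
 * for ".name[0]" a pointer to "[0]".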
*/ static char_u * skip_index(char_u *start) { char_u *p = start; if (*p == '[') { p = skipwhite(p + 1); (void)skip_expr(&p, NULL); p = skipwhite(p); if (*p == ']') return p + 1; return p; } // if (*p == '.') return to_name_end(p + 1, TRUE); } void vim9_declare_error(char_u *name) { char *scope = ""; switch (*name) { case 'g': scope = _("global"); break; case 'b': scope = _("buffer"); break; case 'w': scope = _("window"); break; case 't': scope = _("tab"); break; case 'v': scope = "v:"; break; case '$': semsg(_(e_cannot_declare_an_environment_variable), name); return; case '&': semsg(_(e_cannot_declare_an_option), name); return; case '@': semsg(_(e_cannot_declare_a_register_str), name); return; default: return; } semsg(_(e_cannot_declare_a_scope_variable), scope, name); } /* * For one assignment figure out the type of destination. Return it in "dest". * When not recognized "dest" is not set. * For an option "option_scope" is set. * For a v:var "vimvaridx" is set. * "type" is set to the destination type if known, unchanted otherwise. * Return FAIL if an error message was given. */ int get_var_dest( char_u *name, assign_dest_T *dest, cmdidx_T cmdidx, int *option_scope, int *vimvaridx, type_T **type, cctx_T *cctx) { char_u *p; if (*name == '&') { int cc; long numval; getoption_T opt_type; int opt_p_flags; *dest = dest_option; if (cmdidx == CMD_final || cmdidx == CMD_const) { emsg(_(e_cannot_lock_option)); return FAIL; } p = name; p = find_option_end(&p, option_scope); if (p == NULL) { // cannot happen? emsg(_(e_unexpected_characters_in_assignment)); return FAIL; } cc = *p; *p = NUL; opt_type = get_option_value(skip_option_env_lead(name), &numval, NULL, &opt_p_flags, *option_scope); *p = cc; switch (opt_type) { case gov_unknown: semsg(_(e_unknown_option_str), name); return FAIL; case gov_string: case gov_hidden_string: if (opt_p_flags & P_FUNC) { // might be a Funcref, check the type later *type = &t_any; *dest = dest_func_option; } else { *type = &t_string; } break; case gov_bool: case gov_hidden_bool: *type = &t_bool; break; case gov_number: case gov_hidden_number: *type = &t_number; break; } } else if (*name == '$') { *dest = dest_env; *type = &t_string; } else if (*name == '@') { if (name[1] != '@' && (!valid_yank_reg(name[1], FALSE) || name[1] == '.')) { emsg_invreg(name[1]); return FAIL; } *dest = dest_reg; *type = name[1] == '#' ? &t_number_or_string : &t_string; } else if (STRNCMP(name, "g:", 2) == 0) { *dest = dest_global; } else if (STRNCMP(name, "b:", 2) == 0) { *dest = dest_buffer; } else if (STRNCMP(name, "w:", 2) == 0) { *dest = dest_window; } else if (STRNCMP(name, "t:", 2) == 0) { *dest = dest_tab; } else if (STRNCMP(name, "v:", 2) == 0) { typval_T *vtv; int di_flags; *vimvaridx = find_vim_var(name + 2, &di_flags); if (*vimvaridx < 0) { semsg(_(e_variable_not_found_str), name); return FAIL; } // We use the current value of "sandbox" here, is that OK? if (var_check_ro(di_flags, name, FALSE)) return FAIL; *dest = dest_vimvar; vtv = get_vim_var_tv(*vimvaridx); *type = typval2type_vimvar(vtv, cctx->ctx_type_list); } return OK; } static int is_decl_command(cmdidx_T cmdidx) { return cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_final || cmdidx == CMD_const; } /* * Figure out the LHS type and other properties for an assignment or one item * of ":unlet" with an index. * Returns OK or FAIL. 
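 *
 * Example (added, illustrative only): for the line "name[1].key = value" the
 * code below fills lhs_name with "name", sets lhs_varlen to 4, sets
 * lhs_has_index because "[1].key" follows, and chooses lhs_dest according to
 * what kind of variable "name" is (local, argument, script-local, g:, ...).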
*/ int compile_lhs( char_u *var_start, lhs_T *lhs, cmdidx_T cmdidx, int heredoc, int has_cmd, // "var" before "var_start" int oplen, cctx_T *cctx) { char_u *var_end; int is_decl = is_decl_command(cmdidx); CLEAR_POINTER(lhs); lhs->lhs_dest = dest_local; lhs->lhs_vimvaridx = -1; lhs->lhs_scriptvar_idx = -1; // "dest_end" is the end of the destination, including "[expr]" or // ".name". // "var_end" is the end of the variable/option/etc. name. lhs->lhs_dest_end = skip_var_one(var_start, FALSE); if (*var_start == '@') var_end = var_start + 2; else { // skip over the leading "&", "&l:", "&g:" and "$" var_end = skip_option_env_lead(var_start); var_end = to_name_end(var_end, TRUE); } // "a: type" is declaring variable "a" with a type, not dict "a:". if (is_decl && lhs->lhs_dest_end == var_start + 2 && lhs->lhs_dest_end[-1] == ':') --lhs->lhs_dest_end; if (is_decl && var_end == var_start + 2 && var_end[-1] == ':') --var_end; lhs->lhs_end = lhs->lhs_dest_end; // compute the length of the destination without "[expr]" or ".name" lhs->lhs_varlen = var_end - var_start; lhs->lhs_varlen_total = lhs->lhs_varlen; lhs->lhs_name = vim_strnsave(var_start, lhs->lhs_varlen); if (lhs->lhs_name == NULL) return FAIL; if (lhs->lhs_dest_end > var_start + lhs->lhs_varlen) // Something follows after the variable: "var[idx]" or "var.key". lhs->lhs_has_index = TRUE; if (heredoc) lhs->lhs_type = &t_list_string; else lhs->lhs_type = &t_any; if (cctx->ctx_skip != SKIP_YES) { int declare_error = FALSE; if (get_var_dest(lhs->lhs_name, &lhs->lhs_dest, cmdidx, &lhs->lhs_opt_flags, &lhs->lhs_vimvaridx, &lhs->lhs_type, cctx) == FAIL) return FAIL; if (lhs->lhs_dest != dest_local && cmdidx != CMD_const && cmdidx != CMD_final) { // Specific kind of variable recognized. declare_error = is_decl; } else { // No specific kind of variable recognized, just a name. if (check_reserved_name(lhs->lhs_name) == FAIL) return FAIL; if (lookup_local(var_start, lhs->lhs_varlen, &lhs->lhs_local_lvar, cctx) == OK) lhs->lhs_lvar = &lhs->lhs_local_lvar; else { CLEAR_FIELD(lhs->lhs_arg_lvar); if (arg_exists(var_start, lhs->lhs_varlen, &lhs->lhs_arg_lvar.lv_idx, &lhs->lhs_arg_lvar.lv_type, &lhs->lhs_arg_lvar.lv_from_outer, cctx) == OK) { if (is_decl) { semsg(_(e_str_is_used_as_argument), lhs->lhs_name); return FAIL; } lhs->lhs_lvar = &lhs->lhs_arg_lvar; } } if (lhs->lhs_lvar != NULL) { if (is_decl) { semsg(_(e_variable_already_declared), lhs->lhs_name); return FAIL; } } else { int script_namespace = lhs->lhs_varlen > 1 && STRNCMP(var_start, "s:", 2) == 0; int script_var = (script_namespace ? script_var_exists(var_start + 2, lhs->lhs_varlen - 2, cctx, NULL) : script_var_exists(var_start, lhs->lhs_varlen, cctx, NULL)) == OK; imported_T *import = find_imported(var_start, lhs->lhs_varlen, FALSE); if (script_namespace || script_var || import != NULL) { char_u *rawname = lhs->lhs_name + (lhs->lhs_name[1] == ':' ? 
2 : 0); if (script_namespace && current_script_is_vim9()) { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), var_start); return FAIL; } if (is_decl) { if (script_namespace) semsg(_(e_cannot_declare_script_variable_in_function_str), lhs->lhs_name); else semsg(_(e_variable_already_declared_in_script_str), lhs->lhs_name); return FAIL; } else if (cctx->ctx_ufunc->uf_script_ctx_version == SCRIPT_VERSION_VIM9 && script_namespace && !script_var && import == NULL) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } lhs->lhs_dest = dest_script; // existing script-local variables should have a type lhs->lhs_scriptvar_sid = current_sctx.sc_sid; if (import != NULL) { char_u *dot = vim_strchr(var_start, '.'); char_u *p; // for an import the name is what comes after the dot if (dot == NULL) { semsg(_(e_no_dot_after_imported_name_str), var_start); return FAIL; } p = skipwhite(dot + 1); var_end = to_name_end(p, TRUE); if (var_end == p) { semsg(_(e_missing_name_after_imported_name_str), var_start); return FAIL; } vim_free(lhs->lhs_name); lhs->lhs_varlen = var_end - p; lhs->lhs_name = vim_strnsave(p, lhs->lhs_varlen); if (lhs->lhs_name == NULL) return FAIL; rawname = lhs->lhs_name; lhs->lhs_scriptvar_sid = import->imp_sid; // TODO: where do we check this name is exported? // Check if something follows: "exp.var[idx]" or // "exp.var.key". lhs->lhs_has_index = lhs->lhs_dest_end > skipwhite(var_end); } if (SCRIPT_ID_VALID(lhs->lhs_scriptvar_sid)) { // Check writable only when no index follows. lhs->lhs_scriptvar_idx = get_script_item_idx( lhs->lhs_scriptvar_sid, rawname, lhs->lhs_has_index ? ASSIGN_FINAL : ASSIGN_CONST, cctx, NULL); if (lhs->lhs_scriptvar_idx >= 0) { scriptitem_T *si = SCRIPT_ITEM( lhs->lhs_scriptvar_sid); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + lhs->lhs_scriptvar_idx; lhs->lhs_type = sv->sv_type; } } } else if (check_defined(var_start, lhs->lhs_varlen, cctx, NULL, FALSE) == FAIL) return FAIL; } } if (declare_error) { vim9_declare_error(lhs->lhs_name); return FAIL; } } // handle "a:name" as a name, not index "name" in "a" if (lhs->lhs_varlen > 1 || var_start[lhs->lhs_varlen] != ':') var_end = lhs->lhs_dest_end; if (lhs->lhs_dest != dest_option && lhs->lhs_dest != dest_func_option) { if (is_decl && *var_end == ':') { char_u *p; // parse optional type: "let var: type = expr" if (!VIM_ISWHITE(var_end[1])) { semsg(_(e_white_space_required_after_str_str), ":", var_end); return FAIL; } p = skipwhite(var_end + 1); lhs->lhs_type = parse_type(&p, cctx->ctx_type_list, TRUE); if (lhs->lhs_type == NULL) return FAIL; lhs->lhs_has_type = TRUE; lhs->lhs_end = p; } else if (lhs->lhs_lvar != NULL) lhs->lhs_type = lhs->lhs_lvar->lv_type; } if (oplen == 3 && !heredoc && lhs->lhs_dest != dest_global && !lhs->lhs_has_index && lhs->lhs_type->tt_type != VAR_STRING && lhs->lhs_type->tt_type != VAR_ANY) { emsg(_(e_can_only_concatenate_to_string)); return FAIL; } if (lhs->lhs_lvar == NULL && lhs->lhs_dest == dest_local && cctx->ctx_skip != SKIP_YES) { if (oplen > 1 && !heredoc) { // +=, /=, etc. require an existing variable semsg(_(e_cannot_use_operator_on_new_variable), lhs->lhs_name); return FAIL; } if (!is_decl || (lhs->lhs_has_index && !has_cmd && cctx->ctx_skip != SKIP_YES)) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } // Check the name is valid for a funcref. if ((lhs->lhs_type->tt_type == VAR_FUNC || lhs->lhs_type->tt_type == VAR_PARTIAL) && var_wrong_func_name(lhs->lhs_name, TRUE)) return FAIL; // New local variable. 
lhs->lhs_lvar = reserve_local(cctx, var_start, lhs->lhs_varlen, cmdidx == CMD_final || cmdidx == CMD_const, lhs->lhs_type); if (lhs->lhs_lvar == NULL) return FAIL; lhs->lhs_new_local = TRUE; } lhs->lhs_member_type = lhs->lhs_type; if (lhs->lhs_has_index) { char_u *after = var_start + lhs->lhs_varlen; char_u *p; // Something follows after the variable: "var[idx]" or "var.key". if (is_decl && cctx->ctx_skip != SKIP_YES) { if (has_cmd) emsg(_(e_cannot_use_index_when_declaring_variable)); else semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } // Now: var_start[lhs->lhs_varlen] is '[' or '.' // Only the last index is used below, if there are others // before it generate code for the expression. Thus for // "ll[1][2]" the expression is "ll[1]" and "[2]" is the index. for (;;) { p = skip_index(after); if (*p != '[' && *p != '.') { lhs->lhs_varlen_total = p - var_start; break; } after = p; } if (after > var_start + lhs->lhs_varlen) { lhs->lhs_varlen = after - var_start; lhs->lhs_dest = dest_expr; // We don't know the type before evaluating the expression, // use "any" until then. lhs->lhs_type = &t_any; } if (lhs->lhs_type->tt_member == NULL) lhs->lhs_member_type = &t_any; else lhs->lhs_member_type = lhs->lhs_type->tt_member; } return OK; } /* * Figure out the LHS and check a few errors. */ int compile_assign_lhs( char_u *var_start, lhs_T *lhs, cmdidx_T cmdidx, int is_decl, int heredoc, int has_cmd, // "var" before "var_start" int oplen, cctx_T *cctx) { if (compile_lhs(var_start, lhs, cmdidx, heredoc, has_cmd, oplen, cctx) == FAIL) return FAIL; if (!lhs->lhs_has_index && lhs->lhs_lvar == &lhs->lhs_arg_lvar) { semsg(_(e_cannot_assign_to_argument), lhs->lhs_name); return FAIL; } if (!is_decl && lhs->lhs_lvar != NULL && lhs->lhs_lvar->lv_const && !lhs->lhs_has_index) { semsg(_(e_cannot_assign_to_constant), lhs->lhs_name); return FAIL; } return OK; } /* * Return TRUE if "lhs" has a range index: "[expr : expr]". */ static int has_list_index(char_u *idx_start, cctx_T *cctx) { char_u *p = idx_start; int save_skip; if (*p != '[') return FALSE; p = skipwhite(p + 1); if (*p == ':') return TRUE; save_skip = cctx->ctx_skip; cctx->ctx_skip = SKIP_YES; (void)compile_expr0(&p, cctx); cctx->ctx_skip = save_skip; return *skipwhite(p) == ':'; } /* * For an assignment with an index, compile the "idx" in "var[idx]" or "key" in * "var.key". 
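 *
 * Added note (illustrative): for "var[i : j]" both index expressions are
 * compiled and "*range" is set; an empty first or second index pushes zero
 * or v:none respectively.  For "var.key" the key is pushed as a string with
 * generate_PUSHS().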
*/ static int compile_assign_index( char_u *var_start, lhs_T *lhs, int *range, cctx_T *cctx) { size_t varlen = lhs->lhs_varlen; char_u *p; int r = OK; int need_white_before = TRUE; int empty_second; p = var_start + varlen; if (*p == '[') { p = skipwhite(p + 1); if (*p == ':') { // empty first index, push zero r = generate_PUSHNR(cctx, 0); need_white_before = FALSE; } else r = compile_expr0(&p, cctx); if (r == OK && *skipwhite(p) == ':') { // unlet var[idx : idx] // blob[idx : idx] = value *range = TRUE; p = skipwhite(p); empty_second = *skipwhite(p + 1) == ']'; if ((need_white_before && !IS_WHITE_OR_NUL(p[-1])) || (!empty_second && !IS_WHITE_OR_NUL(p[1]))) { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", p); return FAIL; } p = skipwhite(p + 1); if (*p == ']') // empty second index, push "none" r = generate_PUSHSPEC(cctx, VVAL_NONE); else r = compile_expr0(&p, cctx); } if (r == OK && *skipwhite(p) != ']') { // this should not happen emsg(_(e_missing_closing_square_brace)); r = FAIL; } } else // if (*p == '.') { char_u *key_end = to_name_end(p + 1, TRUE); char_u *key = vim_strnsave(p + 1, key_end - p - 1); r = generate_PUSHS(cctx, &key); } return r; } /* * For a LHS with an index, load the variable to be indexed. */ static int compile_load_lhs( lhs_T *lhs, char_u *var_start, type_T *rhs_type, cctx_T *cctx) { if (lhs->lhs_dest == dest_expr) { size_t varlen = lhs->lhs_varlen; int c = var_start[varlen]; int lines_len = cctx->ctx_ufunc->uf_lines.ga_len; char_u *p = var_start; int res; // Evaluate "ll[expr]" of "ll[expr][idx]". End the line with a NUL and // limit the lines array length to avoid skipping to a following line. var_start[varlen] = NUL; cctx->ctx_ufunc->uf_lines.ga_len = cctx->ctx_lnum + 1; res = compile_expr0(&p, cctx); var_start[varlen] = c; cctx->ctx_ufunc->uf_lines.ga_len = lines_len; if (res == FAIL || p != var_start + varlen) { // this should not happen if (res != FAIL) emsg(_(e_missing_closing_square_brace)); return FAIL; } lhs->lhs_type = cctx->ctx_type_stack.ga_len == 0 ? &t_void : get_type_on_stack(cctx, 0); // now we can properly check the type if (rhs_type != NULL && lhs->lhs_type->tt_member != NULL && rhs_type != &t_void && need_type(rhs_type, lhs->lhs_type->tt_member, -2, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } else generate_loadvar(cctx, lhs->lhs_dest, lhs->lhs_name, lhs->lhs_lvar, lhs->lhs_type); return OK; } /* * Produce code for loading "lhs" and also take care of an index. * Return OK/FAIL. */ int compile_load_lhs_with_index(lhs_T *lhs, char_u *var_start, cctx_T *cctx) { compile_load_lhs(lhs, var_start, NULL, cctx); if (lhs->lhs_has_index) { int range = FALSE; // Get member from list or dict. First compile the // index value. if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (range) { semsg(_(e_cannot_use_range_with_assignment_operator_str), var_start); return FAIL; } // Get the member. if (compile_member(FALSE, NULL, cctx) == FAIL) return FAIL; } return OK; } /* * Assignment to a list or dict member, or ":unlet" for the item, using the * information in "lhs". * Returns OK or FAIL. 
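 *
 * Added note (illustrative): after loading the indexed variable with
 * compile_load_lhs() this ends in ISN_STOREINDEX or ISN_STORERANGE for an
 * assignment, and ISN_UNLETINDEX or ISN_UNLETRANGE for ":unlet".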
*/ int compile_assign_unlet( char_u *var_start, lhs_T *lhs, int is_assign, type_T *rhs_type, cctx_T *cctx) { vartype_T dest_type; int range = FALSE; if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (is_assign && range && lhs->lhs_type->tt_type != VAR_LIST && lhs->lhs_type != &t_blob && lhs->lhs_type != &t_any) { semsg(_(e_cannot_use_range_with_assignment_str), var_start); return FAIL; } if (lhs->lhs_type == &t_any) { // Index on variable of unknown type: check at runtime. dest_type = VAR_ANY; } else { dest_type = lhs->lhs_type->tt_type; if (dest_type == VAR_DICT && range) { emsg(e_cannot_use_range_with_dictionary); return FAIL; } if (dest_type == VAR_DICT && may_generate_2STRING(-1, FALSE, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_BLOB) { type_T *type; if (range) { type = get_type_on_stack(cctx, 1); if (need_type(type, &t_number, -2, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } type = get_type_on_stack(cctx, 0); if ((dest_type != VAR_BLOB && type->tt_type != VAR_SPECIAL) && need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } } // Load the dict or list. On the stack we then have: // - value (for assignment, not for :unlet) // - index // - for [a : b] second index // - variable if (compile_load_lhs(lhs, var_start, rhs_type, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_DICT || dest_type == VAR_BLOB || dest_type == VAR_ANY) { if (is_assign) { if (range) { if (generate_instr_drop(cctx, ISN_STORERANGE, 4) == NULL) return FAIL; } else { isn_T *isn = generate_instr_drop(cctx, ISN_STOREINDEX, 3); if (isn == NULL) return FAIL; isn->isn_arg.vartype = dest_type; } } else if (range) { if (generate_instr_drop(cctx, ISN_UNLETRANGE, 3) == NULL) return FAIL; } else { if (generate_instr_drop(cctx, ISN_UNLETINDEX, 2) == NULL) return FAIL; } } else { emsg(_(e_indexable_type_required)); return FAIL; } return OK; } /* * Compile declaration and assignment: * "let name" * "var name = expr" * "final name = expr" * "const name = expr" * "name = expr" * "arg" points to "name". * "++arg" and "--arg" * Return NULL for an error. * Return "arg" if it does not look like a variable list. */ static char_u * compile_assignment(char_u *arg, exarg_T *eap, cmdidx_T cmdidx, cctx_T *cctx) { char_u *var_start; char_u *p; char_u *end = arg; char_u *ret = NULL; int var_count = 0; int var_idx; int semicolon = 0; int did_generate_slice = FALSE; garray_T *instr = &cctx->ctx_instr; char_u *op; int oplen = 0; int heredoc = FALSE; int incdec = FALSE; type_T *rhs_type = &t_any; char_u *sp; int is_decl = is_decl_command(cmdidx); lhs_T lhs; long start_lnum = SOURCING_LNUM; // Skip over the "varname" or "[varname, varname]" to get to any "=". p = skip_var_list(arg, TRUE, &var_count, &semicolon, TRUE); if (p == NULL) return *arg == '[' ? arg : NULL; lhs.lhs_name = NULL; if (eap->cmdidx == CMD_increment || eap->cmdidx == CMD_decrement) { if (VIM_ISWHITE(eap->cmd[2])) { semsg(_(e_no_white_space_allowed_after_str_str), eap->cmdidx == CMD_increment ? "++" : "--", eap->cmd); return NULL; } op = (char_u *)(eap->cmdidx == CMD_increment ? 
"+=" : "-="); oplen = 2; incdec = TRUE; } else { sp = p; p = skipwhite(p); op = p; oplen = assignment_len(p, &heredoc); if (var_count > 0 && oplen == 0) // can be something like "[1, 2]->func()" return arg; if (oplen > 0 && (!VIM_ISWHITE(*sp) || !IS_WHITE_OR_NUL(op[oplen]))) { error_white_both(op, oplen); return NULL; } } if (heredoc) { list_T *l; // [let] varname =<< [trim] {end} eap->getline = exarg_getline; eap->cookie = cctx; l = heredoc_get(eap, op + 3, FALSE, TRUE); if (l == NULL) return NULL; list_free(l); p += STRLEN(p); end = p; } else if (var_count > 0) { char_u *wp; // for "[var, var] = expr" evaluate the expression here, loop over the // list of variables below. // A line break may follow the "=". wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) return FAIL; if (compile_expr0(&p, cctx) == FAIL) return NULL; end = p; if (cctx->ctx_skip != SKIP_YES) { type_T *stacktype; int needed_list_len; int did_check = FALSE; stacktype = cctx->ctx_type_stack.ga_len == 0 ? &t_void : get_type_on_stack(cctx, 0); if (stacktype->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } if (need_type(stacktype, &t_list_any, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; // If a constant list was used we can check the length right here. needed_list_len = semicolon ? var_count - 1 : var_count; if (instr->ga_len > 0) { isn_T *isn = ((isn_T *)instr->ga_data) + instr->ga_len - 1; if (isn->isn_type == ISN_NEWLIST) { did_check = TRUE; if (semicolon ? isn->isn_arg.number < needed_list_len : isn->isn_arg.number != needed_list_len) { semsg(_(e_expected_nr_items_but_got_nr), needed_list_len, (int)isn->isn_arg.number); goto theend; } } } if (!did_check) generate_CHECKLEN(cctx, needed_list_len, semicolon); if (stacktype->tt_member != NULL) rhs_type = stacktype->tt_member; } } /* * Loop over variables in "[var, var] = expr". * For "var = expr" and "let var: type" this is done only once. */ if (var_count > 0) var_start = skipwhite(arg + 1); // skip over the "[" else var_start = arg; for (var_idx = 0; var_idx == 0 || var_idx < var_count; var_idx++) { int instr_count = -1; int save_lnum; int skip_store = FALSE; type_T *inferred_type = NULL; if (var_start[0] == '_' && !eval_isnamec(var_start[1])) { // Ignore underscore in "[a, _, b] = list". if (var_count > 0) { var_start = skipwhite(var_start + 2); continue; } emsg(_(e_cannot_use_underscore_here)); goto theend; } vim_free(lhs.lhs_name); /* * Figure out the LHS type and other properties. */ if (compile_assign_lhs(var_start, &lhs, cmdidx, is_decl, heredoc, var_start > eap->cmd, oplen, cctx) == FAIL) goto theend; if (heredoc) { SOURCING_LNUM = start_lnum; if (lhs.lhs_has_type && need_type(&t_list_string, lhs.lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } else { if (cctx->ctx_skip == SKIP_YES) { if (oplen > 0 && var_count == 0) { // skip over the "=" and the expression p = skipwhite(op + oplen); (void)compile_expr0(&p, cctx); } } else if (oplen > 0) { int is_const = FALSE; char_u *wp; // for "+=", "*=", "..=" etc. first load the current value if (*op != '=' && compile_load_lhs_with_index(&lhs, var_start, cctx) == FAIL) goto theend; // For "var = expr" evaluate the expression. if (var_count == 0) { int r; // Compile the expression. instr_count = instr->ga_len; if (incdec) { r = generate_PUSHNR(cctx, 1); } else { // Temporarily hide the new local variable here, it is // not available to this expression. 
if (lhs.lhs_new_local) --cctx->ctx_locals.ga_len; wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) { if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; goto theend; } r = compile_expr0_ext(&p, cctx, &is_const); if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; if (r == FAIL) goto theend; } } else if (semicolon && var_idx == var_count - 1) { // For "[var; var] = expr" get the rest of the list did_generate_slice = TRUE; if (generate_SLICE(cctx, var_count - 1) == FAIL) goto theend; } else { // For "[var, var] = expr" get the "var_idx" item from the // list. if (generate_GETITEM(cctx, var_idx, *op != '=') == FAIL) goto theend; } rhs_type = cctx->ctx_type_stack.ga_len == 0 ? &t_void : get_type_on_stack(cctx, 0); if (lhs.lhs_lvar != NULL && (is_decl || !lhs.lhs_has_type)) { if ((rhs_type->tt_type == VAR_FUNC || rhs_type->tt_type == VAR_PARTIAL) && !lhs.lhs_has_index && var_wrong_func_name(lhs.lhs_name, TRUE)) goto theend; if (lhs.lhs_new_local && !lhs.lhs_has_type) { if (rhs_type->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } else { // An empty list or dict has a &t_unknown member, // for a variable that implies &t_any. if (rhs_type == &t_list_empty) lhs.lhs_lvar->lv_type = &t_list_any; else if (rhs_type == &t_dict_empty) lhs.lhs_lvar->lv_type = &t_dict_any; else if (rhs_type == &t_unknown) lhs.lhs_lvar->lv_type = &t_any; else { lhs.lhs_lvar->lv_type = rhs_type; inferred_type = rhs_type; } } } else if (*op == '=') { type_T *use_type = lhs.lhs_lvar->lv_type; where_T where = WHERE_INIT; // Without operator check type here, otherwise below. // Use the line number of the assignment. SOURCING_LNUM = start_lnum; where.wt_index = var_count > 0 ? var_idx + 1 : 0; where.wt_variable = var_count > 0; // If assigning to a list or dict member, use the // member type. Not for "list[:] =". if (lhs.lhs_has_index && !has_list_index(var_start + lhs.lhs_varlen, cctx)) use_type = lhs.lhs_member_type; if (need_type_where(rhs_type, use_type, -1, where, cctx, FALSE, is_const) == FAIL) goto theend; } } else { type_T *lhs_type = lhs.lhs_member_type; // Special case: assigning to @# can use a number or a // string. // Also: can assign a number to a float. 
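		    // (Added, illustrative: "@# = 2" and assigning the
		    // number 1 to a float-typed destination both end up
		    // being type-checked as a number here.)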
if ((lhs_type == &t_number_or_string || lhs_type == &t_float) && rhs_type->tt_type == VAR_NUMBER) lhs_type = &t_number; if (*p != '=' && need_type(rhs_type, lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } } else if (cmdidx == CMD_final) { emsg(_(e_final_requires_a_value)); goto theend; } else if (cmdidx == CMD_const) { emsg(_(e_const_requires_a_value)); goto theend; } else if (!lhs.lhs_has_type || lhs.lhs_dest == dest_option || lhs.lhs_dest == dest_func_option) { emsg(_(e_type_or_initialization_required)); goto theend; } else { int r = OK; // variables are always initialized if (GA_GROW_FAILS(instr, 1)) goto theend; switch (lhs.lhs_member_type->tt_type) { case VAR_BOOL: r = generate_PUSHBOOL(cctx, VVAL_FALSE); break; case VAR_FLOAT: #ifdef FEAT_FLOAT r = generate_PUSHF(cctx, 0.0); #endif break; case VAR_STRING: r = generate_PUSHS(cctx, NULL); break; case VAR_BLOB: r = generate_PUSHBLOB(cctx, blob_alloc()); break; case VAR_FUNC: r = generate_PUSHFUNC(cctx, NULL, &t_func_void); break; case VAR_LIST: r = generate_NEWLIST(cctx, 0, FALSE); break; case VAR_DICT: r = generate_NEWDICT(cctx, 0, FALSE); break; case VAR_JOB: r = generate_PUSHJOB(cctx); break; case VAR_CHANNEL: r = generate_PUSHCHANNEL(cctx); break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_PARTIAL: case VAR_VOID: case VAR_INSTR: case VAR_SPECIAL: // cannot happen // This is skipped for local variables, they are always // initialized to zero. But in a "for" or "while" loop // the value may have been changed. if (lhs.lhs_dest == dest_local && !inside_loop_scope(cctx)) skip_store = TRUE; else { instr_count = instr->ga_len; r = generate_PUSHNR(cctx, 0); } break; } if (r == FAIL) goto theend; } if (var_count == 0) end = p; } // no need to parse more when skipping if (cctx->ctx_skip == SKIP_YES) break; if (oplen > 0 && *op != '=') { type_T *expected; type_T *stacktype = NULL; if (*op == '.') { if (may_generate_2STRING(-1, FALSE, cctx) == FAIL) goto theend; } else { expected = lhs.lhs_member_type; stacktype = get_type_on_stack(cctx, 0); if ( #ifdef FEAT_FLOAT // If variable is float operation with number is OK. !(expected == &t_float && (stacktype == &t_number || stacktype == &t_number_bool)) && #endif need_type(stacktype, expected, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } if (*op == '.') { if (generate_CONCAT(cctx, 2) == FAIL) goto theend; } else if (*op == '+') { if (generate_add_instr(cctx, operator_type(lhs.lhs_member_type, stacktype), lhs.lhs_member_type, stacktype, EXPR_APPEND) == FAIL) goto theend; } else if (generate_two_op(cctx, op) == FAIL) goto theend; } // Use the line number of the assignment for store instruction. save_lnum = cctx->ctx_lnum; cctx->ctx_lnum = start_lnum - 1; if (lhs.lhs_has_index) { // Use the info in "lhs" to store the value at the index in the // list or dict. if (compile_assign_unlet(var_start, &lhs, TRUE, rhs_type, cctx) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } else { if (is_decl && cmdidx == CMD_const && (lhs.lhs_dest == dest_script || lhs.lhs_dest == dest_global || lhs.lhs_dest == dest_local)) // ":const var": lock the value, but not referenced variables generate_LOCKCONST(cctx); if ((lhs.lhs_type->tt_type == VAR_DICT || lhs.lhs_type->tt_type == VAR_LIST) && lhs.lhs_type->tt_member != NULL && lhs.lhs_type->tt_member != &t_any && lhs.lhs_type->tt_member != &t_unknown) // Set the type in the list or dict, so that it can be checked, // also in legacy script. 
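		// (Added note, illustrative: e.g. for "var l: list<number> = []"
		// the new list value gets the declared member type attached
		// here, per the checks above.)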
generate_SETTYPE(cctx, lhs.lhs_type); else if (inferred_type != NULL && (inferred_type->tt_type == VAR_DICT || inferred_type->tt_type == VAR_LIST) && inferred_type->tt_member != NULL && inferred_type->tt_member != &t_unknown && inferred_type->tt_member != &t_any) // Set the type in the list or dict, so that it can be checked, // also in legacy script. generate_SETTYPE(cctx, inferred_type); if (!skip_store && generate_store_lhs(cctx, &lhs, instr_count, is_decl) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } cctx->ctx_lnum = save_lnum; if (var_idx + 1 < var_count) var_start = skipwhite(lhs.lhs_end + 1); } // For "[var, var] = expr" drop the "expr" value. // Also for "[var, var; _] = expr". if (var_count > 0 && (!semicolon || !did_generate_slice)) { if (generate_instr_drop(cctx, ISN_DROP, 1) == NULL) goto theend; } ret = skipwhite(end); theend: vim_free(lhs.lhs_name); return ret; } /* * Check for an assignment at "eap->cmd", compile it if found. * Return NOTDONE if there is none, FAIL for failure, OK if done. */ static int may_compile_assignment(exarg_T *eap, char_u **line, cctx_T *cctx) { char_u *pskip; char_u *p; // Assuming the command starts with a variable or function name, // find what follows. // Skip over "var.member", "var[idx]" and the like. // Also "&opt = val", "$ENV = val" and "@r = val". pskip = (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@') ? eap->cmd + 1 : eap->cmd; p = to_name_end(pskip, TRUE); if (p > eap->cmd && *p != NUL) { char_u *var_end; int oplen; int heredoc; if (eap->cmd[0] == '@') var_end = eap->cmd + 2; else var_end = find_name_end(pskip, NULL, NULL, FNE_CHECK_START | FNE_INCL_BR); oplen = assignment_len(skipwhite(var_end), &heredoc); if (oplen > 0) { size_t len = p - eap->cmd; // Recognize an assignment if we recognize the variable // name: // "&opt = expr" // "$ENV = expr" // "@r = expr" // "g:var = expr" // "g:[key] = expr" // "local = expr" where "local" is a local var. // "script = expr" where "script" is a script-local var. // "import = expr" where "import" is an imported var if (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@' || ((len) > 2 && eap->cmd[1] == ':') || STRNCMP(eap->cmd, "g:[", 3) == 0 || variable_exists(eap->cmd, len, cctx)) { *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL || *line == eap->cmd) return FAIL; return OK; } } } if (*eap->cmd == '[') { // might be "[var, var] = expr" *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL) return FAIL; if (*line != eap->cmd) return OK; } return NOTDONE; } /* * Check if arguments of "ufunc" shadow variables in "cctx". * Return OK or FAIL. */ static int check_args_shadowing(ufunc_T *ufunc, cctx_T *cctx) { int i; char_u *arg; int r = OK; // Make sure arguments are not found when compiling a second time. ufunc->uf_args_visible = 0; // Check for arguments shadowing variables from the context. for (i = 0; i < ufunc->uf_args.ga_len; ++i) { arg = ((char_u **)(ufunc->uf_args.ga_data))[i]; if (check_defined(arg, STRLEN(arg), cctx, NULL, TRUE) == FAIL) { r = FAIL; break; } } ufunc->uf_args_visible = ufunc->uf_args.ga_len; return r; } /* * Get the compilation type that should be used for "ufunc". * Keep in sync with INSTRUCTIONS(). */ compiletype_T get_compile_type(ufunc_T *ufunc) { // Update uf_has_breakpoint if needed. 
update_has_breakpoint(ufunc); if (debug_break_level > 0 || may_break_in_function(ufunc)) return CT_DEBUG; #ifdef FEAT_PROFILE if (do_profiling == PROF_YES) { if (!ufunc->uf_profiling && has_profiling(FALSE, ufunc->uf_name, NULL)) func_do_profile(ufunc); if (ufunc->uf_profiling) return CT_PROFILE; } #endif return CT_NONE; } /* * Add a function to the list of :def functions. * This sets "ufunc->uf_dfunc_idx" but the function isn't compiled yet. */ static int add_def_function(ufunc_T *ufunc) { dfunc_T *dfunc; if (def_functions.ga_len == 0) { // The first position is not used, so that a zero uf_dfunc_idx means it // wasn't set. if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; ++def_functions.ga_len; } // Add the function to "def_functions". if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; dfunc = ((dfunc_T *)def_functions.ga_data) + def_functions.ga_len; CLEAR_POINTER(dfunc); dfunc->df_idx = def_functions.ga_len; ufunc->uf_dfunc_idx = dfunc->df_idx; dfunc->df_ufunc = ufunc; dfunc->df_name = vim_strsave(ufunc->uf_name); ga_init2(&dfunc->df_var_names, sizeof(char_u *), 10); ++dfunc->df_refcount; ++def_functions.ga_len; return OK; } /* * After ex_function() has collected all the function lines: parse and compile * the lines into instructions. * Adds the function to "def_functions". * When "check_return_type" is set then set ufunc->uf_ret_type to the type of * the return statement (used for lambda). When uf_ret_type is already set * then check that it matches. * When "profiling" is true add ISN_PROF_START instructions. * "outer_cctx" is set for a nested function. * This can be used recursively through compile_lambda(), which may reallocate * "def_functions". * Returns OK or FAIL. */ int compile_def_function( ufunc_T *ufunc, int check_return_type, compiletype_T compile_type, cctx_T *outer_cctx) { char_u *line = NULL; garray_T lines_to_free; char_u *p; char *errormsg = NULL; // error message cctx_T cctx; garray_T *instr; int did_emsg_before = did_emsg; int did_emsg_silent_before = did_emsg_silent; int ret = FAIL; sctx_T save_current_sctx = current_sctx; int save_estack_compiling = estack_compiling; int save_cmod_flags = cmdmod.cmod_flags; int do_estack_push; int new_def_function = FALSE; #ifdef FEAT_PROFILE int prof_lnum = -1; #endif int debug_lnum = -1; // allocated lines are freed at the end ga_init2(&lines_to_free, sizeof(char_u *), 50); // When using a function that was compiled before: Free old instructions. // The index is reused. Otherwise add a new entry in "def_functions". if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; isn_T *instr_dest = NULL; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE instr_dest = dfunc->df_instr_prof; break; #endif case CT_NONE: instr_dest = dfunc->df_instr; break; case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break; } if (instr_dest != NULL) // Was compiled in this mode before: Free old instructions. 
delete_def_function_contents(dfunc, FALSE); ga_clear_strings(&dfunc->df_var_names); } else { if (add_def_function(ufunc) == FAIL) return FAIL; new_def_function = TRUE; } if ((ufunc->uf_flags & FC_CLOSURE) && outer_cctx == NULL) { semsg(_(e_compiling_closure_without_context_str), printable_func_name(ufunc)); return FAIL; } ufunc->uf_def_status = UF_COMPILING; CLEAR_FIELD(cctx); cctx.ctx_compile_type = compile_type; cctx.ctx_ufunc = ufunc; cctx.ctx_lnum = -1; cctx.ctx_outer = outer_cctx; ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10); // Each entry on the type stack consists of two type pointers. ga_init2(&cctx.ctx_type_stack, sizeof(type2_T), 50); cctx.ctx_type_list = &ufunc->uf_type_list; ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50); instr = &cctx.ctx_instr; // Set the context to the function, it may be compiled when called from // another script. Set the script version to the most modern one. // The line number will be set in next_line_from_context(). current_sctx = ufunc->uf_script_ctx; current_sctx.sc_version = SCRIPT_VERSION_VIM9; // Don't use the flag from ":legacy" here. cmdmod.cmod_flags &= ~CMOD_LEGACY; // Make sure error messages are OK. do_estack_push = !estack_top_is_ufunc(ufunc, 1); if (do_estack_push) estack_push_ufunc(ufunc, 1); estack_compiling = TRUE; if (check_args_shadowing(ufunc, &cctx) == FAIL) goto erret; if (ufunc->uf_def_args.ga_len > 0) { int count = ufunc->uf_def_args.ga_len; int first_def_arg = ufunc->uf_args.ga_len - count; int i; char_u *arg; int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0); int did_set_arg_type = FALSE; // Produce instructions for the default values of optional arguments. SOURCING_LNUM = 0; // line number unknown for (i = 0; i < count; ++i) { type_T *val_type; int arg_idx = first_def_arg + i; where_T where = WHERE_INIT; int r; int jump_instr_idx = instr->ga_len; isn_T *isn; // Use a JUMP_IF_ARG_SET instruction to skip if the value was given. if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL) goto erret; // Make sure later arguments are not found. ufunc->uf_args_visible = arg_idx; arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i]; r = compile_expr0(&arg, &cctx); if (r == FAIL) goto erret; // If no type specified use the type of the default value. // Otherwise check that the default value type matches the // specified type. val_type = get_type_on_stack(&cctx, 0); where.wt_index = arg_idx + 1; if (ufunc->uf_arg_types[arg_idx] == &t_unknown) { did_set_arg_type = TRUE; ufunc->uf_arg_types[arg_idx] = val_type; } else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx], -1, where, &cctx, FALSE, FALSE) == FAIL) goto erret; if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL) goto erret; // set instruction index in JUMP_IF_ARG_SET to here isn = ((isn_T *)instr->ga_data) + jump_instr_idx; isn->isn_arg.jumparg.jump_where = instr->ga_len; } if (did_set_arg_type) set_function_type(ufunc); } ufunc->uf_args_visible = ufunc->uf_args.ga_len; /* * Loop over all the lines of the function and generate instructions. */ for (;;) { exarg_T ea; int starts_with_colon = FALSE; char_u *cmd; cmdmod_T local_cmdmod; // Bail out on the first error to avoid a flood of errors and report // the right line number when inside try/catch. 
if (did_emsg_before != did_emsg) goto erret; if (line != NULL && *line == '|') // the line continues after a '|' ++line; else if (line != NULL && *skipwhite(line) != NUL && !(*line == '#' && (line == cctx.ctx_line_start || VIM_ISWHITE(line[-1])))) { semsg(_(e_trailing_characters_str), line); goto erret; } else if (line != NULL && vim9_bad_comment(skipwhite(line))) goto erret; else { line = next_line_from_context(&cctx, FALSE); if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len) { // beyond the last line #ifdef FEAT_PROFILE if (cctx.ctx_skip != SKIP_YES) may_generate_prof_end(&cctx, prof_lnum); #endif break; } // Make a copy, splitting off nextcmd and removing trailing spaces // may change it. if (line != NULL) { line = vim_strsave(line); if (ga_add_string(&lines_to_free, line) == FAIL) goto erret; } } CLEAR_FIELD(ea); ea.cmdlinep = &line; ea.cmd = skipwhite(line); ea.skip = cctx.ctx_skip == SKIP_YES; if (*ea.cmd == '#') { // "#" starts a comment, but "#{" is an error if (vim9_bad_comment(ea.cmd)) goto erret; line = (char_u *)""; continue; } #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum && cctx.ctx_skip != SKIP_YES) { may_generate_prof_end(&cctx, prof_lnum); prof_lnum = cctx.ctx_lnum; generate_instr(&cctx, ISN_PROF_START); } #endif if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum && cctx.ctx_skip != SKIP_YES) { debug_lnum = cctx.ctx_lnum; generate_instr_debug(&cctx); } cctx.ctx_prev_lnum = cctx.ctx_lnum + 1; // Some things can be recognized by the first character. switch (*ea.cmd) { case '}': { // "}" ends a block scope scopetype_T stype = cctx.ctx_scope == NULL ? NO_SCOPE : cctx.ctx_scope->se_type; if (stype == BLOCK_SCOPE) { compile_endblock(&cctx); line = ea.cmd; } else { emsg(_(e_using_rcurly_outside_if_block_scope)); goto erret; } if (line != NULL) line = skipwhite(ea.cmd + 1); continue; } case '{': // "{" starts a block scope // "{'a': 1}->func() is something else if (ends_excmd(*skipwhite(ea.cmd + 1))) { line = compile_block(ea.cmd, &cctx); continue; } break; } /* * COMMAND MODIFIERS */ cctx.ctx_has_cmdmod = FALSE; if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE) == FAIL) goto erret; generate_cmdmods(&cctx, &local_cmdmod); undo_cmdmod(&local_cmdmod); // Check if there was a colon after the last command modifier or before // the current position. for (p = ea.cmd; p >= line; --p) { if (*p == ':') starts_with_colon = TRUE; if (p < ea.cmd && !VIM_ISWHITE(*p)) break; } // Skip ":call" to get to the function name, unless using :legacy p = ea.cmd; if (!(local_cmdmod.cmod_flags & CMOD_LEGACY)) { if (checkforcmd(&ea.cmd, "call", 3)) { if (*ea.cmd == '(') // not for "call()" ea.cmd = p; else ea.cmd = skipwhite(ea.cmd); } if (!starts_with_colon) { int assign; // Check for assignment after command modifiers. 
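		// (Added, illustrative: this is what handles plain lines
		// such as "name = value" or "[a, b] = expr" that do not
		// start with an Ex command.)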
assign = may_compile_assignment(&ea, &line, &cctx); if (assign == OK) goto nextline; if (assign == FAIL) goto erret; } } /* * COMMAND after range * 'text'->func() should not be confused with 'a mark * 0z1234->func() should not be confused with a zero line number * "++nr" and "--nr" are eval commands * in "$ENV->func()" the "$" is not a range * "123->func()" is a method call */ cmd = ea.cmd; if ((*cmd != '$' || starts_with_colon) && (starts_with_colon || !(*cmd == '\'' || (cmd[0] == '0' && cmd[1] == 'z') || (cmd[0] != NUL && cmd[0] == cmd[1] && (*cmd == '+' || *cmd == '-')) || number_method(cmd)))) { ea.cmd = skip_range(ea.cmd, TRUE, NULL); if (ea.cmd > cmd) { if (!starts_with_colon && !(local_cmdmod.cmod_flags & CMOD_LEGACY)) { semsg(_(e_colon_required_before_range_str), cmd); goto erret; } ea.addr_count = 1; if (ends_excmd2(line, ea.cmd)) { // A range without a command: jump to the line. generate_EXEC(&cctx, ISN_EXECRANGE, vim_strnsave(cmd, ea.cmd - cmd)); line = ea.cmd; goto nextline; } } } p = find_ex_command(&ea, NULL, starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY) ? NULL : item_exists, &cctx); if (p == NULL) { if (cctx.ctx_skip != SKIP_YES) semsg(_(e_ambiguous_use_of_user_defined_command_str), ea.cmd); goto erret; } // When using ":legacy cmd" always use compile_exec(). if (local_cmdmod.cmod_flags & CMOD_LEGACY) { char_u *start = ea.cmd; switch (ea.cmdidx) { case CMD_if: case CMD_elseif: case CMD_else: case CMD_endif: case CMD_for: case CMD_endfor: case CMD_continue: case CMD_break: case CMD_while: case CMD_endwhile: case CMD_try: case CMD_catch: case CMD_finally: case CMD_endtry: semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd); goto erret; default: break; } // ":legacy return expr" needs to be handled differently. if (checkforcmd(&start, "return", 4)) ea.cmdidx = CMD_return; else ea.cmdidx = CMD_legacy; } if (p == ea.cmd && ea.cmdidx != CMD_SIZE) { // "eval" is used for "val->func()" and "var" for "var = val", then // "p" is equal to "ea.cmd" for a valid command. 
if (ea.cmdidx == CMD_eval || ea.cmdidx == CMD_var) ; else if (cctx.ctx_skip == SKIP_YES) { line += STRLEN(line); goto nextline; } else { semsg(_(e_command_not_recognized_str), ea.cmd); goto erret; } } if (cctx.ctx_had_return && ea.cmdidx != CMD_elseif && ea.cmdidx != CMD_else && ea.cmdidx != CMD_endif && ea.cmdidx != CMD_endfor && ea.cmdidx != CMD_endwhile && ea.cmdidx != CMD_catch && ea.cmdidx != CMD_finally && ea.cmdidx != CMD_endtry) { emsg(_(e_unreachable_code_after_return)); goto erret; } p = skipwhite(p); if (ea.cmdidx != CMD_SIZE && ea.cmdidx != CMD_write && ea.cmdidx != CMD_read) { if (ea.cmdidx >= 0) ea.argt = excmd_get_argt(ea.cmdidx); if ((ea.argt & EX_BANG) && *p == '!') { ea.forceit = TRUE; p = skipwhite(p + 1); } if ((ea.argt & EX_RANGE) == 0 && ea.addr_count > 0) { emsg(_(e_no_range_allowed)); goto erret; } } switch (ea.cmdidx) { case CMD_def: case CMD_function: ea.arg = p; line = compile_nested_function(&ea, &cctx, &lines_to_free); break; case CMD_return: line = compile_return(p, check_return_type, local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx); cctx.ctx_had_return = TRUE; break; case CMD_let: emsg(_(e_cannot_use_let_in_vim9_script)); break; case CMD_var: case CMD_final: case CMD_const: case CMD_increment: case CMD_decrement: line = compile_assignment(p, &ea, ea.cmdidx, &cctx); if (line == p) { emsg(_(e_invalid_assignment)); line = NULL; } break; case CMD_unlet: case CMD_unlockvar: case CMD_lockvar: line = compile_unletlock(p, &ea, &cctx); break; case CMD_import: emsg(_(e_import_can_only_be_used_in_script)); line = NULL; break; case CMD_if: line = compile_if(p, &cctx); break; case CMD_elseif: line = compile_elseif(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_else: line = compile_else(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endif: line = compile_endif(p, &cctx); break; case CMD_while: line = compile_while(p, &cctx); break; case CMD_endwhile: line = compile_endwhile(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_for: line = compile_for(p, &cctx); break; case CMD_endfor: line = compile_endfor(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_continue: line = compile_continue(p, &cctx); break; case CMD_break: line = compile_break(p, &cctx); break; case CMD_try: line = compile_try(p, &cctx); break; case CMD_catch: line = compile_catch(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_finally: line = compile_finally(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endtry: line = compile_endtry(p, &cctx); break; case CMD_throw: line = compile_throw(p, &cctx); break; case CMD_eval: line = compile_eval(p, &cctx); break; case CMD_echo: case CMD_echon: case CMD_execute: case CMD_echomsg: case CMD_echoerr: case CMD_echoconsole: line = compile_mult_expr(p, ea.cmdidx, &cctx); break; case CMD_put: ea.cmd = cmd; line = compile_put(p, &ea, &cctx); break; case CMD_substitute: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; if (cctx.ctx_skip == SKIP_YES) line = (char_u *)""; else { ea.arg = p; line = compile_substitute(line, &ea, &cctx); } break; case CMD_redir: ea.arg = p; line = compile_redir(line, &ea, &cctx); break; case CMD_cexpr: case CMD_lexpr: case CMD_caddexpr: case CMD_laddexpr: case CMD_cgetexpr: case CMD_lgetexpr: #ifdef FEAT_QUICKFIX ea.arg = p; line = compile_cexpr(line, &ea, &cctx); #else ex_ni(&ea); line = NULL; #endif break; case CMD_append: case CMD_change: case CMD_insert: case CMD_k: case CMD_t: case CMD_xit: not_in_vim9(&ea); goto erret; case CMD_SIZE: if (cctx.ctx_skip != SKIP_YES) { 
semsg(_(e_invalid_command_str), ea.cmd); goto erret; } // We don't check for a next command here. line = (char_u *)""; break; case CMD_lua: case CMD_mzscheme: case CMD_perl: case CMD_py3: case CMD_python3: case CMD_python: case CMD_pythonx: case CMD_ruby: case CMD_tcl: ea.arg = p; if (vim_strchr(line, '\n') == NULL) line = compile_exec(line, &ea, &cctx); else // heredoc lines have been concatenated with NL // characters in get_function_body() line = compile_script(line, &cctx); break; case CMD_vim9script: if (cctx.ctx_skip != SKIP_YES) { emsg(_(e_vim9script_can_only_be_used_in_script)); goto erret; } line = (char_u *)""; break; case CMD_global: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; // FALLTHROUGH default: // Not recognized, execute with do_cmdline_cmd(). ea.arg = p; line = compile_exec(line, &ea, &cctx); break; } nextline: if (line == NULL) goto erret; line = skipwhite(line); // Undo any command modifiers. generate_undo_cmdmods(&cctx); if (cctx.ctx_type_stack.ga_len < 0) { iemsg("Type stack underflow"); goto erret; } } if (cctx.ctx_scope != NULL) { if (cctx.ctx_scope->se_type == IF_SCOPE) emsg(_(e_missing_endif)); else if (cctx.ctx_scope->se_type == WHILE_SCOPE) emsg(_(e_missing_endwhile)); else if (cctx.ctx_scope->se_type == FOR_SCOPE) emsg(_(e_missing_endfor)); else emsg(_(e_missing_rcurly)); goto erret; } if (!cctx.ctx_had_return) { if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN) ufunc->uf_ret_type = &t_void; else if (ufunc->uf_ret_type->tt_type != VAR_VOID) { emsg(_(e_missing_return_statement)); goto erret; } // Return void if there is no return at the end. generate_instr(&cctx, ISN_RETURN_VOID); } // When compiled with ":silent!" and there was an error don't consider the // function compiled. if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; dfunc->df_deleted = FALSE; dfunc->df_script_seq = current_sctx.sc_seq; #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE) { dfunc->df_instr_prof = instr->ga_data; dfunc->df_instr_prof_count = instr->ga_len; } else #endif if (cctx.ctx_compile_type == CT_DEBUG) { dfunc->df_instr_debug = instr->ga_data; dfunc->df_instr_debug_count = instr->ga_len; } else { dfunc->df_instr = instr->ga_data; dfunc->df_instr_count = instr->ga_len; } dfunc->df_varcount = dfunc->df_var_names.ga_len; dfunc->df_has_closure = cctx.ctx_has_closure; if (cctx.ctx_outer_used) ufunc->uf_flags |= FC_CLOSURE; ufunc->uf_def_status = UF_COMPILED; } ret = OK; erret: if (ufunc->uf_def_status == UF_COMPILING) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; // Compiling aborted, free the generated instructions. clear_instr_ga(instr); VIM_CLEAR(dfunc->df_name); ga_clear_strings(&dfunc->df_var_names); // If using the last entry in the table and it was added above, we // might as well remove it. 
if (!dfunc->df_deleted && new_def_function && ufunc->uf_dfunc_idx == def_functions.ga_len - 1) { --def_functions.ga_len; ufunc->uf_dfunc_idx = 0; } ufunc->uf_def_status = UF_COMPILE_ERROR; while (cctx.ctx_scope != NULL) drop_scope(&cctx); if (errormsg != NULL) emsg(errormsg); else if (did_emsg == did_emsg_before) emsg(_(e_compiling_def_function_failed)); } if (cctx.ctx_redir_lhs.lhs_name != NULL) { if (ret == OK) { emsg(_(e_missing_redir_end)); ret = FAIL; } vim_free(cctx.ctx_redir_lhs.lhs_name); vim_free(cctx.ctx_redir_lhs.lhs_whole); } current_sctx = save_current_sctx; estack_compiling = save_estack_compiling; cmdmod.cmod_flags = save_cmod_flags; if (do_estack_push) estack_pop(); ga_clear_strings(&lines_to_free); free_locals(&cctx); ga_clear(&cctx.ctx_type_stack); return ret; } void set_function_type(ufunc_T *ufunc) { int varargs = ufunc->uf_va_name != NULL; int argcount = ufunc->uf_args.ga_len; // Create a type for the function, with the return type and any // argument types. // A vararg is included in uf_args.ga_len but not in uf_arg_types. // The type is included in "tt_args". if (argcount > 0 || varargs) { if (ufunc->uf_type_list.ga_itemsize == 0) ga_init2(&ufunc->uf_type_list, sizeof(type_T *), 10); ufunc->uf_func_type = alloc_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); // Add argument types to the function type. if (func_type_add_arg_types(ufunc->uf_func_type, argcount + varargs, &ufunc->uf_type_list) == FAIL) return; ufunc->uf_func_type->tt_argcount = argcount + varargs; ufunc->uf_func_type->tt_min_argcount = argcount - ufunc->uf_def_args.ga_len; if (ufunc->uf_arg_types == NULL) { int i; // lambda does not have argument types. for (i = 0; i < argcount; ++i) ufunc->uf_func_type->tt_args[i] = &t_any; } else mch_memmove(ufunc->uf_func_type->tt_args, ufunc->uf_arg_types, sizeof(type_T *) * argcount); if (varargs) { ufunc->uf_func_type->tt_args[argcount] = ufunc->uf_va_type == NULL ? &t_list_any : ufunc->uf_va_type; ufunc->uf_func_type->tt_flags = TTFLAG_VARARGS; } } else // No arguments, can use a predefined type. ufunc->uf_func_type = get_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); } /* * Free all instructions for "dfunc" except df_name. */ static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted) { int idx; ga_clear(&dfunc->df_def_args_isn); ga_clear_strings(&dfunc->df_var_names); if (dfunc->df_instr != NULL) { for (idx = 0; idx < dfunc->df_instr_count; ++idx) delete_instr(dfunc->df_instr + idx); VIM_CLEAR(dfunc->df_instr); dfunc->df_instr = NULL; } if (dfunc->df_instr_debug != NULL) { for (idx = 0; idx < dfunc->df_instr_debug_count; ++idx) delete_instr(dfunc->df_instr_debug + idx); VIM_CLEAR(dfunc->df_instr_debug); dfunc->df_instr_debug = NULL; } #ifdef FEAT_PROFILE if (dfunc->df_instr_prof != NULL) { for (idx = 0; idx < dfunc->df_instr_prof_count; ++idx) delete_instr(dfunc->df_instr_prof + idx); VIM_CLEAR(dfunc->df_instr_prof); dfunc->df_instr_prof = NULL; } #endif if (mark_deleted) dfunc->df_deleted = TRUE; if (dfunc->df_ufunc != NULL) dfunc->df_ufunc->uf_def_status = UF_NOT_COMPILED; } /* * When a user function is deleted, clear the contents of any associated def * function, unless another user function still uses it. * The position in def_functions can be re-used. 
*/ void unlink_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; if (--dfunc->df_refcount <= 0) delete_def_function_contents(dfunc, TRUE); ufunc->uf_def_status = UF_NOT_COMPILED; ufunc->uf_dfunc_idx = 0; if (dfunc->df_ufunc == ufunc) dfunc->df_ufunc = NULL; } } /* * Used when a user function refers to an existing dfunc. */ void link_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; ++dfunc->df_refcount; } } #if defined(EXITFREE) || defined(PROTO) /* * Free all functions defined with ":def". */ void free_def_functions(void) { int idx; for (idx = 0; idx < def_functions.ga_len; ++idx) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + idx; delete_def_function_contents(dfunc, TRUE); vim_free(dfunc->df_name); } ga_clear(&def_functions); } #endif #endif // FEAT_EVAL
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * vim9compile.c: compiling a :def function */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) // When not generating protos this is included in proto.h #ifdef PROTO # include "vim9.h" #endif // Functions defined with :def are stored in this growarray. // They are never removed, so that they can be found by index. // Deleted functions have the df_deleted flag set. garray_T def_functions = {0, 0, sizeof(dfunc_T), 50, NULL}; static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted); /* * Lookup variable "name" in the local scope and return it in "lvar". * "lvar->lv_from_outer" is incremented accordingly. * If "lvar" is NULL only check if the variable can be found. * Return FAIL if not found. */ int lookup_local(char_u *name, size_t len, lvar_T *lvar, cctx_T *cctx) { int idx; lvar_T *lvp; if (len == 0) return FAIL; // Find local in current function scope. for (idx = 0; idx < cctx->ctx_locals.ga_len; ++idx) { lvp = ((lvar_T *)cctx->ctx_locals.ga_data) + idx; if (STRNCMP(name, lvp->lv_name, len) == 0 && STRLEN(lvp->lv_name) == len) { if (lvar != NULL) { *lvar = *lvp; lvar->lv_from_outer = 0; } return OK; } } // Find local in outer function scope. if (cctx->ctx_outer != NULL) { if (lookup_local(name, len, lvar, cctx->ctx_outer) == OK) { if (lvar != NULL) { cctx->ctx_outer_used = TRUE; ++lvar->lv_from_outer; } return OK; } } return FAIL; } /* * Lookup an argument in the current function and an enclosing function. * Returns the argument index in "idxp" * Returns the argument type in "type" * Sets "gen_load_outer" to TRUE if found in outer scope. * Returns OK when found, FAIL otherwise. */ int arg_exists( char_u *name, size_t len, int *idxp, type_T **type, int *gen_load_outer, cctx_T *cctx) { int idx; char_u *va_name; if (len == 0) return FAIL; for (idx = 0; idx < cctx->ctx_ufunc->uf_args_visible; ++idx) { char_u *arg = FUNCARG(cctx->ctx_ufunc, idx); if (STRNCMP(name, arg, len) == 0 && arg[len] == NUL) { if (idxp != NULL) { // Arguments are located above the frame pointer. One further // if there is a vararg argument *idxp = idx - (cctx->ctx_ufunc->uf_args.ga_len + STACK_FRAME_SIZE) + (cctx->ctx_ufunc->uf_va_name != NULL ? -1 : 0); if (cctx->ctx_ufunc->uf_arg_types != NULL) *type = cctx->ctx_ufunc->uf_arg_types[idx]; else *type = &t_any; } return OK; } } va_name = cctx->ctx_ufunc->uf_va_name; if (va_name != NULL && STRNCMP(name, va_name, len) == 0 && va_name[len] == NUL) { if (idxp != NULL) { // varargs is always the last argument *idxp = -STACK_FRAME_SIZE - 1; *type = cctx->ctx_ufunc->uf_va_type; } return OK; } if (cctx->ctx_outer != NULL) { // Lookup the name for an argument of the outer function. if (arg_exists(name, len, idxp, type, gen_load_outer, cctx->ctx_outer) == OK) { if (gen_load_outer != NULL) ++*gen_load_outer; return OK; } } return FAIL; } /* * Lookup a script-local variable in the current script, possibly defined in a * block that contains the function "cctx->ctx_ufunc". * "cctx" is NULL at the script level, "cstack" is NULL in a function. * If "len" is <= 0 "name" must be NUL terminated. * Return NULL when not found. 
*/ static sallvar_T * find_script_var(char_u *name, size_t len, cctx_T *cctx, cstack_T *cstack) { scriptitem_T *si = SCRIPT_ITEM(current_sctx.sc_sid); hashitem_T *hi; int cc; sallvar_T *sav; ufunc_T *ufunc; // Find the list of all script variables with the right name. if (len > 0) { cc = name[len]; name[len] = NUL; } hi = hash_find(&si->sn_all_vars.dv_hashtab, name); if (len > 0) name[len] = cc; if (HASHITEM_EMPTY(hi)) return NULL; sav = HI2SAV(hi); if (sav->sav_block_id == 0) // variable defined in the top script scope is always visible return sav; if (cctx == NULL) { // Not in a function scope, find variable with block ID equal to or // smaller than the current block id. Use "cstack" to go up the block // scopes. while (sav != NULL) { int idx; for (idx = cstack->cs_idx; idx >= 0; --idx) if (cstack->cs_block_id[idx] == sav->sav_block_id) break; if (idx >= 0) break; sav = sav->sav_next; } return sav; } // Go over the variables with this name and find one that was visible // from the function. ufunc = cctx->ctx_ufunc; while (sav != NULL) { int idx; // Go over the blocks that this function was defined in. If the // variable block ID matches it was visible to the function. for (idx = 0; idx < ufunc->uf_block_depth; ++idx) if (ufunc->uf_block_ids[idx] == sav->sav_block_id) return sav; sav = sav->sav_next; } // Not found, variable was not visible. return NULL; } /* * Return TRUE if the script context is Vim9 script. */ int script_is_vim9() { return SCRIPT_ITEM(current_sctx.sc_sid)->sn_version == SCRIPT_VERSION_VIM9; } /* * Lookup a variable (without s: prefix) in the current script. * "cctx" is NULL at the script level, "cstack" is NULL in a function. * Returns OK or FAIL. */ int script_var_exists(char_u *name, size_t len, cctx_T *cctx, cstack_T *cstack) { if (current_sctx.sc_sid <= 0) return FAIL; if (script_is_vim9()) { // Check script variables that were visible where the function was // defined. if (find_script_var(name, len, cctx, cstack) != NULL) return OK; } else { hashtab_T *ht = &SCRIPT_VARS(current_sctx.sc_sid); dictitem_T *di; int cc; // Check script variables that are currently visible cc = name[len]; name[len] = NUL; di = find_var_in_ht(ht, 0, name, TRUE); name[len] = cc; if (di != NULL) return OK; } return FAIL; } /* * Return TRUE if "name" is a local variable, argument, script variable or * imported. */ static int variable_exists(char_u *name, size_t len, cctx_T *cctx) { return (cctx != NULL && (lookup_local(name, len, NULL, cctx) == OK || arg_exists(name, len, NULL, NULL, NULL, cctx) == OK)) || script_var_exists(name, len, cctx, NULL) == OK || find_imported(name, len, FALSE) != NULL; } /* * Return TRUE if "name" is a local variable, argument, script variable, * imported or function. Or commands are being skipped, a declaration may have * been skipped then. */ static int item_exists(char_u *name, size_t len, int cmd UNUSED, cctx_T *cctx) { return variable_exists(name, len, cctx); } /* * Check if "p[len]" is already defined, either in script "import_sid" or in * compilation context "cctx". * "cctx" is NULL at the script level, "cstack" is NULL in a function. * Does not check the global namespace. * If "is_arg" is TRUE the error message is for an argument name. * Return FAIL and give an error if it defined. 
*/ int check_defined( char_u *p, size_t len, cctx_T *cctx, cstack_T *cstack, int is_arg) { int c = p[len]; ufunc_T *ufunc = NULL; // underscore argument is OK if (len == 1 && *p == '_') return OK; if (script_var_exists(p, len, cctx, cstack) == OK) { if (is_arg) semsg(_(e_argument_already_declared_in_script_str), p); else semsg(_(e_variable_already_declared_in_script_str), p); return FAIL; } p[len] = NUL; if ((cctx != NULL && (lookup_local(p, len, NULL, cctx) == OK || arg_exists(p, len, NULL, NULL, NULL, cctx) == OK)) || find_imported(p, len, FALSE) != NULL || (ufunc = find_func_even_dead(p, 0)) != NULL) { // A local or script-local function can shadow a global function. if (ufunc == NULL || ((ufunc->uf_flags & FC_DEAD) == 0 && (!func_is_global(ufunc) || (p[0] == 'g' && p[1] == ':')))) { if (is_arg) semsg(_(e_argument_name_shadows_existing_variable_str), p); else semsg(_(e_name_already_defined_str), p); p[len] = c; return FAIL; } } p[len] = c; return OK; } /* * Return TRUE if "actual" could be "expected" and a runtime typecheck is to be * used. Return FALSE if the types will never match. */ static int use_typecheck(type_T *actual, type_T *expected) { if (actual->tt_type == VAR_ANY || actual->tt_type == VAR_UNKNOWN || (actual->tt_type == VAR_FUNC && (expected->tt_type == VAR_FUNC || expected->tt_type == VAR_PARTIAL) && (actual->tt_member == &t_any || actual->tt_member == &t_unknown || actual->tt_argcount < 0) && (actual->tt_member == &t_unknown || (actual->tt_member == &t_void) == (expected->tt_member == &t_void)))) return TRUE; if ((actual->tt_type == VAR_LIST || actual->tt_type == VAR_DICT) && actual->tt_type == expected->tt_type) // This takes care of a nested list or dict. return use_typecheck(actual->tt_member, expected->tt_member); return FALSE; } /* * Check that * - "actual" matches "expected" type or * - "actual" is a type that can be "expected" type: add a runtime check; or * - return FAIL. * If "actual_is_const" is TRUE then the type won't change at runtime, do not * generate a TYPECHECK. */ int need_type_where( type_T *actual, type_T *expected, int offset, where_T where, cctx_T *cctx, int silent, int actual_is_const) { int ret; if (expected == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { // Using "0", "1" or the result of an expression with "&&" or "||" as a // boolean is OK but requires a conversion. generate_2BOOL(cctx, FALSE, offset); return OK; } ret = check_type_maybe(expected, actual, FALSE, where); if (ret == OK) return OK; // If actual a constant a runtime check makes no sense. If it's // null_function it is OK. if (actual_is_const && ret == MAYBE && actual == &t_func_unknown) return OK; // If the actual type can be the expected type add a runtime check. if (!actual_is_const && ret == MAYBE && use_typecheck(actual, expected)) { generate_TYPECHECK(cctx, expected, offset, where.wt_variable, where.wt_index); return OK; } if (!silent) type_mismatch_where(expected, actual, where); return FAIL; } int need_type( type_T *actual, type_T *expected, int offset, int arg_idx, cctx_T *cctx, int silent, int actual_is_const) { where_T where = WHERE_INIT; where.wt_index = arg_idx; return need_type_where(actual, expected, offset, where, cctx, silent, actual_is_const); } /* * Reserve space for a local variable. * Return the variable or NULL if it failed. 
*/ lvar_T * reserve_local( cctx_T *cctx, char_u *name, size_t len, int isConst, type_T *type) { lvar_T *lvar; dfunc_T *dfunc; if (arg_exists(name, len, NULL, NULL, NULL, cctx) == OK) { emsg_namelen(_(e_str_is_used_as_argument), name, (int)len); return NULL; } if (GA_GROW_FAILS(&cctx->ctx_locals, 1)) return NULL; lvar = ((lvar_T *)cctx->ctx_locals.ga_data) + cctx->ctx_locals.ga_len++; CLEAR_POINTER(lvar); // Every local variable uses the next entry on the stack. We could re-use // the last ones when leaving a scope, but then variables used in a closure // might get overwritten. To keep things simple do not re-use stack // entries. This is less efficient, but memory is cheap these days. dfunc = ((dfunc_T *)def_functions.ga_data) + cctx->ctx_ufunc->uf_dfunc_idx; lvar->lv_idx = dfunc->df_var_names.ga_len; lvar->lv_name = vim_strnsave(name, len == 0 ? STRLEN(name) : len); lvar->lv_const = isConst; lvar->lv_type = type; // Remember the name for debugging. if (GA_GROW_FAILS(&dfunc->df_var_names, 1)) return NULL; ((char_u **)dfunc->df_var_names.ga_data)[lvar->lv_idx] = vim_strsave(lvar->lv_name); ++dfunc->df_var_names.ga_len; return lvar; } /* * If "check_writable" is ASSIGN_CONST give an error if the variable was * defined with :final or :const, if "check_writable" is ASSIGN_FINAL give an * error if the variable was defined with :const. */ static int check_item_writable(svar_T *sv, int check_writable, char_u *name) { if ((check_writable == ASSIGN_CONST && sv->sv_const != 0) || (check_writable == ASSIGN_FINAL && sv->sv_const == ASSIGN_CONST)) { semsg(_(e_cannot_change_readonly_variable_str), name); return FAIL; } return OK; } /* * Find "name" in script-local items of script "sid". * Pass "check_writable" to check_item_writable(). * "cctx" is NULL at the script level, "cstack" is NULL in a function. * Returns the index in "sn_var_vals" if found. * If found but not in "sn_var_vals" returns -1. * If not found or the variable is not writable returns -2. */ int get_script_item_idx( int sid, char_u *name, int check_writable, cctx_T *cctx, cstack_T *cstack) { hashtab_T *ht; dictitem_T *di; scriptitem_T *si = SCRIPT_ITEM(sid); svar_T *sv; int idx; if (!SCRIPT_ID_VALID(sid)) return -1; if (sid == current_sctx.sc_sid) { sallvar_T *sav = find_script_var(name, 0, cctx, cstack); if (sav == NULL) return -2; idx = sav->sav_var_vals_idx; sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } // First look the name up in the hashtable. ht = &SCRIPT_VARS(sid); di = find_var_in_ht(ht, 0, name, TRUE); if (di == NULL) { if (si->sn_autoload_prefix != NULL) { hashitem_T *hi; // A variable exported from an autoload script is in the global // variables, we can find it in the all_vars table. hi = hash_find(&si->sn_all_vars.dv_hashtab, name); if (!HASHITEM_EMPTY(hi)) return HI2SAV(hi)->sav_var_vals_idx; } return -2; } // Now find the svar_T index in sn_var_vals. for (idx = 0; idx < si->sn_var_vals.ga_len; ++idx) { sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (sv->sv_tv == &di->di_tv) { if (check_item_writable(sv, check_writable, name) == FAIL) return -2; return idx; } } return -1; } static imported_T * find_imported_in_script(char_u *name, size_t len, int sid) { scriptitem_T *si; int idx; if (!SCRIPT_ID_VALID(sid)) return NULL; si = SCRIPT_ITEM(sid); for (idx = 0; idx < si->sn_imports.ga_len; ++idx) { imported_T *import = ((imported_T *)si->sn_imports.ga_data) + idx; if (len == 0 ? 
STRCMP(name, import->imp_name) == 0 : STRLEN(import->imp_name) == len && STRNCMP(name, import->imp_name, len) == 0) return import; } return NULL; } /* * Find "name" in imported items of the current script. * If "len" is 0 use any length that works. * If "load" is TRUE and the script was not loaded yet, load it now. */ imported_T * find_imported(char_u *name, size_t len, int load) { imported_T *ret; if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) return NULL; ret = find_imported_in_script(name, len, current_sctx.sc_sid); if (ret != NULL && load && (ret->imp_flags & IMP_FLAGS_AUTOLOAD)) { scid_T dummy; int save_emsg_off = emsg_off; // "emsg_off" will be set when evaluating an expression silently, but // we do want to know about errors in a script. Also because it then // aborts when an error is encountered. emsg_off = FALSE; // script found before but not loaded yet ret->imp_flags &= ~IMP_FLAGS_AUTOLOAD; (void)do_source(SCRIPT_ITEM(ret->imp_sid)->sn_name, FALSE, DOSO_NONE, &dummy); emsg_off = save_emsg_off; } return ret; } /* * Called when checking for a following operator at "arg". When the rest of * the line is empty or only a comment, peek the next line. If there is a next * line return a pointer to it and set "nextp". * Otherwise skip over white space. */ char_u * may_peek_next_line(cctx_T *cctx, char_u *arg, char_u **nextp) { char_u *p = skipwhite(arg); *nextp = NULL; if (*p == NUL || (VIM_ISWHITE(*arg) && vim9_comment_start(p))) { *nextp = peek_next_line_from_context(cctx); if (*nextp != NULL) return *nextp; } return p; } /* * Return a pointer to the next line that isn't empty or only contains a * comment. Skips over white space. * Returns NULL if there is none. */ char_u * peek_next_line_from_context(cctx_T *cctx) { int lnum = cctx->ctx_lnum; while (++lnum < cctx->ctx_ufunc->uf_lines.ga_len) { char_u *line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[lnum]; char_u *p; // ignore NULLs inserted for continuation lines if (line != NULL) { p = skipwhite(line); if (vim9_bad_comment(p)) return NULL; if (*p != NUL && !vim9_comment_start(p)) return p; } } return NULL; } /* * Get the next line of the function from "cctx". * Skips over empty lines. Skips over comment lines if "skip_comment" is TRUE. * Returns NULL when at the end. */ char_u * next_line_from_context(cctx_T *cctx, int skip_comment) { char_u *line; do { ++cctx->ctx_lnum; if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len) { line = NULL; break; } line = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; cctx->ctx_line_start = line; SOURCING_LNUM = cctx->ctx_lnum + 1; } while (line == NULL || *skipwhite(line) == NUL || (skip_comment && vim9_comment_start(skipwhite(line)))); return line; } /* * Skip over white space at "whitep" and assign to "*arg". * If "*arg" is at the end of the line, advance to the next line. * Also when "whitep" points to white space and "*arg" is on a "#". * Return FAIL if beyond the last line, "*arg" is unmodified then. */ int may_get_next_line(char_u *whitep, char_u **arg, cctx_T *cctx) { *arg = skipwhite(whitep); if (vim9_bad_comment(*arg)) return FAIL; if (**arg == NUL || (VIM_ISWHITE(*whitep) && vim9_comment_start(*arg))) { char_u *next = next_line_from_context(cctx, TRUE); if (next == NULL) return FAIL; *arg = skipwhite(next); } return OK; } /* * Idem, and give an error when failed. 
*/ int may_get_next_line_error(char_u *whitep, char_u **arg, cctx_T *cctx) { if (may_get_next_line(whitep, arg, cctx) == FAIL) { SOURCING_LNUM = cctx->ctx_lnum + 1; emsg(_(e_line_incomplete)); return FAIL; } return OK; } /* * Get a line from the compilation context, compatible with exarg_T getline(). * Return a pointer to the line in allocated memory. * Return NULL for end-of-file or some error. */ static char_u * exarg_getline( int c UNUSED, void *cookie, int indent UNUSED, getline_opt_T options UNUSED) { cctx_T *cctx = (cctx_T *)cookie; char_u *p; for (;;) { if (cctx->ctx_lnum >= cctx->ctx_ufunc->uf_lines.ga_len - 1) return NULL; ++cctx->ctx_lnum; p = ((char_u **)cctx->ctx_ufunc->uf_lines.ga_data)[cctx->ctx_lnum]; // Comment lines result in NULL pointers, skip them. if (p != NULL) return vim_strsave(p); } } void fill_exarg_from_cctx(exarg_T *eap, cctx_T *cctx) { eap->getline = exarg_getline; eap->cookie = cctx; eap->skip = cctx->ctx_skip == SKIP_YES; } /* * Return TRUE if "ufunc" should be compiled, taking into account whether * "profile" indicates profiling is to be done. */ int func_needs_compiling(ufunc_T *ufunc, compiletype_T compile_type) { switch (ufunc->uf_def_status) { case UF_TO_BE_COMPILED: return TRUE; case UF_COMPILED: { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE return dfunc->df_instr_prof == NULL; #endif case CT_NONE: return dfunc->df_instr == NULL; case CT_DEBUG: return dfunc->df_instr_debug == NULL; } } case UF_NOT_COMPILED: case UF_COMPILE_ERROR: case UF_COMPILING: break; } return FALSE; } /* * Compile a nested :def command. */ static char_u * compile_nested_function(exarg_T *eap, cctx_T *cctx, garray_T *lines_to_free) { int is_global = *eap->arg == 'g' && eap->arg[1] == ':'; char_u *name_start = eap->arg; char_u *name_end = to_name_end(eap->arg, TRUE); int off; char_u *func_name; char_u *lambda_name; ufunc_T *ufunc; int r = FAIL; compiletype_T compile_type; isn_T *funcref_isn = NULL; lvar_T *lvar = NULL; if (eap->forceit) { emsg(_(e_cannot_use_bang_with_nested_def)); return NULL; } if (*name_start == '/') { name_end = skip_regexp(name_start + 1, '/', TRUE); if (*name_end == '/') ++name_end; set_nextcmd(eap, name_end); } if (name_end == name_start || *skipwhite(name_end) != '(') { if (!ends_excmd2(name_start, name_end)) { if (*skipwhite(name_end) == '.') semsg(_(e_cannot_define_dict_func_in_vim9_script_str), eap->cmd); else semsg(_(e_invalid_command_str), eap->cmd); return NULL; } // "def" or "def Name": list functions if (generate_DEF(cctx, name_start, name_end - name_start) == FAIL) return NULL; return eap->nextcmd == NULL ? (char_u *)"" : eap->nextcmd; } // Only g:Func() can use a namespace. if (name_start[1] == ':' && !is_global) { semsg(_(e_namespace_not_supported_str), name_start); return NULL; } if (cctx->ctx_skip != SKIP_YES && check_defined(name_start, name_end - name_start, cctx, NULL, FALSE) == FAIL) return NULL; if (!ASCII_ISUPPER(is_global ? name_start[2] : name_start[0])) { semsg(_(e_function_name_must_start_with_capital_str), name_start); return NULL; } eap->arg = name_end; fill_exarg_from_cctx(eap, cctx); eap->forceit = FALSE; // We use the special <Lamba>99 name, but it's not really a lambda. lambda_name = vim_strsave(get_lambda_name()); if (lambda_name == NULL) return NULL; // This may free the current line, make a copy of the name. off = is_global ? 
2 : 0; func_name = vim_strnsave(name_start + off, name_end - name_start - off); if (func_name == NULL) { r = FAIL; goto theend; } ufunc = define_function(eap, lambda_name, lines_to_free); if (ufunc == NULL) { r = eap->skip ? OK : FAIL; goto theend; } if (eap->nextcmd != NULL) { semsg(_(e_text_found_after_str_str), eap->cmdidx == CMD_def ? "enddef" : "endfunction", eap->nextcmd); r = FAIL; func_ptr_unref(ufunc); goto theend; } // copy over the block scope IDs before compiling if (!is_global && cctx->ctx_ufunc->uf_block_depth > 0) { int block_depth = cctx->ctx_ufunc->uf_block_depth; ufunc->uf_block_ids = ALLOC_MULT(int, block_depth); if (ufunc->uf_block_ids != NULL) { mch_memmove(ufunc->uf_block_ids, cctx->ctx_ufunc->uf_block_ids, sizeof(int) * block_depth); ufunc->uf_block_depth = block_depth; } } // Define the funcref before compiling, so that it is found by any // recursive call. if (is_global) { r = generate_NEWFUNC(cctx, lambda_name, func_name); func_name = NULL; lambda_name = NULL; } else { // Define a local variable for the function reference. lvar = reserve_local(cctx, func_name, name_end - name_start, TRUE, ufunc->uf_func_type); if (lvar == NULL) goto theend; if (generate_FUNCREF(cctx, ufunc, &funcref_isn) == FAIL) goto theend; r = generate_STORE(cctx, ISN_STORE, lvar->lv_idx, NULL); } compile_type = get_compile_type(ufunc); #ifdef FEAT_PROFILE // If the outer function is profiled, also compile the nested function for // profiling. if (cctx->ctx_compile_type == CT_PROFILE) compile_type = CT_PROFILE; #endif if (func_needs_compiling(ufunc, compile_type) && compile_def_function(ufunc, TRUE, compile_type, cctx) == FAIL) { func_ptr_unref(ufunc); if (lvar != NULL) // Now the local variable can't be used. *lvar->lv_name = '/'; // impossible value goto theend; } #ifdef FEAT_PROFILE // When the outer function is compiled for profiling, the nested function // may be called without profiling. Compile it here in the right context. if (compile_type == CT_PROFILE && func_needs_compiling(ufunc, CT_NONE)) compile_def_function(ufunc, FALSE, CT_NONE, cctx); #endif // If a FUNCREF instruction was generated, set the index after compiling. if (funcref_isn != NULL && ufunc->uf_def_status == UF_COMPILED) funcref_isn->isn_arg.funcref.fr_dfunc_idx = ufunc->uf_dfunc_idx; theend: vim_free(lambda_name); vim_free(func_name); return r == FAIL ? NULL : (char_u *)""; } /* * Compile one Vim expression {expr} in string "p". * "p" points to the opening "{". * Return a pointer to the character after "}", NULL for an error. */ char_u * compile_one_expr_in_str(char_u *p, cctx_T *cctx) { char_u *block_start; char_u *block_end; // Skip the opening {. block_start = skipwhite(p + 1); block_end = block_start; if (*block_start != NUL && skip_expr(&block_end, NULL) == FAIL) return NULL; block_end = skipwhite(block_end); // The block must be closed by a }. if (*block_end != '}') { semsg(_(e_missing_close_curly_str), p); return NULL; } if (compile_expr0(&block_start, cctx) == FAIL) return NULL; may_generate_2STRING(-1, TRUE, cctx); return block_end + 1; } /* * Compile a string "str" (either containing a literal string or a mix of * literal strings and Vim expressions of the form `{expr}`). This is used * when compiling a heredoc assignment to a variable or an interpolated string * in a Vim9 def function. Vim9 instructions are generated to push strings, * evaluate expressions, concatenate them and create a list of lines. When * "evalstr" is TRUE, Vim expressions in "str" are evaluated. 
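 * For example "Count: {n} items" pushes "Count: ", compiles "n" and
 * converts it to a string, pushes " items" and ends with a CONCAT of the
 * three pieces.  A doubled brace "{{" or "}}" produces a literal brace.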
*/ int compile_all_expr_in_str(char_u *str, int evalstr, cctx_T *cctx) { char_u *p = str; char_u *val; int count = 0; if (cctx->ctx_skip == SKIP_YES) return OK; if (!evalstr || *str == NUL) { // Literal string, possibly empty. val = *str != NUL ? vim_strsave(str) : NULL; return generate_PUSHS(cctx, &val); } // Push all the string pieces to the stack, followed by a ISN_CONCAT. while (*p != NUL) { char_u *lit_start; int escaped_brace = FALSE; // Look for a block start. lit_start = p; while (*p != '{' && *p != '}' && *p != NUL) ++p; if (*p != NUL && *p == p[1]) { // Escaped brace, unescape and continue. // Include the brace in the literal string. ++p; escaped_brace = TRUE; } else if (*p == '}') { semsg(_(e_stray_closing_curly_str), str); return FAIL; } // Append the literal part. if (p != lit_start) { val = vim_strnsave(lit_start, (size_t)(p - lit_start)); if (generate_PUSHS(cctx, &val) == FAIL) return FAIL; ++count; } if (*p == NUL) break; if (escaped_brace) { // Skip the second brace. ++p; continue; } p = compile_one_expr_in_str(p, cctx); if (p == NULL) return FAIL; ++count; } // Small optimization, if there's only a single piece skip the ISN_CONCAT. if (count > 1) return generate_CONCAT(cctx, count); return OK; } /* * Return the length of an assignment operator, or zero if there isn't one. */ int assignment_len(char_u *p, int *heredoc) { if (*p == '=') { if (p[1] == '<' && p[2] == '<') { *heredoc = TRUE; return 3; } return 1; } if (vim_strchr((char_u *)"+-*/%", *p) != NULL && p[1] == '=') return 2; if (STRNCMP(p, "..=", 3) == 0) return 3; return 0; } /* * Generate the load instruction for "name". */ static void generate_loadvar( cctx_T *cctx, assign_dest_T dest, char_u *name, lvar_T *lvar, type_T *type) { switch (dest) { case dest_option: case dest_func_option: generate_LOAD(cctx, ISN_LOADOPT, 0, name, type); break; case dest_global: if (vim_strchr(name, AUTOLOAD_CHAR) == NULL) { if (name[2] == NUL) generate_instr_type(cctx, ISN_LOADGDICT, &t_dict_any); else generate_LOAD(cctx, ISN_LOADG, 0, name + 2, type); } else generate_LOAD(cctx, ISN_LOADAUTO, 0, name, type); break; case dest_buffer: generate_LOAD(cctx, ISN_LOADB, 0, name + 2, type); break; case dest_window: generate_LOAD(cctx, ISN_LOADW, 0, name + 2, type); break; case dest_tab: generate_LOAD(cctx, ISN_LOADT, 0, name + 2, type); break; case dest_script: compile_load_scriptvar(cctx, name + (name[1] == ':' ? 2 : 0), NULL, NULL); break; case dest_env: // Include $ in the name here generate_LOAD(cctx, ISN_LOADENV, 0, name, type); break; case dest_reg: generate_LOAD(cctx, ISN_LOADREG, name[1], NULL, &t_string); break; case dest_vimvar: generate_LOADV(cctx, name + 2); break; case dest_local: if (cctx->ctx_skip != SKIP_YES) { if (lvar->lv_from_outer > 0) generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type); else generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type); } break; case dest_expr: // list or dict value should already be on the stack. break; } } /* * Skip over "[expr]" or ".member". * Does not check for any errors. 
*/ static char_u * skip_index(char_u *start) { char_u *p = start; if (*p == '[') { p = skipwhite(p + 1); (void)skip_expr(&p, NULL); p = skipwhite(p); if (*p == ']') return p + 1; return p; } // if (*p == '.') return to_name_end(p + 1, TRUE); } void vim9_declare_error(char_u *name) { char *scope = ""; switch (*name) { case 'g': scope = _("global"); break; case 'b': scope = _("buffer"); break; case 'w': scope = _("window"); break; case 't': scope = _("tab"); break; case 'v': scope = "v:"; break; case '$': semsg(_(e_cannot_declare_an_environment_variable), name); return; case '&': semsg(_(e_cannot_declare_an_option), name); return; case '@': semsg(_(e_cannot_declare_a_register_str), name); return; default: return; } semsg(_(e_cannot_declare_a_scope_variable), scope, name); } /* * For one assignment figure out the type of destination. Return it in "dest". * When not recognized "dest" is not set. * For an option "option_scope" is set. * For a v:var "vimvaridx" is set. * "type" is set to the destination type if known, unchanted otherwise. * Return FAIL if an error message was given. */ int get_var_dest( char_u *name, assign_dest_T *dest, cmdidx_T cmdidx, int *option_scope, int *vimvaridx, type_T **type, cctx_T *cctx) { char_u *p; if (*name == '&') { int cc; long numval; getoption_T opt_type; int opt_p_flags; *dest = dest_option; if (cmdidx == CMD_final || cmdidx == CMD_const) { emsg(_(e_cannot_lock_option)); return FAIL; } p = name; p = find_option_end(&p, option_scope); if (p == NULL) { // cannot happen? emsg(_(e_unexpected_characters_in_assignment)); return FAIL; } cc = *p; *p = NUL; opt_type = get_option_value(skip_option_env_lead(name), &numval, NULL, &opt_p_flags, *option_scope); *p = cc; switch (opt_type) { case gov_unknown: semsg(_(e_unknown_option_str), name); return FAIL; case gov_string: case gov_hidden_string: if (opt_p_flags & P_FUNC) { // might be a Funcref, check the type later *type = &t_any; *dest = dest_func_option; } else { *type = &t_string; } break; case gov_bool: case gov_hidden_bool: *type = &t_bool; break; case gov_number: case gov_hidden_number: *type = &t_number; break; } } else if (*name == '$') { *dest = dest_env; *type = &t_string; } else if (*name == '@') { if (name[1] != '@' && (!valid_yank_reg(name[1], FALSE) || name[1] == '.')) { emsg_invreg(name[1]); return FAIL; } *dest = dest_reg; *type = name[1] == '#' ? &t_number_or_string : &t_string; } else if (STRNCMP(name, "g:", 2) == 0) { *dest = dest_global; } else if (STRNCMP(name, "b:", 2) == 0) { *dest = dest_buffer; } else if (STRNCMP(name, "w:", 2) == 0) { *dest = dest_window; } else if (STRNCMP(name, "t:", 2) == 0) { *dest = dest_tab; } else if (STRNCMP(name, "v:", 2) == 0) { typval_T *vtv; int di_flags; *vimvaridx = find_vim_var(name + 2, &di_flags); if (*vimvaridx < 0) { semsg(_(e_variable_not_found_str), name); return FAIL; } // We use the current value of "sandbox" here, is that OK? if (var_check_ro(di_flags, name, FALSE)) return FAIL; *dest = dest_vimvar; vtv = get_vim_var_tv(*vimvaridx); *type = typval2type_vimvar(vtv, cctx->ctx_type_list); } return OK; } static int is_decl_command(cmdidx_T cmdidx) { return cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_final || cmdidx == CMD_const; } /* * Figure out the LHS type and other properties for an assignment or one item * of ":unlet" with an index. * Returns OK or FAIL. 
*/ int compile_lhs( char_u *var_start, lhs_T *lhs, cmdidx_T cmdidx, int heredoc, int has_cmd, // "var" before "var_start" int oplen, cctx_T *cctx) { char_u *var_end; int is_decl = is_decl_command(cmdidx); CLEAR_POINTER(lhs); lhs->lhs_dest = dest_local; lhs->lhs_vimvaridx = -1; lhs->lhs_scriptvar_idx = -1; // "dest_end" is the end of the destination, including "[expr]" or // ".name". // "var_end" is the end of the variable/option/etc. name. lhs->lhs_dest_end = skip_var_one(var_start, FALSE); if (*var_start == '@') var_end = var_start + 2; else { // skip over the leading "&", "&l:", "&g:" and "$" var_end = skip_option_env_lead(var_start); var_end = to_name_end(var_end, TRUE); } // "a: type" is declaring variable "a" with a type, not dict "a:". if (is_decl && lhs->lhs_dest_end == var_start + 2 && lhs->lhs_dest_end[-1] == ':') --lhs->lhs_dest_end; if (is_decl && var_end == var_start + 2 && var_end[-1] == ':') --var_end; lhs->lhs_end = lhs->lhs_dest_end; // compute the length of the destination without "[expr]" or ".name" lhs->lhs_varlen = var_end - var_start; lhs->lhs_varlen_total = lhs->lhs_varlen; lhs->lhs_name = vim_strnsave(var_start, lhs->lhs_varlen); if (lhs->lhs_name == NULL) return FAIL; if (lhs->lhs_dest_end > var_start + lhs->lhs_varlen) // Something follows after the variable: "var[idx]" or "var.key". lhs->lhs_has_index = TRUE; if (heredoc) lhs->lhs_type = &t_list_string; else lhs->lhs_type = &t_any; if (cctx->ctx_skip != SKIP_YES) { int declare_error = FALSE; if (get_var_dest(lhs->lhs_name, &lhs->lhs_dest, cmdidx, &lhs->lhs_opt_flags, &lhs->lhs_vimvaridx, &lhs->lhs_type, cctx) == FAIL) return FAIL; if (lhs->lhs_dest != dest_local && cmdidx != CMD_const && cmdidx != CMD_final) { // Specific kind of variable recognized. declare_error = is_decl; } else { // No specific kind of variable recognized, just a name. if (check_reserved_name(lhs->lhs_name) == FAIL) return FAIL; if (lookup_local(var_start, lhs->lhs_varlen, &lhs->lhs_local_lvar, cctx) == OK) lhs->lhs_lvar = &lhs->lhs_local_lvar; else { CLEAR_FIELD(lhs->lhs_arg_lvar); if (arg_exists(var_start, lhs->lhs_varlen, &lhs->lhs_arg_lvar.lv_idx, &lhs->lhs_arg_lvar.lv_type, &lhs->lhs_arg_lvar.lv_from_outer, cctx) == OK) { if (is_decl) { semsg(_(e_str_is_used_as_argument), lhs->lhs_name); return FAIL; } lhs->lhs_lvar = &lhs->lhs_arg_lvar; } } if (lhs->lhs_lvar != NULL) { if (is_decl) { semsg(_(e_variable_already_declared), lhs->lhs_name); return FAIL; } } else { int script_namespace = lhs->lhs_varlen > 1 && STRNCMP(var_start, "s:", 2) == 0; int script_var = (script_namespace ? script_var_exists(var_start + 2, lhs->lhs_varlen - 2, cctx, NULL) : script_var_exists(var_start, lhs->lhs_varlen, cctx, NULL)) == OK; imported_T *import = find_imported(var_start, lhs->lhs_varlen, FALSE); if (script_namespace || script_var || import != NULL) { char_u *rawname = lhs->lhs_name + (lhs->lhs_name[1] == ':' ? 
2 : 0); if (script_namespace && current_script_is_vim9()) { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), var_start); return FAIL; } if (is_decl) { if (script_namespace) semsg(_(e_cannot_declare_script_variable_in_function_str), lhs->lhs_name); else semsg(_(e_variable_already_declared_in_script_str), lhs->lhs_name); return FAIL; } else if (cctx->ctx_ufunc->uf_script_ctx_version == SCRIPT_VERSION_VIM9 && script_namespace && !script_var && import == NULL) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } lhs->lhs_dest = dest_script; // existing script-local variables should have a type lhs->lhs_scriptvar_sid = current_sctx.sc_sid; if (import != NULL) { char_u *dot = vim_strchr(var_start, '.'); char_u *p; // for an import the name is what comes after the dot if (dot == NULL) { semsg(_(e_no_dot_after_imported_name_str), var_start); return FAIL; } p = skipwhite(dot + 1); var_end = to_name_end(p, TRUE); if (var_end == p) { semsg(_(e_missing_name_after_imported_name_str), var_start); return FAIL; } vim_free(lhs->lhs_name); lhs->lhs_varlen = var_end - p; lhs->lhs_name = vim_strnsave(p, lhs->lhs_varlen); if (lhs->lhs_name == NULL) return FAIL; rawname = lhs->lhs_name; lhs->lhs_scriptvar_sid = import->imp_sid; // TODO: where do we check this name is exported? // Check if something follows: "exp.var[idx]" or // "exp.var.key". lhs->lhs_has_index = lhs->lhs_dest_end > skipwhite(var_end); } if (SCRIPT_ID_VALID(lhs->lhs_scriptvar_sid)) { // Check writable only when no index follows. lhs->lhs_scriptvar_idx = get_script_item_idx( lhs->lhs_scriptvar_sid, rawname, lhs->lhs_has_index ? ASSIGN_FINAL : ASSIGN_CONST, cctx, NULL); if (lhs->lhs_scriptvar_idx >= 0) { scriptitem_T *si = SCRIPT_ITEM( lhs->lhs_scriptvar_sid); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + lhs->lhs_scriptvar_idx; lhs->lhs_type = sv->sv_type; } } } else if (check_defined(var_start, lhs->lhs_varlen, cctx, NULL, FALSE) == FAIL) return FAIL; } } if (declare_error) { vim9_declare_error(lhs->lhs_name); return FAIL; } } // handle "a:name" as a name, not index "name" in "a" if (lhs->lhs_varlen > 1 || var_start[lhs->lhs_varlen] != ':') var_end = lhs->lhs_dest_end; if (lhs->lhs_dest != dest_option && lhs->lhs_dest != dest_func_option) { if (is_decl && *var_end == ':') { char_u *p; // parse optional type: "let var: type = expr" if (!VIM_ISWHITE(var_end[1])) { semsg(_(e_white_space_required_after_str_str), ":", var_end); return FAIL; } p = skipwhite(var_end + 1); lhs->lhs_type = parse_type(&p, cctx->ctx_type_list, TRUE); if (lhs->lhs_type == NULL) return FAIL; lhs->lhs_has_type = TRUE; lhs->lhs_end = p; } else if (lhs->lhs_lvar != NULL) lhs->lhs_type = lhs->lhs_lvar->lv_type; } if (oplen == 3 && !heredoc && lhs->lhs_dest != dest_global && !lhs->lhs_has_index && lhs->lhs_type->tt_type != VAR_STRING && lhs->lhs_type->tt_type != VAR_ANY) { emsg(_(e_can_only_concatenate_to_string)); return FAIL; } if (lhs->lhs_lvar == NULL && lhs->lhs_dest == dest_local && cctx->ctx_skip != SKIP_YES) { if (oplen > 1 && !heredoc) { // +=, /=, etc. require an existing variable semsg(_(e_cannot_use_operator_on_new_variable), lhs->lhs_name); return FAIL; } if (!is_decl || (lhs->lhs_has_index && !has_cmd && cctx->ctx_skip != SKIP_YES)) { semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } // Check the name is valid for a funcref. if ((lhs->lhs_type->tt_type == VAR_FUNC || lhs->lhs_type->tt_type == VAR_PARTIAL) && var_wrong_func_name(lhs->lhs_name, TRUE)) return FAIL; // New local variable. 
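	// reserve_local() also records the name in df_var_names so the
	// variable can be shown when debugging.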
lhs->lhs_lvar = reserve_local(cctx, var_start, lhs->lhs_varlen, cmdidx == CMD_final || cmdidx == CMD_const, lhs->lhs_type); if (lhs->lhs_lvar == NULL) return FAIL; lhs->lhs_new_local = TRUE; } lhs->lhs_member_type = lhs->lhs_type; if (lhs->lhs_has_index) { char_u *after = var_start + lhs->lhs_varlen; char_u *p; // Something follows after the variable: "var[idx]" or "var.key". if (is_decl && cctx->ctx_skip != SKIP_YES) { if (has_cmd) emsg(_(e_cannot_use_index_when_declaring_variable)); else semsg(_(e_unknown_variable_str), lhs->lhs_name); return FAIL; } // Now: var_start[lhs->lhs_varlen] is '[' or '.' // Only the last index is used below, if there are others // before it generate code for the expression. Thus for // "ll[1][2]" the expression is "ll[1]" and "[2]" is the index. for (;;) { p = skip_index(after); if (*p != '[' && *p != '.') { lhs->lhs_varlen_total = p - var_start; break; } after = p; } if (after > var_start + lhs->lhs_varlen) { lhs->lhs_varlen = after - var_start; lhs->lhs_dest = dest_expr; // We don't know the type before evaluating the expression, // use "any" until then. lhs->lhs_type = &t_any; } if (lhs->lhs_type->tt_member == NULL) lhs->lhs_member_type = &t_any; else lhs->lhs_member_type = lhs->lhs_type->tt_member; } return OK; } /* * Figure out the LHS and check a few errors. */ int compile_assign_lhs( char_u *var_start, lhs_T *lhs, cmdidx_T cmdidx, int is_decl, int heredoc, int has_cmd, // "var" before "var_start" int oplen, cctx_T *cctx) { if (compile_lhs(var_start, lhs, cmdidx, heredoc, has_cmd, oplen, cctx) == FAIL) return FAIL; if (!lhs->lhs_has_index && lhs->lhs_lvar == &lhs->lhs_arg_lvar) { semsg(_(e_cannot_assign_to_argument), lhs->lhs_name); return FAIL; } if (!is_decl && lhs->lhs_lvar != NULL && lhs->lhs_lvar->lv_const && !lhs->lhs_has_index) { semsg(_(e_cannot_assign_to_constant), lhs->lhs_name); return FAIL; } return OK; } /* * Return TRUE if "lhs" has a range index: "[expr : expr]". */ static int has_list_index(char_u *idx_start, cctx_T *cctx) { char_u *p = idx_start; int save_skip; if (*p != '[') return FALSE; p = skipwhite(p + 1); if (*p == ':') return TRUE; save_skip = cctx->ctx_skip; cctx->ctx_skip = SKIP_YES; (void)compile_expr0(&p, cctx); cctx->ctx_skip = save_skip; return *skipwhite(p) == ':'; } /* * For an assignment with an index, compile the "idx" in "var[idx]" or "key" in * "var.key". 
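 * When a range "[expr : expr]" is found "*range" is set to TRUE and both
 * indexes are compiled; an empty first index pushes zero, an empty second
 * index pushes v:none.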
*/ static int compile_assign_index( char_u *var_start, lhs_T *lhs, int *range, cctx_T *cctx) { size_t varlen = lhs->lhs_varlen; char_u *p; int r = OK; int need_white_before = TRUE; int empty_second; p = var_start + varlen; if (*p == '[') { p = skipwhite(p + 1); if (*p == ':') { // empty first index, push zero r = generate_PUSHNR(cctx, 0); need_white_before = FALSE; } else r = compile_expr0(&p, cctx); if (r == OK && *skipwhite(p) == ':') { // unlet var[idx : idx] // blob[idx : idx] = value *range = TRUE; p = skipwhite(p); empty_second = *skipwhite(p + 1) == ']'; if ((need_white_before && !IS_WHITE_OR_NUL(p[-1])) || (!empty_second && !IS_WHITE_OR_NUL(p[1]))) { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", p); return FAIL; } p = skipwhite(p + 1); if (*p == ']') // empty second index, push "none" r = generate_PUSHSPEC(cctx, VVAL_NONE); else r = compile_expr0(&p, cctx); } if (r == OK && *skipwhite(p) != ']') { // this should not happen emsg(_(e_missing_closing_square_brace)); r = FAIL; } } else // if (*p == '.') { char_u *key_end = to_name_end(p + 1, TRUE); char_u *key = vim_strnsave(p + 1, key_end - p - 1); r = generate_PUSHS(cctx, &key); } return r; } /* * For a LHS with an index, load the variable to be indexed. */ static int compile_load_lhs( lhs_T *lhs, char_u *var_start, type_T *rhs_type, cctx_T *cctx) { if (lhs->lhs_dest == dest_expr) { size_t varlen = lhs->lhs_varlen; int c = var_start[varlen]; int lines_len = cctx->ctx_ufunc->uf_lines.ga_len; char_u *p = var_start; int res; // Evaluate "ll[expr]" of "ll[expr][idx]". End the line with a NUL and // limit the lines array length to avoid skipping to a following line. var_start[varlen] = NUL; cctx->ctx_ufunc->uf_lines.ga_len = cctx->ctx_lnum + 1; res = compile_expr0(&p, cctx); var_start[varlen] = c; cctx->ctx_ufunc->uf_lines.ga_len = lines_len; if (res == FAIL || p != var_start + varlen) { // this should not happen if (res != FAIL) emsg(_(e_missing_closing_square_brace)); return FAIL; } lhs->lhs_type = cctx->ctx_type_stack.ga_len == 0 ? &t_void : get_type_on_stack(cctx, 0); // now we can properly check the type if (rhs_type != NULL && lhs->lhs_type->tt_member != NULL && rhs_type != &t_void && need_type(rhs_type, lhs->lhs_type->tt_member, -2, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } else generate_loadvar(cctx, lhs->lhs_dest, lhs->lhs_name, lhs->lhs_lvar, lhs->lhs_type); return OK; } /* * Produce code for loading "lhs" and also take care of an index. * Return OK/FAIL. */ int compile_load_lhs_with_index(lhs_T *lhs, char_u *var_start, cctx_T *cctx) { compile_load_lhs(lhs, var_start, NULL, cctx); if (lhs->lhs_has_index) { int range = FALSE; // Get member from list or dict. First compile the // index value. if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (range) { semsg(_(e_cannot_use_range_with_assignment_operator_str), var_start); return FAIL; } // Get the member. if (compile_member(FALSE, NULL, cctx) == FAIL) return FAIL; } return OK; } /* * Assignment to a list or dict member, or ":unlet" for the item, using the * information in "lhs". * Returns OK or FAIL. 
*/ int compile_assign_unlet( char_u *var_start, lhs_T *lhs, int is_assign, type_T *rhs_type, cctx_T *cctx) { vartype_T dest_type; int range = FALSE; if (compile_assign_index(var_start, lhs, &range, cctx) == FAIL) return FAIL; if (is_assign && range && lhs->lhs_type->tt_type != VAR_LIST && lhs->lhs_type != &t_blob && lhs->lhs_type != &t_any) { semsg(_(e_cannot_use_range_with_assignment_str), var_start); return FAIL; } if (lhs->lhs_type == &t_any) { // Index on variable of unknown type: check at runtime. dest_type = VAR_ANY; } else { dest_type = lhs->lhs_type->tt_type; if (dest_type == VAR_DICT && range) { emsg(e_cannot_use_range_with_dictionary); return FAIL; } if (dest_type == VAR_DICT && may_generate_2STRING(-1, FALSE, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_BLOB) { type_T *type; if (range) { type = get_type_on_stack(cctx, 1); if (need_type(type, &t_number, -2, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } type = get_type_on_stack(cctx, 0); if ((dest_type != VAR_BLOB && type->tt_type != VAR_SPECIAL) && need_type(type, &t_number, -1, 0, cctx, FALSE, FALSE) == FAIL) return FAIL; } } if (cctx->ctx_skip == SKIP_YES) return OK; // Load the dict or list. On the stack we then have: // - value (for assignment, not for :unlet) // - index // - for [a : b] second index // - variable if (compile_load_lhs(lhs, var_start, rhs_type, cctx) == FAIL) return FAIL; if (dest_type == VAR_LIST || dest_type == VAR_DICT || dest_type == VAR_BLOB || dest_type == VAR_ANY) { if (is_assign) { if (range) { if (generate_instr_drop(cctx, ISN_STORERANGE, 4) == NULL) return FAIL; } else { isn_T *isn = generate_instr_drop(cctx, ISN_STOREINDEX, 3); if (isn == NULL) return FAIL; isn->isn_arg.vartype = dest_type; } } else if (range) { if (generate_instr_drop(cctx, ISN_UNLETRANGE, 3) == NULL) return FAIL; } else { if (generate_instr_drop(cctx, ISN_UNLETINDEX, 2) == NULL) return FAIL; } } else { emsg(_(e_indexable_type_required)); return FAIL; } return OK; } /* * Compile declaration and assignment: * "let name" * "var name = expr" * "final name = expr" * "const name = expr" * "name = expr" * "arg" points to "name". * "++arg" and "--arg" * Return NULL for an error. * Return "arg" if it does not look like a variable list. */ static char_u * compile_assignment(char_u *arg, exarg_T *eap, cmdidx_T cmdidx, cctx_T *cctx) { char_u *var_start; char_u *p; char_u *end = arg; char_u *ret = NULL; int var_count = 0; int var_idx; int semicolon = 0; int did_generate_slice = FALSE; garray_T *instr = &cctx->ctx_instr; char_u *op; int oplen = 0; int heredoc = FALSE; int incdec = FALSE; type_T *rhs_type = &t_any; char_u *sp; int is_decl = is_decl_command(cmdidx); lhs_T lhs; long start_lnum = SOURCING_LNUM; // Skip over the "varname" or "[varname, varname]" to get to any "=". p = skip_var_list(arg, TRUE, &var_count, &semicolon, TRUE); if (p == NULL) return *arg == '[' ? arg : NULL; lhs.lhs_name = NULL; if (eap->cmdidx == CMD_increment || eap->cmdidx == CMD_decrement) { if (VIM_ISWHITE(eap->cmd[2])) { semsg(_(e_no_white_space_allowed_after_str_str), eap->cmdidx == CMD_increment ? "++" : "--", eap->cmd); return NULL; } op = (char_u *)(eap->cmdidx == CMD_increment ? 
"+=" : "-="); oplen = 2; incdec = TRUE; } else { sp = p; p = skipwhite(p); op = p; oplen = assignment_len(p, &heredoc); if (var_count > 0 && oplen == 0) // can be something like "[1, 2]->func()" return arg; if (oplen > 0 && (!VIM_ISWHITE(*sp) || !IS_WHITE_OR_NUL(op[oplen]))) { error_white_both(op, oplen); return NULL; } } if (heredoc) { list_T *l; // [let] varname =<< [trim] {end} eap->getline = exarg_getline; eap->cookie = cctx; l = heredoc_get(eap, op + 3, FALSE, TRUE); if (l == NULL) return NULL; list_free(l); p += STRLEN(p); end = p; } else if (var_count > 0) { char_u *wp; // for "[var, var] = expr" evaluate the expression here, loop over the // list of variables below. // A line break may follow the "=". wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) return FAIL; if (compile_expr0(&p, cctx) == FAIL) return NULL; end = p; if (cctx->ctx_skip != SKIP_YES) { type_T *stacktype; int needed_list_len; int did_check = FALSE; stacktype = cctx->ctx_type_stack.ga_len == 0 ? &t_void : get_type_on_stack(cctx, 0); if (stacktype->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } if (need_type(stacktype, &t_list_any, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; // If a constant list was used we can check the length right here. needed_list_len = semicolon ? var_count - 1 : var_count; if (instr->ga_len > 0) { isn_T *isn = ((isn_T *)instr->ga_data) + instr->ga_len - 1; if (isn->isn_type == ISN_NEWLIST) { did_check = TRUE; if (semicolon ? isn->isn_arg.number < needed_list_len : isn->isn_arg.number != needed_list_len) { semsg(_(e_expected_nr_items_but_got_nr), needed_list_len, (int)isn->isn_arg.number); goto theend; } } } if (!did_check) generate_CHECKLEN(cctx, needed_list_len, semicolon); if (stacktype->tt_member != NULL) rhs_type = stacktype->tt_member; } } /* * Loop over variables in "[var, var] = expr". * For "var = expr" and "let var: type" this is done only once. */ if (var_count > 0) var_start = skipwhite(arg + 1); // skip over the "[" else var_start = arg; for (var_idx = 0; var_idx == 0 || var_idx < var_count; var_idx++) { int instr_count = -1; int save_lnum; int skip_store = FALSE; type_T *inferred_type = NULL; if (var_start[0] == '_' && !eval_isnamec(var_start[1])) { // Ignore underscore in "[a, _, b] = list". if (var_count > 0) { var_start = skipwhite(var_start + 2); continue; } emsg(_(e_cannot_use_underscore_here)); goto theend; } vim_free(lhs.lhs_name); /* * Figure out the LHS type and other properties. */ if (compile_assign_lhs(var_start, &lhs, cmdidx, is_decl, heredoc, var_start > eap->cmd, oplen, cctx) == FAIL) goto theend; if (heredoc) { SOURCING_LNUM = start_lnum; if (lhs.lhs_has_type && need_type(&t_list_string, lhs.lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } else { if (cctx->ctx_skip == SKIP_YES) { if (oplen > 0 && var_count == 0) { // skip over the "=" and the expression p = skipwhite(op + oplen); (void)compile_expr0(&p, cctx); } } else if (oplen > 0) { int is_const = FALSE; char_u *wp; // for "+=", "*=", "..=" etc. first load the current value if (*op != '=' && compile_load_lhs_with_index(&lhs, var_start, cctx) == FAIL) goto theend; // For "var = expr" evaluate the expression. if (var_count == 0) { int r; // Compile the expression. instr_count = instr->ga_len; if (incdec) { r = generate_PUSHNR(cctx, 1); } else { // Temporarily hide the new local variable here, it is // not available to this expression. 
if (lhs.lhs_new_local) --cctx->ctx_locals.ga_len; wp = op + oplen; if (may_get_next_line_error(wp, &p, cctx) == FAIL) { if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; goto theend; } r = compile_expr0_ext(&p, cctx, &is_const); if (lhs.lhs_new_local) ++cctx->ctx_locals.ga_len; if (r == FAIL) goto theend; } } else if (semicolon && var_idx == var_count - 1) { // For "[var; var] = expr" get the rest of the list did_generate_slice = TRUE; if (generate_SLICE(cctx, var_count - 1) == FAIL) goto theend; } else { // For "[var, var] = expr" get the "var_idx" item from the // list. if (generate_GETITEM(cctx, var_idx, *op != '=') == FAIL) goto theend; } rhs_type = cctx->ctx_type_stack.ga_len == 0 ? &t_void : get_type_on_stack(cctx, 0); if (lhs.lhs_lvar != NULL && (is_decl || !lhs.lhs_has_type)) { if ((rhs_type->tt_type == VAR_FUNC || rhs_type->tt_type == VAR_PARTIAL) && !lhs.lhs_has_index && var_wrong_func_name(lhs.lhs_name, TRUE)) goto theend; if (lhs.lhs_new_local && !lhs.lhs_has_type) { if (rhs_type->tt_type == VAR_VOID) { emsg(_(e_cannot_use_void_value)); goto theend; } else { // An empty list or dict has a &t_unknown member, // for a variable that implies &t_any. if (rhs_type == &t_list_empty) lhs.lhs_lvar->lv_type = &t_list_any; else if (rhs_type == &t_dict_empty) lhs.lhs_lvar->lv_type = &t_dict_any; else if (rhs_type == &t_unknown) lhs.lhs_lvar->lv_type = &t_any; else { lhs.lhs_lvar->lv_type = rhs_type; inferred_type = rhs_type; } } } else if (*op == '=') { type_T *use_type = lhs.lhs_lvar->lv_type; where_T where = WHERE_INIT; // Without operator check type here, otherwise below. // Use the line number of the assignment. SOURCING_LNUM = start_lnum; where.wt_index = var_count > 0 ? var_idx + 1 : 0; where.wt_variable = var_count > 0; // If assigning to a list or dict member, use the // member type. Not for "list[:] =". if (lhs.lhs_has_index && !has_list_index(var_start + lhs.lhs_varlen, cctx)) use_type = lhs.lhs_member_type; if (need_type_where(rhs_type, use_type, -1, where, cctx, FALSE, is_const) == FAIL) goto theend; } } else { type_T *lhs_type = lhs.lhs_member_type; // Special case: assigning to @# can use a number or a // string. // Also: can assign a number to a float. 
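		    // In those cases accept a number on the RHS by relaxing
		    // the expected type to number for the check below.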
if ((lhs_type == &t_number_or_string || lhs_type == &t_float) && rhs_type->tt_type == VAR_NUMBER) lhs_type = &t_number; if (*p != '=' && need_type(rhs_type, lhs_type, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } } else if (cmdidx == CMD_final) { emsg(_(e_final_requires_a_value)); goto theend; } else if (cmdidx == CMD_const) { emsg(_(e_const_requires_a_value)); goto theend; } else if (!lhs.lhs_has_type || lhs.lhs_dest == dest_option || lhs.lhs_dest == dest_func_option) { emsg(_(e_type_or_initialization_required)); goto theend; } else { int r = OK; // variables are always initialized if (GA_GROW_FAILS(instr, 1)) goto theend; switch (lhs.lhs_member_type->tt_type) { case VAR_BOOL: r = generate_PUSHBOOL(cctx, VVAL_FALSE); break; case VAR_FLOAT: #ifdef FEAT_FLOAT r = generate_PUSHF(cctx, 0.0); #endif break; case VAR_STRING: r = generate_PUSHS(cctx, NULL); break; case VAR_BLOB: r = generate_PUSHBLOB(cctx, blob_alloc()); break; case VAR_FUNC: r = generate_PUSHFUNC(cctx, NULL, &t_func_void); break; case VAR_LIST: r = generate_NEWLIST(cctx, 0, FALSE); break; case VAR_DICT: r = generate_NEWDICT(cctx, 0, FALSE); break; case VAR_JOB: r = generate_PUSHJOB(cctx); break; case VAR_CHANNEL: r = generate_PUSHCHANNEL(cctx); break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_PARTIAL: case VAR_VOID: case VAR_INSTR: case VAR_SPECIAL: // cannot happen // This is skipped for local variables, they are always // initialized to zero. But in a "for" or "while" loop // the value may have been changed. if (lhs.lhs_dest == dest_local && !inside_loop_scope(cctx)) skip_store = TRUE; else { instr_count = instr->ga_len; r = generate_PUSHNR(cctx, 0); } break; } if (r == FAIL) goto theend; } if (var_count == 0) end = p; } // no need to parse more when skipping if (cctx->ctx_skip == SKIP_YES) break; if (oplen > 0 && *op != '=') { type_T *expected; type_T *stacktype = NULL; if (*op == '.') { if (may_generate_2STRING(-1, FALSE, cctx) == FAIL) goto theend; } else { expected = lhs.lhs_member_type; stacktype = get_type_on_stack(cctx, 0); if ( #ifdef FEAT_FLOAT // If variable is float operation with number is OK. !(expected == &t_float && (stacktype == &t_number || stacktype == &t_number_bool)) && #endif need_type(stacktype, expected, -1, 0, cctx, FALSE, FALSE) == FAIL) goto theend; } if (*op == '.') { if (generate_CONCAT(cctx, 2) == FAIL) goto theend; } else if (*op == '+') { if (generate_add_instr(cctx, operator_type(lhs.lhs_member_type, stacktype), lhs.lhs_member_type, stacktype, EXPR_APPEND) == FAIL) goto theend; } else if (generate_two_op(cctx, op) == FAIL) goto theend; } // Use the line number of the assignment for store instruction. save_lnum = cctx->ctx_lnum; cctx->ctx_lnum = start_lnum - 1; if (lhs.lhs_has_index) { // Use the info in "lhs" to store the value at the index in the // list or dict. if (compile_assign_unlet(var_start, &lhs, TRUE, rhs_type, cctx) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } else { if (is_decl && cmdidx == CMD_const && (lhs.lhs_dest == dest_script || lhs.lhs_dest == dest_global || lhs.lhs_dest == dest_local)) // ":const var": lock the value, but not referenced variables generate_LOCKCONST(cctx); if ((lhs.lhs_type->tt_type == VAR_DICT || lhs.lhs_type->tt_type == VAR_LIST) && lhs.lhs_type->tt_member != NULL && lhs.lhs_type->tt_member != &t_any && lhs.lhs_type->tt_member != &t_unknown) // Set the type in the list or dict, so that it can be checked, // also in legacy script. 
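		// Here the declared type of the variable is used; a type
		// that was only inferred from the RHS is handled in the
		// "else if" below.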
generate_SETTYPE(cctx, lhs.lhs_type); else if (inferred_type != NULL && (inferred_type->tt_type == VAR_DICT || inferred_type->tt_type == VAR_LIST) && inferred_type->tt_member != NULL && inferred_type->tt_member != &t_unknown && inferred_type->tt_member != &t_any) // Set the type in the list or dict, so that it can be checked, // also in legacy script. generate_SETTYPE(cctx, inferred_type); if (!skip_store && generate_store_lhs(cctx, &lhs, instr_count, is_decl) == FAIL) { cctx->ctx_lnum = save_lnum; goto theend; } } cctx->ctx_lnum = save_lnum; if (var_idx + 1 < var_count) var_start = skipwhite(lhs.lhs_end + 1); } // For "[var, var] = expr" drop the "expr" value. // Also for "[var, var; _] = expr". if (var_count > 0 && (!semicolon || !did_generate_slice)) { if (generate_instr_drop(cctx, ISN_DROP, 1) == NULL) goto theend; } ret = skipwhite(end); theend: vim_free(lhs.lhs_name); return ret; } /* * Check for an assignment at "eap->cmd", compile it if found. * Return NOTDONE if there is none, FAIL for failure, OK if done. */ static int may_compile_assignment(exarg_T *eap, char_u **line, cctx_T *cctx) { char_u *pskip; char_u *p; // Assuming the command starts with a variable or function name, // find what follows. // Skip over "var.member", "var[idx]" and the like. // Also "&opt = val", "$ENV = val" and "@r = val". pskip = (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@') ? eap->cmd + 1 : eap->cmd; p = to_name_end(pskip, TRUE); if (p > eap->cmd && *p != NUL) { char_u *var_end; int oplen; int heredoc; if (eap->cmd[0] == '@') var_end = eap->cmd + 2; else var_end = find_name_end(pskip, NULL, NULL, FNE_CHECK_START | FNE_INCL_BR); oplen = assignment_len(skipwhite(var_end), &heredoc); if (oplen > 0) { size_t len = p - eap->cmd; // Recognize an assignment if we recognize the variable // name: // "&opt = expr" // "$ENV = expr" // "@r = expr" // "g:var = expr" // "g:[key] = expr" // "local = expr" where "local" is a local var. // "script = expr" where "script" is a script-local var. // "import = expr" where "import" is an imported var if (*eap->cmd == '&' || *eap->cmd == '$' || *eap->cmd == '@' || ((len) > 2 && eap->cmd[1] == ':') || STRNCMP(eap->cmd, "g:[", 3) == 0 || variable_exists(eap->cmd, len, cctx)) { *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL || *line == eap->cmd) return FAIL; return OK; } } } if (*eap->cmd == '[') { // might be "[var, var] = expr" *line = compile_assignment(eap->cmd, eap, CMD_SIZE, cctx); if (*line == NULL) return FAIL; if (*line != eap->cmd) return OK; } return NOTDONE; } /* * Check if arguments of "ufunc" shadow variables in "cctx". * Return OK or FAIL. */ static int check_args_shadowing(ufunc_T *ufunc, cctx_T *cctx) { int i; char_u *arg; int r = OK; // Make sure arguments are not found when compiling a second time. ufunc->uf_args_visible = 0; // Check for arguments shadowing variables from the context. for (i = 0; i < ufunc->uf_args.ga_len; ++i) { arg = ((char_u **)(ufunc->uf_args.ga_data))[i]; if (check_defined(arg, STRLEN(arg), cctx, NULL, TRUE) == FAIL) { r = FAIL; break; } } ufunc->uf_args_visible = ufunc->uf_args.ga_len; return r; } /* * Get the compilation type that should be used for "ufunc". * Keep in sync with INSTRUCTIONS(). */ compiletype_T get_compile_type(ufunc_T *ufunc) { // Update uf_has_breakpoint if needed. 
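    // An active debug level or a breakpoint in this function forces a
    // debug compile.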
update_has_breakpoint(ufunc); if (debug_break_level > 0 || may_break_in_function(ufunc)) return CT_DEBUG; #ifdef FEAT_PROFILE if (do_profiling == PROF_YES) { if (!ufunc->uf_profiling && has_profiling(FALSE, ufunc->uf_name, NULL)) func_do_profile(ufunc); if (ufunc->uf_profiling) return CT_PROFILE; } #endif return CT_NONE; } /* * Add a function to the list of :def functions. * This sets "ufunc->uf_dfunc_idx" but the function isn't compiled yet. */ static int add_def_function(ufunc_T *ufunc) { dfunc_T *dfunc; if (def_functions.ga_len == 0) { // The first position is not used, so that a zero uf_dfunc_idx means it // wasn't set. if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; ++def_functions.ga_len; } // Add the function to "def_functions". if (GA_GROW_FAILS(&def_functions, 1)) return FAIL; dfunc = ((dfunc_T *)def_functions.ga_data) + def_functions.ga_len; CLEAR_POINTER(dfunc); dfunc->df_idx = def_functions.ga_len; ufunc->uf_dfunc_idx = dfunc->df_idx; dfunc->df_ufunc = ufunc; dfunc->df_name = vim_strsave(ufunc->uf_name); ga_init2(&dfunc->df_var_names, sizeof(char_u *), 10); ++dfunc->df_refcount; ++def_functions.ga_len; return OK; } /* * After ex_function() has collected all the function lines: parse and compile * the lines into instructions. * Adds the function to "def_functions". * When "check_return_type" is set then set ufunc->uf_ret_type to the type of * the return statement (used for lambda). When uf_ret_type is already set * then check that it matches. * When "profiling" is true add ISN_PROF_START instructions. * "outer_cctx" is set for a nested function. * This can be used recursively through compile_lambda(), which may reallocate * "def_functions". * Returns OK or FAIL. */ int compile_def_function( ufunc_T *ufunc, int check_return_type, compiletype_T compile_type, cctx_T *outer_cctx) { char_u *line = NULL; garray_T lines_to_free; char_u *p; char *errormsg = NULL; // error message cctx_T cctx; garray_T *instr; int did_emsg_before = did_emsg; int did_emsg_silent_before = did_emsg_silent; int ret = FAIL; sctx_T save_current_sctx = current_sctx; int save_estack_compiling = estack_compiling; int save_cmod_flags = cmdmod.cmod_flags; int do_estack_push; int new_def_function = FALSE; #ifdef FEAT_PROFILE int prof_lnum = -1; #endif int debug_lnum = -1; // allocated lines are freed at the end ga_init2(&lines_to_free, sizeof(char_u *), 50); // When using a function that was compiled before: Free old instructions. // The index is reused. Otherwise add a new entry in "def_functions". if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; isn_T *instr_dest = NULL; switch (compile_type) { case CT_PROFILE: #ifdef FEAT_PROFILE instr_dest = dfunc->df_instr_prof; break; #endif case CT_NONE: instr_dest = dfunc->df_instr; break; case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break; } if (instr_dest != NULL) // Was compiled in this mode before: Free old instructions. 
delete_def_function_contents(dfunc, FALSE); ga_clear_strings(&dfunc->df_var_names); } else { if (add_def_function(ufunc) == FAIL) return FAIL; new_def_function = TRUE; } if ((ufunc->uf_flags & FC_CLOSURE) && outer_cctx == NULL) { semsg(_(e_compiling_closure_without_context_str), printable_func_name(ufunc)); return FAIL; } ufunc->uf_def_status = UF_COMPILING; CLEAR_FIELD(cctx); cctx.ctx_compile_type = compile_type; cctx.ctx_ufunc = ufunc; cctx.ctx_lnum = -1; cctx.ctx_outer = outer_cctx; ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10); // Each entry on the type stack consists of two type pointers. ga_init2(&cctx.ctx_type_stack, sizeof(type2_T), 50); cctx.ctx_type_list = &ufunc->uf_type_list; ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50); instr = &cctx.ctx_instr; // Set the context to the function, it may be compiled when called from // another script. Set the script version to the most modern one. // The line number will be set in next_line_from_context(). current_sctx = ufunc->uf_script_ctx; current_sctx.sc_version = SCRIPT_VERSION_VIM9; // Don't use the flag from ":legacy" here. cmdmod.cmod_flags &= ~CMOD_LEGACY; // Make sure error messages are OK. do_estack_push = !estack_top_is_ufunc(ufunc, 1); if (do_estack_push) estack_push_ufunc(ufunc, 1); estack_compiling = TRUE; if (check_args_shadowing(ufunc, &cctx) == FAIL) goto erret; if (ufunc->uf_def_args.ga_len > 0) { int count = ufunc->uf_def_args.ga_len; int first_def_arg = ufunc->uf_args.ga_len - count; int i; char_u *arg; int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0); int did_set_arg_type = FALSE; // Produce instructions for the default values of optional arguments. SOURCING_LNUM = 0; // line number unknown for (i = 0; i < count; ++i) { type_T *val_type; int arg_idx = first_def_arg + i; where_T where = WHERE_INIT; int r; int jump_instr_idx = instr->ga_len; isn_T *isn; // Use a JUMP_IF_ARG_SET instruction to skip if the value was given. if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL) goto erret; // Make sure later arguments are not found. ufunc->uf_args_visible = arg_idx; arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i]; r = compile_expr0(&arg, &cctx); if (r == FAIL) goto erret; // If no type specified use the type of the default value. // Otherwise check that the default value type matches the // specified type. val_type = get_type_on_stack(&cctx, 0); where.wt_index = arg_idx + 1; if (ufunc->uf_arg_types[arg_idx] == &t_unknown) { did_set_arg_type = TRUE; ufunc->uf_arg_types[arg_idx] = val_type; } else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx], -1, where, &cctx, FALSE, FALSE) == FAIL) goto erret; if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL) goto erret; // set instruction index in JUMP_IF_ARG_SET to here isn = ((isn_T *)instr->ga_data) + jump_instr_idx; isn->isn_arg.jumparg.jump_where = instr->ga_len; } if (did_set_arg_type) set_function_type(ufunc); } ufunc->uf_args_visible = ufunc->uf_args.ga_len; /* * Loop over all the lines of the function and generate instructions. */ for (;;) { exarg_T ea; int starts_with_colon = FALSE; char_u *cmd; cmdmod_T local_cmdmod; // Bail out on the first error to avoid a flood of errors and report // the right line number when inside try/catch. 
if (did_emsg_before != did_emsg) goto erret; if (line != NULL && *line == '|') // the line continues after a '|' ++line; else if (line != NULL && *skipwhite(line) != NUL && !(*line == '#' && (line == cctx.ctx_line_start || VIM_ISWHITE(line[-1])))) { semsg(_(e_trailing_characters_str), line); goto erret; } else if (line != NULL && vim9_bad_comment(skipwhite(line))) goto erret; else { line = next_line_from_context(&cctx, FALSE); if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len) { // beyond the last line #ifdef FEAT_PROFILE if (cctx.ctx_skip != SKIP_YES) may_generate_prof_end(&cctx, prof_lnum); #endif break; } // Make a copy, splitting off nextcmd and removing trailing spaces // may change it. if (line != NULL) { line = vim_strsave(line); if (ga_add_string(&lines_to_free, line) == FAIL) goto erret; } } CLEAR_FIELD(ea); ea.cmdlinep = &line; ea.cmd = skipwhite(line); ea.skip = cctx.ctx_skip == SKIP_YES; if (*ea.cmd == '#') { // "#" starts a comment, but "#{" is an error if (vim9_bad_comment(ea.cmd)) goto erret; line = (char_u *)""; continue; } #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum && cctx.ctx_skip != SKIP_YES) { may_generate_prof_end(&cctx, prof_lnum); prof_lnum = cctx.ctx_lnum; generate_instr(&cctx, ISN_PROF_START); } #endif if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum && cctx.ctx_skip != SKIP_YES) { debug_lnum = cctx.ctx_lnum; generate_instr_debug(&cctx); } cctx.ctx_prev_lnum = cctx.ctx_lnum + 1; // Some things can be recognized by the first character. switch (*ea.cmd) { case '}': { // "}" ends a block scope scopetype_T stype = cctx.ctx_scope == NULL ? NO_SCOPE : cctx.ctx_scope->se_type; if (stype == BLOCK_SCOPE) { compile_endblock(&cctx); line = ea.cmd; } else { emsg(_(e_using_rcurly_outside_if_block_scope)); goto erret; } if (line != NULL) line = skipwhite(ea.cmd + 1); continue; } case '{': // "{" starts a block scope // "{'a': 1}->func() is something else if (ends_excmd(*skipwhite(ea.cmd + 1))) { line = compile_block(ea.cmd, &cctx); continue; } break; } /* * COMMAND MODIFIERS */ cctx.ctx_has_cmdmod = FALSE; if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE) == FAIL) goto erret; generate_cmdmods(&cctx, &local_cmdmod); undo_cmdmod(&local_cmdmod); // Check if there was a colon after the last command modifier or before // the current position. for (p = ea.cmd; p >= line; --p) { if (*p == ':') starts_with_colon = TRUE; if (p < ea.cmd && !VIM_ISWHITE(*p)) break; } // Skip ":call" to get to the function name, unless using :legacy p = ea.cmd; if (!(local_cmdmod.cmod_flags & CMOD_LEGACY)) { if (checkforcmd(&ea.cmd, "call", 3)) { if (*ea.cmd == '(') // not for "call()" ea.cmd = p; else ea.cmd = skipwhite(ea.cmd); } if (!starts_with_colon) { int assign; // Check for assignment after command modifiers. 
assign = may_compile_assignment(&ea, &line, &cctx); if (assign == OK) goto nextline; if (assign == FAIL) goto erret; } } /* * COMMAND after range * 'text'->func() should not be confused with 'a mark * 0z1234->func() should not be confused with a zero line number * "++nr" and "--nr" are eval commands * in "$ENV->func()" the "$" is not a range * "123->func()" is a method call */ cmd = ea.cmd; if ((*cmd != '$' || starts_with_colon) && (starts_with_colon || !(*cmd == '\'' || (cmd[0] == '0' && cmd[1] == 'z') || (cmd[0] != NUL && cmd[0] == cmd[1] && (*cmd == '+' || *cmd == '-')) || number_method(cmd)))) { ea.cmd = skip_range(ea.cmd, TRUE, NULL); if (ea.cmd > cmd) { if (!starts_with_colon && !(local_cmdmod.cmod_flags & CMOD_LEGACY)) { semsg(_(e_colon_required_before_range_str), cmd); goto erret; } ea.addr_count = 1; if (ends_excmd2(line, ea.cmd)) { // A range without a command: jump to the line. generate_EXEC(&cctx, ISN_EXECRANGE, vim_strnsave(cmd, ea.cmd - cmd)); line = ea.cmd; goto nextline; } } } p = find_ex_command(&ea, NULL, starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY) ? NULL : item_exists, &cctx); if (p == NULL) { if (cctx.ctx_skip != SKIP_YES) semsg(_(e_ambiguous_use_of_user_defined_command_str), ea.cmd); goto erret; } // When using ":legacy cmd" always use compile_exec(). if (local_cmdmod.cmod_flags & CMOD_LEGACY) { char_u *start = ea.cmd; switch (ea.cmdidx) { case CMD_if: case CMD_elseif: case CMD_else: case CMD_endif: case CMD_for: case CMD_endfor: case CMD_continue: case CMD_break: case CMD_while: case CMD_endwhile: case CMD_try: case CMD_catch: case CMD_finally: case CMD_endtry: semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd); goto erret; default: break; } // ":legacy return expr" needs to be handled differently. if (checkforcmd(&start, "return", 4)) ea.cmdidx = CMD_return; else ea.cmdidx = CMD_legacy; } if (p == ea.cmd && ea.cmdidx != CMD_SIZE) { // "eval" is used for "val->func()" and "var" for "var = val", then // "p" is equal to "ea.cmd" for a valid command. 
if (ea.cmdidx == CMD_eval || ea.cmdidx == CMD_var) ; else if (cctx.ctx_skip == SKIP_YES) { line += STRLEN(line); goto nextline; } else { semsg(_(e_command_not_recognized_str), ea.cmd); goto erret; } } if (cctx.ctx_had_return && ea.cmdidx != CMD_elseif && ea.cmdidx != CMD_else && ea.cmdidx != CMD_endif && ea.cmdidx != CMD_endfor && ea.cmdidx != CMD_endwhile && ea.cmdidx != CMD_catch && ea.cmdidx != CMD_finally && ea.cmdidx != CMD_endtry) { emsg(_(e_unreachable_code_after_return)); goto erret; } p = skipwhite(p); if (ea.cmdidx != CMD_SIZE && ea.cmdidx != CMD_write && ea.cmdidx != CMD_read) { if (ea.cmdidx >= 0) ea.argt = excmd_get_argt(ea.cmdidx); if ((ea.argt & EX_BANG) && *p == '!') { ea.forceit = TRUE; p = skipwhite(p + 1); } if ((ea.argt & EX_RANGE) == 0 && ea.addr_count > 0) { emsg(_(e_no_range_allowed)); goto erret; } } switch (ea.cmdidx) { case CMD_def: case CMD_function: ea.arg = p; line = compile_nested_function(&ea, &cctx, &lines_to_free); break; case CMD_return: line = compile_return(p, check_return_type, local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx); cctx.ctx_had_return = TRUE; break; case CMD_let: emsg(_(e_cannot_use_let_in_vim9_script)); break; case CMD_var: case CMD_final: case CMD_const: case CMD_increment: case CMD_decrement: line = compile_assignment(p, &ea, ea.cmdidx, &cctx); if (line == p) { emsg(_(e_invalid_assignment)); line = NULL; } break; case CMD_unlet: case CMD_unlockvar: case CMD_lockvar: line = compile_unletlock(p, &ea, &cctx); break; case CMD_import: emsg(_(e_import_can_only_be_used_in_script)); line = NULL; break; case CMD_if: line = compile_if(p, &cctx); break; case CMD_elseif: line = compile_elseif(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_else: line = compile_else(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endif: line = compile_endif(p, &cctx); break; case CMD_while: line = compile_while(p, &cctx); break; case CMD_endwhile: line = compile_endwhile(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_for: line = compile_for(p, &cctx); break; case CMD_endfor: line = compile_endfor(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_continue: line = compile_continue(p, &cctx); break; case CMD_break: line = compile_break(p, &cctx); break; case CMD_try: line = compile_try(p, &cctx); break; case CMD_catch: line = compile_catch(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_finally: line = compile_finally(p, &cctx); cctx.ctx_had_return = FALSE; break; case CMD_endtry: line = compile_endtry(p, &cctx); break; case CMD_throw: line = compile_throw(p, &cctx); break; case CMD_eval: line = compile_eval(p, &cctx); break; case CMD_echo: case CMD_echon: case CMD_execute: case CMD_echomsg: case CMD_echoerr: case CMD_echoconsole: line = compile_mult_expr(p, ea.cmdidx, &cctx); break; case CMD_put: ea.cmd = cmd; line = compile_put(p, &ea, &cctx); break; case CMD_substitute: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; if (cctx.ctx_skip == SKIP_YES) line = (char_u *)""; else { ea.arg = p; line = compile_substitute(line, &ea, &cctx); } break; case CMD_redir: ea.arg = p; line = compile_redir(line, &ea, &cctx); break; case CMD_cexpr: case CMD_lexpr: case CMD_caddexpr: case CMD_laddexpr: case CMD_cgetexpr: case CMD_lgetexpr: #ifdef FEAT_QUICKFIX ea.arg = p; line = compile_cexpr(line, &ea, &cctx); #else ex_ni(&ea); line = NULL; #endif break; case CMD_append: case CMD_change: case CMD_insert: case CMD_k: case CMD_t: case CMD_xit: not_in_vim9(&ea); goto erret; case CMD_SIZE: if (cctx.ctx_skip != SKIP_YES) { 
semsg(_(e_invalid_command_str), ea.cmd); goto erret; } // We don't check for a next command here. line = (char_u *)""; break; case CMD_lua: case CMD_mzscheme: case CMD_perl: case CMD_py3: case CMD_python3: case CMD_python: case CMD_pythonx: case CMD_ruby: case CMD_tcl: ea.arg = p; if (vim_strchr(line, '\n') == NULL) line = compile_exec(line, &ea, &cctx); else // heredoc lines have been concatenated with NL // characters in get_function_body() line = compile_script(line, &cctx); break; case CMD_vim9script: if (cctx.ctx_skip != SKIP_YES) { emsg(_(e_vim9script_can_only_be_used_in_script)); goto erret; } line = (char_u *)""; break; case CMD_global: if (check_global_and_subst(ea.cmd, p) == FAIL) goto erret; // FALLTHROUGH default: // Not recognized, execute with do_cmdline_cmd(). ea.arg = p; line = compile_exec(line, &ea, &cctx); break; } nextline: if (line == NULL) goto erret; line = skipwhite(line); // Undo any command modifiers. generate_undo_cmdmods(&cctx); if (cctx.ctx_type_stack.ga_len < 0) { iemsg("Type stack underflow"); goto erret; } } if (cctx.ctx_scope != NULL) { if (cctx.ctx_scope->se_type == IF_SCOPE) emsg(_(e_missing_endif)); else if (cctx.ctx_scope->se_type == WHILE_SCOPE) emsg(_(e_missing_endwhile)); else if (cctx.ctx_scope->se_type == FOR_SCOPE) emsg(_(e_missing_endfor)); else emsg(_(e_missing_rcurly)); goto erret; } if (!cctx.ctx_had_return) { if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN) ufunc->uf_ret_type = &t_void; else if (ufunc->uf_ret_type->tt_type != VAR_VOID) { emsg(_(e_missing_return_statement)); goto erret; } // Return void if there is no return at the end. generate_instr(&cctx, ISN_RETURN_VOID); } // When compiled with ":silent!" and there was an error don't consider the // function compiled. if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; dfunc->df_deleted = FALSE; dfunc->df_script_seq = current_sctx.sc_seq; #ifdef FEAT_PROFILE if (cctx.ctx_compile_type == CT_PROFILE) { dfunc->df_instr_prof = instr->ga_data; dfunc->df_instr_prof_count = instr->ga_len; } else #endif if (cctx.ctx_compile_type == CT_DEBUG) { dfunc->df_instr_debug = instr->ga_data; dfunc->df_instr_debug_count = instr->ga_len; } else { dfunc->df_instr = instr->ga_data; dfunc->df_instr_count = instr->ga_len; } dfunc->df_varcount = dfunc->df_var_names.ga_len; dfunc->df_has_closure = cctx.ctx_has_closure; if (cctx.ctx_outer_used) ufunc->uf_flags |= FC_CLOSURE; ufunc->uf_def_status = UF_COMPILED; } ret = OK; erret: if (ufunc->uf_def_status == UF_COMPILING) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; // Compiling aborted, free the generated instructions. clear_instr_ga(instr); VIM_CLEAR(dfunc->df_name); ga_clear_strings(&dfunc->df_var_names); // If using the last entry in the table and it was added above, we // might as well remove it. 
if (!dfunc->df_deleted && new_def_function && ufunc->uf_dfunc_idx == def_functions.ga_len - 1) { --def_functions.ga_len; ufunc->uf_dfunc_idx = 0; } ufunc->uf_def_status = UF_COMPILE_ERROR; while (cctx.ctx_scope != NULL) drop_scope(&cctx); if (errormsg != NULL) emsg(errormsg); else if (did_emsg == did_emsg_before) emsg(_(e_compiling_def_function_failed)); } if (cctx.ctx_redir_lhs.lhs_name != NULL) { if (ret == OK) { emsg(_(e_missing_redir_end)); ret = FAIL; } vim_free(cctx.ctx_redir_lhs.lhs_name); vim_free(cctx.ctx_redir_lhs.lhs_whole); } current_sctx = save_current_sctx; estack_compiling = save_estack_compiling; cmdmod.cmod_flags = save_cmod_flags; if (do_estack_push) estack_pop(); ga_clear_strings(&lines_to_free); free_locals(&cctx); ga_clear(&cctx.ctx_type_stack); return ret; } void set_function_type(ufunc_T *ufunc) { int varargs = ufunc->uf_va_name != NULL; int argcount = ufunc->uf_args.ga_len; // Create a type for the function, with the return type and any // argument types. // A vararg is included in uf_args.ga_len but not in uf_arg_types. // The type is included in "tt_args". if (argcount > 0 || varargs) { if (ufunc->uf_type_list.ga_itemsize == 0) ga_init2(&ufunc->uf_type_list, sizeof(type_T *), 10); ufunc->uf_func_type = alloc_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); // Add argument types to the function type. if (func_type_add_arg_types(ufunc->uf_func_type, argcount + varargs, &ufunc->uf_type_list) == FAIL) return; ufunc->uf_func_type->tt_argcount = argcount + varargs; ufunc->uf_func_type->tt_min_argcount = argcount - ufunc->uf_def_args.ga_len; if (ufunc->uf_arg_types == NULL) { int i; // lambda does not have argument types. for (i = 0; i < argcount; ++i) ufunc->uf_func_type->tt_args[i] = &t_any; } else mch_memmove(ufunc->uf_func_type->tt_args, ufunc->uf_arg_types, sizeof(type_T *) * argcount); if (varargs) { ufunc->uf_func_type->tt_args[argcount] = ufunc->uf_va_type == NULL ? &t_list_any : ufunc->uf_va_type; ufunc->uf_func_type->tt_flags = TTFLAG_VARARGS; } } else // No arguments, can use a predefined type. ufunc->uf_func_type = get_func_type(ufunc->uf_ret_type, argcount, &ufunc->uf_type_list); } /* * Free all instructions for "dfunc" except df_name. */ static void delete_def_function_contents(dfunc_T *dfunc, int mark_deleted) { int idx; ga_clear(&dfunc->df_def_args_isn); ga_clear_strings(&dfunc->df_var_names); if (dfunc->df_instr != NULL) { for (idx = 0; idx < dfunc->df_instr_count; ++idx) delete_instr(dfunc->df_instr + idx); VIM_CLEAR(dfunc->df_instr); dfunc->df_instr = NULL; } if (dfunc->df_instr_debug != NULL) { for (idx = 0; idx < dfunc->df_instr_debug_count; ++idx) delete_instr(dfunc->df_instr_debug + idx); VIM_CLEAR(dfunc->df_instr_debug); dfunc->df_instr_debug = NULL; } #ifdef FEAT_PROFILE if (dfunc->df_instr_prof != NULL) { for (idx = 0; idx < dfunc->df_instr_prof_count; ++idx) delete_instr(dfunc->df_instr_prof + idx); VIM_CLEAR(dfunc->df_instr_prof); dfunc->df_instr_prof = NULL; } #endif if (mark_deleted) dfunc->df_deleted = TRUE; if (dfunc->df_ufunc != NULL) dfunc->df_ufunc->uf_def_status = UF_NOT_COMPILED; } /* * When a user function is deleted, clear the contents of any associated def * function, unless another user function still uses it. * The position in def_functions can be re-used. 
*/ void unlink_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; if (--dfunc->df_refcount <= 0) delete_def_function_contents(dfunc, TRUE); ufunc->uf_def_status = UF_NOT_COMPILED; ufunc->uf_dfunc_idx = 0; if (dfunc->df_ufunc == ufunc) dfunc->df_ufunc = NULL; } } /* * Used when a user function refers to an existing dfunc. */ void link_def_function(ufunc_T *ufunc) { if (ufunc->uf_dfunc_idx > 0) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + ufunc->uf_dfunc_idx; ++dfunc->df_refcount; } } #if defined(EXITFREE) || defined(PROTO) /* * Free all functions defined with ":def". */ void free_def_functions(void) { int idx; for (idx = 0; idx < def_functions.ga_len; ++idx) { dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data) + idx; delete_def_function_contents(dfunc, TRUE); vim_free(dfunc->df_name); } ga_clear(&def_functions); } #endif #endif // FEAT_EVAL
generate_loadvar( cctx_T *cctx, assign_dest_T dest, char_u *name, lvar_T *lvar, type_T *type) { switch (dest) { case dest_option: case dest_func_option: generate_LOAD(cctx, ISN_LOADOPT, 0, name, type); break; case dest_global: if (vim_strchr(name, AUTOLOAD_CHAR) == NULL) { if (name[2] == NUL) generate_instr_type(cctx, ISN_LOADGDICT, &t_dict_any); else generate_LOAD(cctx, ISN_LOADG, 0, name + 2, type); } else generate_LOAD(cctx, ISN_LOADAUTO, 0, name, type); break; case dest_buffer: generate_LOAD(cctx, ISN_LOADB, 0, name + 2, type); break; case dest_window: generate_LOAD(cctx, ISN_LOADW, 0, name + 2, type); break; case dest_tab: generate_LOAD(cctx, ISN_LOADT, 0, name + 2, type); break; case dest_script: compile_load_scriptvar(cctx, name + (name[1] == ':' ? 2 : 0), NULL, NULL); break; case dest_env: // Include $ in the name here generate_LOAD(cctx, ISN_LOADENV, 0, name, type); break; case dest_reg: generate_LOAD(cctx, ISN_LOADREG, name[1], NULL, &t_string); break; case dest_vimvar: generate_LOADV(cctx, name + 2); break; case dest_local: if (lvar->lv_from_outer > 0) generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type); else generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type); break; case dest_expr: // list or dict value should already be on the stack. break; } }
generate_loadvar( cctx_T *cctx, assign_dest_T dest, char_u *name, lvar_T *lvar, type_T *type) { switch (dest) { case dest_option: case dest_func_option: generate_LOAD(cctx, ISN_LOADOPT, 0, name, type); break; case dest_global: if (vim_strchr(name, AUTOLOAD_CHAR) == NULL) { if (name[2] == NUL) generate_instr_type(cctx, ISN_LOADGDICT, &t_dict_any); else generate_LOAD(cctx, ISN_LOADG, 0, name + 2, type); } else generate_LOAD(cctx, ISN_LOADAUTO, 0, name, type); break; case dest_buffer: generate_LOAD(cctx, ISN_LOADB, 0, name + 2, type); break; case dest_window: generate_LOAD(cctx, ISN_LOADW, 0, name + 2, type); break; case dest_tab: generate_LOAD(cctx, ISN_LOADT, 0, name + 2, type); break; case dest_script: compile_load_scriptvar(cctx, name + (name[1] == ':' ? 2 : 0), NULL, NULL); break; case dest_env: // Include $ in the name here generate_LOAD(cctx, ISN_LOADENV, 0, name, type); break; case dest_reg: generate_LOAD(cctx, ISN_LOADREG, name[1], NULL, &t_string); break; case dest_vimvar: generate_LOADV(cctx, name + 2); break; case dest_local: if (cctx->ctx_skip != SKIP_YES) { if (lvar->lv_from_outer > 0) generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type); else generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type); } break; case dest_expr: // list or dict value should already be on the stack. break; } }
{'added': [(1168, '\t if (cctx->ctx_skip != SKIP_YES)'), (1169, '\t {'), (1170, '\t\tif (lvar->lv_from_outer > 0)'), (1171, '\t\t generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer,'), (1173, '\t\telse'), (1174, '\t\t generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type);'), (1175, '\t }'), (1958, ' if (cctx->ctx_skip == SKIP_YES)'), (1959, '\treturn OK;'), (1960, '')], 'deleted': [(1168, '\t if (lvar->lv_from_outer > 0)'), (1169, '\t\tgenerate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer,'), (1171, '\t else'), (1172, '\t\tgenerate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type);')]}
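The diff field above is stored as added/deleted line tuples; as a readability aid, a plain-code restatement of its two hunks follows. This is only a sketch reconstructed from the diff and the func_after field of this record (the function containing the second hunk is not included in the record), not additional source taken from the repository.

/* Hunk 1 (around line 1168, the dest_local case of generate_loadvar()):
 * emit the load instructions only when code is actually being generated;
 * the guard keeps "lvar" from being dereferenced while a block is being
 * skipped, consistent with the record's CWE-476 (NULL pointer
 * dereference) label. */
if (cctx->ctx_skip != SKIP_YES)
{
    if (lvar->lv_from_outer > 0)
	generate_LOADOUTER(cctx, lvar->lv_idx, lvar->lv_from_outer, type);
    else
	generate_LOAD(cctx, ISN_LOAD, lvar->lv_idx, NULL, type);
}

/* Hunk 2 (around line 1958; its enclosing function is not part of this
 * record): bail out early when the surrounding block is being skipped. */
if (cctx->ctx_skip == SKIP_YES)
    return OK;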
10
4
2761
15117
https://github.com/vim/vim
CVE-2022-2874
['CWE-476']
upnpsoap.c
GetOutboundPinholeTimeout
/* $Id: upnpsoap.c,v 1.151 2018/03/13 10:32:53 nanard Exp $ */ /* vim: tabstop=4 shiftwidth=4 noexpandtab * MiniUPnP project * http://miniupnp.free.fr/ or https://miniupnp.tuxfamily.org/ * (c) 2006-2018 Thomas Bernard * This software is subject to the conditions detailed * in the LICENCE file provided within the distribution */ #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <string.h> #include <errno.h> #include <sys/socket.h> #include <unistd.h> #include <syslog.h> #include <sys/types.h> #include <netinet/in.h> #include <arpa/inet.h> #include <netdb.h> #include <ctype.h> #include "macros.h" #include "config.h" #include "upnpglobalvars.h" #include "upnphttp.h" #include "upnpsoap.h" #include "upnpreplyparse.h" #include "upnpredirect.h" #include "upnppinhole.h" #include "getifaddr.h" #include "getifstats.h" #include "getconnstatus.h" #include "upnpurns.h" #include "upnputils.h" /* utility function */ static int is_numeric(const char * s) { while(*s) { if(*s < '0' || *s > '9') return 0; s++; } return 1; } static void BuildSendAndCloseSoapResp(struct upnphttp * h, const char * body, int bodylen) { static const char beforebody[] = "<?xml version=\"1.0\"?>\r\n" "<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" " "s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">" "<s:Body>"; static const char afterbody[] = "</s:Body>" "</s:Envelope>\r\n"; int r = BuildHeader_upnphttp(h, 200, "OK", sizeof(beforebody) - 1 + sizeof(afterbody) - 1 + bodylen ); if(r >= 0) { memcpy(h->res_buf + h->res_buflen, beforebody, sizeof(beforebody) - 1); h->res_buflen += sizeof(beforebody) - 1; memcpy(h->res_buf + h->res_buflen, body, bodylen); h->res_buflen += bodylen; memcpy(h->res_buf + h->res_buflen, afterbody, sizeof(afterbody) - 1); h->res_buflen += sizeof(afterbody) - 1; } else { BuildResp2_upnphttp(h, 500, "Internal Server Error", NULL, 0); } SendRespAndClose_upnphttp(h); } static void GetConnectionTypeInfo(struct upnphttp * h, const char * action, const char * ns) { #if 0 static const char resp[] = "<u:GetConnectionTypeInfoResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\">" "<NewConnectionType>IP_Routed</NewConnectionType>" "<NewPossibleConnectionTypes>IP_Routed</NewPossibleConnectionTypes>" "</u:GetConnectionTypeInfoResponse>"; #endif static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewConnectionType>IP_Routed</NewConnectionType>" "<NewPossibleConnectionTypes>IP_Routed</NewPossibleConnectionTypes>" "</u:%sResponse>"; char body[512]; int bodylen; bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } /* maximum value for a UPNP ui4 type variable */ #define UPNP_UI4_MAX (4294967295ul) static void GetTotalBytesSent(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewTotalBytesSent>%lu</NewTotalBytesSent>" "</u:%sResponse>"; char body[512]; int bodylen; struct ifdata data; r = getifstats(ext_if_name, &data); bodylen = snprintf(body, sizeof(body), resp, action, ns, /* was "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" */ #ifdef UPNP_STRICT r<0?0:(data.obytes & UPNP_UI4_MAX), action); #else /* UPNP_STRICT */ r<0?0:data.obytes, action); #endif /* UPNP_STRICT */ BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetTotalBytesReceived(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" 
"<NewTotalBytesReceived>%lu</NewTotalBytesReceived>" "</u:%sResponse>"; char body[512]; int bodylen; struct ifdata data; r = getifstats(ext_if_name, &data); /* TotalBytesReceived * This variable represents the cumulative counter for total number of * bytes received downstream across all connection service instances on * WANDevice. The count rolls over to 0 after it reaching the maximum * value (2^32)-1. */ bodylen = snprintf(body, sizeof(body), resp, action, ns, /* was "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" */ #ifdef UPNP_STRICT r<0?0:(data.ibytes & UPNP_UI4_MAX), action); #else /* UPNP_STRICT */ r<0?0:data.ibytes, action); #endif /* UPNP_STRICT */ BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetTotalPacketsSent(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewTotalPacketsSent>%lu</NewTotalPacketsSent>" "</u:%sResponse>"; char body[512]; int bodylen; struct ifdata data; r = getifstats(ext_if_name, &data); bodylen = snprintf(body, sizeof(body), resp, action, ns,/*"urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1",*/ #ifdef UPNP_STRICT r<0?0:(data.opackets & UPNP_UI4_MAX), action); #else /* UPNP_STRICT */ r<0?0:data.opackets, action); #endif /* UPNP_STRICT */ BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetTotalPacketsReceived(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewTotalPacketsReceived>%lu</NewTotalPacketsReceived>" "</u:%sResponse>"; char body[512]; int bodylen; struct ifdata data; r = getifstats(ext_if_name, &data); bodylen = snprintf(body, sizeof(body), resp, action, ns, /* was "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" */ #ifdef UPNP_STRICT r<0?0:(data.ipackets & UPNP_UI4_MAX), action); #else /* UPNP_STRICT */ r<0?0:data.ipackets, action); #endif /* UPNP_STRICT */ BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetCommonLinkProperties(struct upnphttp * h, const char * action, const char * ns) { /* WANAccessType : set depending on the hardware : * DSL, POTS (plain old Telephone service), Cable, Ethernet */ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewWANAccessType>%s</NewWANAccessType>" "<NewLayer1UpstreamMaxBitRate>%lu</NewLayer1UpstreamMaxBitRate>" "<NewLayer1DownstreamMaxBitRate>%lu</NewLayer1DownstreamMaxBitRate>" "<NewPhysicalLinkStatus>%s</NewPhysicalLinkStatus>" "</u:%sResponse>"; char body[2048]; int bodylen; struct ifdata data; const char * status = "Up"; /* Up, Down (Required), * Initializing, Unavailable (Optional) */ const char * wan_access_type = "Cable"; /* DSL, POTS, Cable, Ethernet */ char ext_ip_addr[INET_ADDRSTRLEN]; if((downstream_bitrate == 0) || (upstream_bitrate == 0)) { if(getifstats(ext_if_name, &data) >= 0) { if(downstream_bitrate == 0) downstream_bitrate = data.baudrate; if(upstream_bitrate == 0) upstream_bitrate = data.baudrate; } } if(getifaddr(ext_if_name, ext_ip_addr, INET_ADDRSTRLEN, NULL, NULL) < 0) { status = "Down"; } bodylen = snprintf(body, sizeof(body), resp, action, ns, /* was "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" */ wan_access_type, upstream_bitrate, downstream_bitrate, status, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetStatusInfo(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewConnectionStatus>%s</NewConnectionStatus>" 
"<NewLastConnectionError>ERROR_NONE</NewLastConnectionError>" "<NewUptime>%ld</NewUptime>" "</u:%sResponse>"; char body[512]; int bodylen; time_t uptime; const char * status; /* ConnectionStatus possible values : * Unconfigured, Connecting, Connected, PendingDisconnect, * Disconnecting, Disconnected */ status = get_wan_connection_status_str(ext_if_name); uptime = upnp_get_uptime(); bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ status, (long)uptime, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetNATRSIPStatus(struct upnphttp * h, const char * action, const char * ns) { #if 0 static const char resp[] = "<u:GetNATRSIPStatusResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\">" "<NewRSIPAvailable>0</NewRSIPAvailable>" "<NewNATEnabled>1</NewNATEnabled>" "</u:GetNATRSIPStatusResponse>"; UNUSED(action); #endif static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewRSIPAvailable>0</NewRSIPAvailable>" "<NewNATEnabled>1</NewNATEnabled>" "</u:%sResponse>"; char body[512]; int bodylen; /* 2.2.9. RSIPAvailable * This variable indicates if Realm-specific IP (RSIP) is available * as a feature on the InternetGatewayDevice. RSIP is being defined * in the NAT working group in the IETF to allow host-NATing using * a standard set of message exchanges. It also allows end-to-end * applications that otherwise break if NAT is introduced * (e.g. IPsec-based VPNs). * A gateway that does not support RSIP should set this variable to 0. */ bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetExternalIPAddress(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewExternalIPAddress>%s</NewExternalIPAddress>" "</u:%sResponse>"; char body[512]; int bodylen; char ext_ip_addr[INET_ADDRSTRLEN]; /* Does that method need to work with IPv6 ? 
* There is usually no NAT with IPv6 */ #ifndef MULTIPLE_EXTERNAL_IP struct in_addr addr; if(use_ext_ip_addr) { strncpy(ext_ip_addr, use_ext_ip_addr, INET_ADDRSTRLEN); ext_ip_addr[INET_ADDRSTRLEN - 1] = '\0'; } else if(getifaddr(ext_if_name, ext_ip_addr, INET_ADDRSTRLEN, &addr, NULL) < 0) { syslog(LOG_ERR, "Failed to get ip address for interface %s", ext_if_name); strncpy(ext_ip_addr, "0.0.0.0", INET_ADDRSTRLEN); } if (addr_is_reserved(&addr)) strncpy(ext_ip_addr, "0.0.0.0", INET_ADDRSTRLEN); #else struct lan_addr_s * lan_addr; strncpy(ext_ip_addr, "0.0.0.0", INET_ADDRSTRLEN); for(lan_addr = lan_addrs.lh_first; lan_addr != NULL; lan_addr = lan_addr->list.le_next) { if( (h->clientaddr.s_addr & lan_addr->mask.s_addr) == (lan_addr->addr.s_addr & lan_addr->mask.s_addr)) { strncpy(ext_ip_addr, lan_addr->ext_ip_str, INET_ADDRSTRLEN); break; } } #endif if (strcmp(ext_ip_addr, "0.0.0.0") == 0) { SoapError(h, 501, "Action Failed"); return; } bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ ext_ip_addr, action); BuildSendAndCloseSoapResp(h, body, bodylen); } /* AddPortMapping method of WANIPConnection Service * Ignored argument : NewEnabled */ static void AddPortMapping(struct upnphttp * h, const char * action, const char * ns) { int r; /*static const char resp[] = "<u:AddPortMappingResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\"/>";*/ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\"/>"; char body[512]; int bodylen; struct NameValueParserData data; char * int_ip, * int_port, * ext_port, * protocol, * desc; char * leaseduration_str; unsigned int leaseduration; char * r_host; unsigned short iport, eport; struct hostent *hp; /* getbyhostname() */ char ** ptr; /* getbyhostname() */ struct in_addr result_ip;/*unsigned char result_ip[16];*/ /* inet_pton() */ ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); int_ip = GetValueFromNameValueList(&data, "NewInternalClient"); if (int_ip) { /* trim */ while(int_ip[0] == ' ') int_ip++; } #ifdef UPNP_STRICT if (!int_ip || int_ip[0] == '\0') { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #endif /* IGD 2 MUST support both wildcard and specific IP address values * for RemoteHost (only the wildcard value was REQUIRED in release 1.0) */ r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif #endif #ifndef UPNP_STRICT /* if <NewInternalClient> arg is empty, use client address * see https://github.com/miniupnp/miniupnp/issues/236 */ if (!int_ip || int_ip[0] == '\0') { int_ip = h->clientaddr_str; memcpy(&result_ip, &(h->clientaddr), sizeof(struct in_addr)); } else #endif /* if ip not valid assume hostname and convert */ if (inet_pton(AF_INET, int_ip, &result_ip) <= 0) { hp = gethostbyname(int_ip); if(hp && hp->h_addrtype == AF_INET) { for(ptr = hp->h_addr_list; ptr && *ptr; ptr++) { int_ip = inet_ntoa(*((struct in_addr *) *ptr)); result_ip = *((struct in_addr *) *ptr); /* TODO : deal with more than one ip per hostname */ break; } } else { syslog(LOG_ERR, "Failed to convert hostname '%s' to ip address", int_ip); ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } } /* check if NewInternalAddress is the client address */ if(GETFLAG(SECUREMODEMASK)) { if(h->clientaddr.s_addr != result_ip.s_addr) { syslog(LOG_INFO, "Client %s tried to redirect port to 
%s", inet_ntoa(h->clientaddr), int_ip); ClearNameValueList(&data); SoapError(h, 718, "ConflictInMappingEntry"); return; } } int_port = GetValueFromNameValueList(&data, "NewInternalPort"); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); desc = GetValueFromNameValueList(&data, "NewPortMappingDescription"); leaseduration_str = GetValueFromNameValueList(&data, "NewLeaseDuration"); if (!int_port || !ext_port || !protocol) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } eport = (unsigned short)atoi(ext_port); iport = (unsigned short)atoi(int_port); if (strcmp(ext_port, "*") == 0 || eport == 0) { ClearNameValueList(&data); SoapError(h, 716, "Wildcard not permited in ExtPort"); return; } leaseduration = leaseduration_str ? atoi(leaseduration_str) : 0; #ifdef IGD_V2 /* PortMappingLeaseDuration can be either a value between 1 and * 604800 seconds or the zero value (for infinite lease time). * Note that an infinite lease time can be only set by out-of-band * mechanisms like WWW-administration, remote management or local * management. * If a control point uses the value 0 to indicate an infinite lease * time mapping, it is REQUIRED that gateway uses the maximum value * instead (e.g. 604800 seconds) */ if(leaseduration == 0 || leaseduration > 604800) leaseduration = 604800; #endif syslog(LOG_INFO, "%s: ext port %hu to %s:%hu protocol %s for: %s leaseduration=%u rhost=%s", action, eport, int_ip, iport, protocol, desc, leaseduration, r_host ? r_host : "NULL"); r = upnp_redirect(r_host, eport, int_ip, iport, protocol, desc, leaseduration); ClearNameValueList(&data); /* possible error codes for AddPortMapping : * 402 - Invalid Args * 501 - Action Failed * 715 - Wildcard not permited in SrcAddr * 716 - Wildcard not permited in ExtPort * 718 - ConflictInMappingEntry * 724 - SamePortValuesRequired (deprecated in IGD v2) * 725 - OnlyPermanentLeasesSupported The NAT implementation only supports permanent lease times on port mappings (deprecated in IGD v2) * 726 - RemoteHostOnlySupportsWildcard RemoteHost must be a wildcard and cannot be a specific IP address or DNS name (deprecated in IGD v2) * 727 - ExternalPortOnlySupportsWildcard ExternalPort must be a wildcard and cannot be a specific port value (deprecated in IGD v2) * 728 - NoPortMapsAvailable There are not enough free ports available to complete the mapping (added in IGD v2) * 729 - ConflictWithOtherMechanisms (added in IGD v2) */ switch(r) { case 0: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*SERVICE_TYPE_WANIPC*/); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -4: #ifdef IGD_V2 SoapError(h, 729, "ConflictWithOtherMechanisms"); break; #endif /* IGD_V2 */ case -2: /* already redirected */ case -3: /* not permitted */ SoapError(h, 718, "ConflictInMappingEntry"); break; default: SoapError(h, 501, "ActionFailed"); } } /* AddAnyPortMapping was added in WANIPConnection v2 */ static void AddAnyPortMapping(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewReservedPort>%hu</NewReservedPort>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * int_ip, * int_port, * ext_port, * protocol, * desc; const char * r_host; unsigned short iport, eport; const char * leaseduration_str; unsigned int leaseduration; struct hostent *hp; /* getbyhostname() */ char ** ptr; /* getbyhostname() */ struct in_addr 
result_ip;/*unsigned char result_ip[16];*/ /* inet_pton() */ ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); int_port = GetValueFromNameValueList(&data, "NewInternalPort"); int_ip = GetValueFromNameValueList(&data, "NewInternalClient"); /* NewEnabled */ desc = GetValueFromNameValueList(&data, "NewPortMappingDescription"); leaseduration_str = GetValueFromNameValueList(&data, "NewLeaseDuration"); leaseduration = leaseduration_str ? atoi(leaseduration_str) : 0; if(leaseduration == 0) leaseduration = 604800; if (!int_ip || !ext_port || !int_port) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } eport = (unsigned short)atoi(ext_port); iport = (unsigned short)atoi(int_port); if(iport == 0 || !is_numeric(ext_port)) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif #endif /* if ip not valid assume hostname and convert */ if (inet_pton(AF_INET, int_ip, &result_ip) <= 0) { hp = gethostbyname(int_ip); if(hp && hp->h_addrtype == AF_INET) { for(ptr = hp->h_addr_list; ptr && *ptr; ptr++) { int_ip = inet_ntoa(*((struct in_addr *) *ptr)); result_ip = *((struct in_addr *) *ptr); /* TODO : deal with more than one ip per hostname */ break; } } else { syslog(LOG_ERR, "Failed to convert hostname '%s' to ip address", int_ip); ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } } /* check if NewInternalAddress is the client address */ if(GETFLAG(SECUREMODEMASK)) { if(h->clientaddr.s_addr != result_ip.s_addr) { syslog(LOG_INFO, "Client %s tried to redirect port to %s", inet_ntoa(h->clientaddr), int_ip); ClearNameValueList(&data); SoapError(h, 606, "Action not authorized"); return; } } /* TODO : accept a different external port * have some smart strategy to choose the port */ for(;;) { r = upnp_redirect(r_host, eport, int_ip, iport, protocol, desc, leaseduration); if(r==-2 && eport < 65535) { eport++; } else { break; } } ClearNameValueList(&data); switch(r) { case 0: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ eport, action); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -2: /* already redirected */ SoapError(h, 718, "ConflictInMappingEntry"); break; case -3: /* not permitted */ SoapError(h, 606, "Action not authorized"); break; default: SoapError(h, 501, "ActionFailed"); } } static void GetSpecificPortMappingEntry(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewInternalPort>%u</NewInternalPort>" "<NewInternalClient>%s</NewInternalClient>" "<NewEnabled>1</NewEnabled>" "<NewPortMappingDescription>%s</NewPortMappingDescription>" "<NewLeaseDuration>%u</NewLeaseDuration>" "</u:%sResponse>"; char body[1024]; int bodylen; struct NameValueParserData data; const char * r_host, * ext_port, * protocol; unsigned short eport, iport; char int_ip[32]; char desc[64]; unsigned int leaseduration = 0; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol 
= GetValueFromNameValueList(&data, "NewProtocol"); #ifdef UPNP_STRICT if(!ext_port || !protocol || !r_host) #else if(!ext_port || !protocol) #endif { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif #endif eport = (unsigned short)atoi(ext_port); if(eport == 0) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } /* TODO : add r_host as an input parameter ... * We prevent several Port Mapping with same external port * but different remoteHost to be set up, so that is not * a priority. */ r = upnp_get_redirection_infos(eport, protocol, &iport, int_ip, sizeof(int_ip), desc, sizeof(desc), NULL, 0, &leaseduration); if(r < 0) { SoapError(h, 714, "NoSuchEntryInArray"); } else { syslog(LOG_INFO, "%s: rhost='%s' %s %s found => %s:%u desc='%s'", action, r_host ? r_host : "NULL", ext_port, protocol, int_ip, (unsigned int)iport, desc); bodylen = snprintf(body, sizeof(body), resp, action, ns/*SERVICE_TYPE_WANIPC*/, (unsigned int)iport, int_ip, desc, leaseduration, action); BuildSendAndCloseSoapResp(h, body, bodylen); } ClearNameValueList(&data); } static void DeletePortMapping(struct upnphttp * h, const char * action, const char * ns) { int r; /*static const char resp[] = "<u:DeletePortMappingResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\">" "</u:DeletePortMappingResponse>";*/ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * ext_port, * protocol; unsigned short eport; #ifdef UPNP_STRICT const char * r_host; #endif /* UPNP_STRICT */ ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); #ifdef UPNP_STRICT r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); #endif /* UPNP_STRICT */ #ifdef UPNP_STRICT if(!ext_port || !protocol || !r_host) #else if(!ext_port || !protocol) #endif /* UPNP_STRICT */ { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif /* UPNP_STRICT */ #endif /* SUPPORT_REMOTEHOST */ eport = (unsigned short)atoi(ext_port); if(eport == 0) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } syslog(LOG_INFO, "%s: external port: %hu, protocol: %s", action, eport, protocol); /* if in secure mode, check the IP * Removing a redirection is not a security threat, * just an annoyance for the user using it. So this is not * a priority. 
*/ if(GETFLAG(SECUREMODEMASK)) { char int_ip[32]; struct in_addr int_ip_addr; unsigned short iport; unsigned int leaseduration = 0; r = upnp_get_redirection_infos(eport, protocol, &iport, int_ip, sizeof(int_ip), NULL, 0, NULL, 0, &leaseduration); if(r >= 0) { if(inet_pton(AF_INET, int_ip, &int_ip_addr) > 0) { if(h->clientaddr.s_addr != int_ip_addr.s_addr) { SoapError(h, 606, "Action not authorized"); /*SoapError(h, 714, "NoSuchEntryInArray");*/ ClearNameValueList(&data); return; } } } } r = upnp_delete_redirection(eport, protocol); if(r < 0) { SoapError(h, 714, "NoSuchEntryInArray"); } else { bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } ClearNameValueList(&data); } /* DeletePortMappingRange was added in IGD spec v2 */ static void DeletePortMappingRange(struct upnphttp * h, const char * action, const char * ns) { int r = -1; /*static const char resp[] = "<u:DeletePortMappingRangeResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\">" "</u:DeletePortMappingRangeResponse>";*/ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * protocol; const char * startport_s, * endport_s; unsigned short startport, endport; /*int manage;*/ unsigned short * port_list; unsigned int i, number = 0; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); startport_s = GetValueFromNameValueList(&data, "NewStartPort"); endport_s = GetValueFromNameValueList(&data, "NewEndPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); /*manage = atoi(GetValueFromNameValueList(&data, "NewManage"));*/ if(startport_s == NULL || endport_s == NULL || protocol == NULL || !is_numeric(startport_s) || !is_numeric(endport_s)) { SoapError(h, 402, "Invalid Args"); ClearNameValueList(&data); return; } startport = (unsigned short)atoi(startport_s); endport = (unsigned short)atoi(endport_s); /* possible errors : 606 - Action not authorized 730 - PortMappingNotFound 733 - InconsistentParameter */ if(startport > endport) { SoapError(h, 733, "InconsistentParameter"); ClearNameValueList(&data); return; } syslog(LOG_INFO, "%s: deleting external ports: %hu-%hu, protocol: %s", action, startport, endport, protocol); port_list = upnp_get_portmappings_in_range(startport, endport, protocol, &number); if(number == 0) { SoapError(h, 730, "PortMappingNotFound"); ClearNameValueList(&data); free(port_list); return; } for(i = 0; i < number; i++) { r = upnp_delete_redirection(port_list[i], protocol); syslog(LOG_INFO, "%s: deleting external port: %hu, protocol: %s: %s", action, port_list[i], protocol, r < 0 ? 
"failed" : "ok"); } free(port_list); bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); ClearNameValueList(&data); } static void GetGenericPortMappingEntry(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewRemoteHost>%s</NewRemoteHost>" "<NewExternalPort>%u</NewExternalPort>" "<NewProtocol>%s</NewProtocol>" "<NewInternalPort>%u</NewInternalPort>" "<NewInternalClient>%s</NewInternalClient>" "<NewEnabled>1</NewEnabled>" "<NewPortMappingDescription>%s</NewPortMappingDescription>" "<NewLeaseDuration>%u</NewLeaseDuration>" "</u:%sResponse>"; long int index = 0; unsigned short eport, iport; const char * m_index; char * endptr; char protocol[8], iaddr[32]; char desc[64]; char rhost[40]; unsigned int leaseduration = 0; struct NameValueParserData data; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); m_index = GetValueFromNameValueList(&data, "NewPortMappingIndex"); if(!m_index) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } errno = 0; /* To distinguish success/failure after call */ index = strtol(m_index, &endptr, 10); if((errno == ERANGE && (index == LONG_MAX || index == LONG_MIN)) || (errno != 0 && index == 0) || (m_index == endptr)) { /* should condition (*endptr != '\0') be also an error ? */ if(m_index == endptr) syslog(LOG_WARNING, "%s: no digits were found in <%s>", "GetGenericPortMappingEntry", "NewPortMappingIndex"); else syslog(LOG_WARNING, "%s: strtol('%s'): %m", "GetGenericPortMappingEntry", m_index); ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } syslog(LOG_INFO, "%s: index=%d", action, (int)index); rhost[0] = '\0'; r = upnp_get_redirection_infos_by_index((int)index, &eport, protocol, &iport, iaddr, sizeof(iaddr), desc, sizeof(desc), rhost, sizeof(rhost), &leaseduration); if(r < 0) { SoapError(h, 713, "SpecifiedArrayIndexInvalid"); } else { int bodylen; char body[2048]; bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ rhost, (unsigned int)eport, protocol, (unsigned int)iport, iaddr, desc, leaseduration, action); BuildSendAndCloseSoapResp(h, body, bodylen); } ClearNameValueList(&data); } /* GetListOfPortMappings was added in the IGD v2 specification */ static void GetListOfPortMappings(struct upnphttp * h, const char * action, const char * ns) { static const char resp_start[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewPortListing><![CDATA["; static const char resp_end[] = "]]></NewPortListing>" "</u:%sResponse>"; static const char list_start[] = "<p:PortMappingList xmlns:p=\"urn:schemas-upnp-org:gw:WANIPConnection\"" " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" " xsi:schemaLocation=\"urn:schemas-upnp-org:gw:WANIPConnection" " http://www.upnp.org/schemas/gw/WANIPConnection-v2.xsd\">"; static const char list_end[] = "</p:PortMappingList>"; static const char entry[] = "<p:PortMappingEntry>" "<p:NewRemoteHost>%s</p:NewRemoteHost>" "<p:NewExternalPort>%hu</p:NewExternalPort>" "<p:NewProtocol>%s</p:NewProtocol>" "<p:NewInternalPort>%hu</p:NewInternalPort>" "<p:NewInternalClient>%s</p:NewInternalClient>" "<p:NewEnabled>1</p:NewEnabled>" "<p:NewDescription>%s</p:NewDescription>" "<p:NewLeaseTime>%u</p:NewLeaseTime>" "</p:PortMappingEntry>"; char * body; size_t bodyalloc; int bodylen; int r = -1; unsigned short iport; char int_ip[32]; char desc[64]; char rhost[64]; unsigned int leaseduration = 0; struct NameValueParserData data; 
const char * startport_s, * endport_s; unsigned short startport, endport; const char * protocol; /*int manage;*/ const char * number_s; int number; unsigned short * port_list; unsigned int i, list_size = 0; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); startport_s = GetValueFromNameValueList(&data, "NewStartPort"); endport_s = GetValueFromNameValueList(&data, "NewEndPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); /*manage_s = GetValueFromNameValueList(&data, "NewManage");*/ number_s = GetValueFromNameValueList(&data, "NewNumberOfPorts"); if(startport_s == NULL || endport_s == NULL || protocol == NULL || number_s == NULL || !is_numeric(number_s) || !is_numeric(startport_s) || !is_numeric(endport_s)) { SoapError(h, 402, "Invalid Args"); ClearNameValueList(&data); return; } startport = (unsigned short)atoi(startport_s); endport = (unsigned short)atoi(endport_s); /*manage = atoi(manage_s);*/ number = atoi(number_s); if(number == 0) number = 1000; /* return up to 1000 mappings by default */ if(startport > endport) { SoapError(h, 733, "InconsistentParameter"); ClearNameValueList(&data); return; } /* build the PortMappingList xml document : <p:PortMappingList xmlns:p="urn:schemas-upnp-org:gw:WANIPConnection" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:schemas-upnp-org:gw:WANIPConnection http://www.upnp.org/schemas/gw/WANIPConnection-v2.xsd"> <p:PortMappingEntry> <p:NewRemoteHost>202.233.2.1</p:NewRemoteHost> <p:NewExternalPort>2345</p:NewExternalPort> <p:NewProtocol>TCP</p:NewProtocol> <p:NewInternalPort>2345</p:NewInternalPort> <p:NewInternalClient>192.168.1.137</p:NewInternalClient> <p:NewEnabled>1</p:NewEnabled> <p:NewDescription>dooom</p:NewDescription> <p:NewLeaseTime>345</p:NewLeaseTime> </p:PortMappingEntry> </p:PortMappingList> */ bodyalloc = 4096; body = malloc(bodyalloc); if(!body) { ClearNameValueList(&data); SoapError(h, 501, "ActionFailed"); return; } bodylen = snprintf(body, bodyalloc, resp_start, action, ns/*SERVICE_TYPE_WANIPC*/); if(bodylen < 0) { SoapError(h, 501, "ActionFailed"); free(body); return; } memcpy(body+bodylen, list_start, sizeof(list_start)); bodylen += (sizeof(list_start) - 1); port_list = upnp_get_portmappings_in_range(startport, endport, protocol, &list_size); /* loop through port mappings */ for(i = 0; number > 0 && i < list_size; i++) { /* have a margin of 1024 bytes to store the new entry */ if((unsigned int)bodylen + 1024 > bodyalloc) { char * body_sav = body; bodyalloc += 4096; body = realloc(body, bodyalloc); if(!body) { syslog(LOG_CRIT, "realloc(%p, %u) FAILED", body_sav, (unsigned)bodyalloc); ClearNameValueList(&data); SoapError(h, 501, "ActionFailed"); free(body_sav); free(port_list); return; } } rhost[0] = '\0'; r = upnp_get_redirection_infos(port_list[i], protocol, &iport, int_ip, sizeof(int_ip), desc, sizeof(desc), rhost, sizeof(rhost), &leaseduration); if(r == 0) { bodylen += snprintf(body+bodylen, bodyalloc-bodylen, entry, rhost, port_list[i], protocol, iport, int_ip, desc, leaseduration); number--; } } free(port_list); port_list = NULL; if((bodylen + sizeof(list_end) + 1024) > bodyalloc) { char * body_sav = body; bodyalloc += (sizeof(list_end) + 1024); body = realloc(body, bodyalloc); if(!body) { syslog(LOG_CRIT, "realloc(%p, %u) FAILED", body_sav, (unsigned)bodyalloc); ClearNameValueList(&data); SoapError(h, 501, "ActionFailed"); free(body_sav); return; } } memcpy(body+bodylen, list_end, sizeof(list_end)); bodylen += (sizeof(list_end) - 1); bodylen += 
snprintf(body+bodylen, bodyalloc-bodylen, resp_end, action); BuildSendAndCloseSoapResp(h, body, bodylen); free(body); ClearNameValueList(&data); } #ifdef ENABLE_L3F_SERVICE static void SetDefaultConnectionService(struct upnphttp * h, const char * action, const char * ns) { /*static const char resp[] = "<u:SetDefaultConnectionServiceResponse " "xmlns:u=\"urn:schemas-upnp-org:service:Layer3Forwarding:1\">" "</u:SetDefaultConnectionServiceResponse>";*/ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; char * p; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); p = GetValueFromNameValueList(&data, "NewDefaultConnectionService"); if(p) { /* 720 InvalidDeviceUUID * 721 InvalidServiceID * 723 InvalidConnServiceSelection */ #ifdef UPNP_STRICT char * service; service = strchr(p, ','); if(0 != memcmp(uuidvalue_wcd, p, sizeof("uuid:00000000-0000-0000-0000-000000000000") - 1)) { SoapError(h, 720, "InvalidDeviceUUID"); } else if(service == NULL || 0 != strcmp(service+1, SERVICE_ID_WANIPC)) { SoapError(h, 721, "InvalidServiceID"); } else #endif { syslog(LOG_INFO, "%s(%s) : Ignored", action, p); bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } } else { /* missing argument */ SoapError(h, 402, "Invalid Args"); } ClearNameValueList(&data); } static void GetDefaultConnectionService(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" #ifdef IGD_V2 "<NewDefaultConnectionService>%s:WANConnectionDevice:2," #else "<NewDefaultConnectionService>%s:WANConnectionDevice:1," #endif SERVICE_ID_WANIPC "</NewDefaultConnectionService>" "</u:%sResponse>"; /* example from UPnP_IGD_Layer3Forwarding 1.0.pdf : * uuid:44f5824f-c57d-418c-a131-f22b34e14111:WANConnectionDevice:1, * urn:upnp-org:serviceId:WANPPPConn1 */ char body[1024]; int bodylen; /* namespace : urn:schemas-upnp-org:service:Layer3Forwarding:1 */ bodylen = snprintf(body, sizeof(body), resp, action, ns, uuidvalue_wcd, action); BuildSendAndCloseSoapResp(h, body, bodylen); } #endif /* Added for compliance with WANIPConnection v2 */ static void SetConnectionType(struct upnphttp * h, const char * action, const char * ns) { #ifdef UPNP_STRICT const char * connection_type; #endif /* UPNP_STRICT */ struct NameValueParserData data; UNUSED(action); UNUSED(ns); ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); #ifdef UPNP_STRICT connection_type = GetValueFromNameValueList(&data, "NewConnectionType"); if(!connection_type) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #endif /* UPNP_STRICT */ /* Unconfigured, IP_Routed, IP_Bridged */ ClearNameValueList(&data); /* always return a ReadOnly error */ SoapError(h, 731, "ReadOnly"); } /* Added for compliance with WANIPConnection v2 */ static void RequestConnection(struct upnphttp * h, const char * action, const char * ns) { UNUSED(action); UNUSED(ns); SoapError(h, 606, "Action not authorized"); } /* Added for compliance with WANIPConnection v2 */ static void ForceTermination(struct upnphttp * h, const char * action, const char * ns) { UNUSED(action); UNUSED(ns); SoapError(h, 606, "Action not authorized"); } /* If a control point calls QueryStateVariable on a state variable that is not buffered in memory within (or otherwise available from) the service, the service must return a SOAP fault with an errorCode of 404 Invalid Var. 
QueryStateVariable remains useful as a limited test tool but may not be part of some future versions of UPnP. */ static void QueryStateVariable(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<return>%s</return>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * var_name; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); /*var_name = GetValueFromNameValueList(&data, "QueryStateVariable"); */ /*var_name = GetValueFromNameValueListIgnoreNS(&data, "varName");*/ var_name = GetValueFromNameValueList(&data, "varName"); /*syslog(LOG_INFO, "QueryStateVariable(%.40s)", var_name); */ if(!var_name) { SoapError(h, 402, "Invalid Args"); } else if(strcmp(var_name, "ConnectionStatus") == 0) { const char * status; status = get_wan_connection_status_str(ext_if_name); bodylen = snprintf(body, sizeof(body), resp, action, ns,/*"urn:schemas-upnp-org:control-1-0",*/ status, action); BuildSendAndCloseSoapResp(h, body, bodylen); } #if 0 /* not useful */ else if(strcmp(var_name, "ConnectionType") == 0) { bodylen = snprintf(body, sizeof(body), resp, "IP_Routed"); BuildSendAndCloseSoapResp(h, body, bodylen); } else if(strcmp(var_name, "LastConnectionError") == 0) { bodylen = snprintf(body, sizeof(body), resp, "ERROR_NONE"); BuildSendAndCloseSoapResp(h, body, bodylen); } #endif else if(strcmp(var_name, "PortMappingNumberOfEntries") == 0) { char strn[10]; snprintf(strn, sizeof(strn), "%i", upnp_get_portmapping_number_of_entries()); bodylen = snprintf(body, sizeof(body), resp, action, ns,/*"urn:schemas-upnp-org:control-1-0",*/ strn, action); BuildSendAndCloseSoapResp(h, body, bodylen); } else { syslog(LOG_NOTICE, "%s: Unknown: %s", action, var_name?var_name:""); SoapError(h, 404, "Invalid Var"); } ClearNameValueList(&data); } #ifdef ENABLE_6FC_SERVICE #ifndef ENABLE_IPV6 #error "ENABLE_6FC_SERVICE needs ENABLE_IPV6" #endif /* WANIPv6FirewallControl actions */ static void GetFirewallStatus(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<FirewallEnabled>%d</FirewallEnabled>" "<InboundPinholeAllowed>%d</InboundPinholeAllowed>" "</u:%sResponse>"; char body[512]; int bodylen; bodylen = snprintf(body, sizeof(body), resp, action, ns, /*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1",*/ GETFLAG(IPV6FCFWDISABLEDMASK) ? 0 : 1, GETFLAG(IPV6FCINBOUNDDISALLOWEDMASK) ? 
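/* Explanatory note (not part of the original source) : the two GETFLAG()
 * tests in this snprintf() fill <FirewallEnabled> and
 * <InboundPinholeAllowed>.  With both features enabled, the rendered body
 * would look roughly like :
 *   <u:GetFirewallStatusResponse
 *       xmlns:u="urn:schemas-upnp-org:service:WANIPv6FirewallControl:1">
 *     <FirewallEnabled>1</FirewallEnabled>
 *     <InboundPinholeAllowed>1</InboundPinholeAllowed>
 *   </u:GetFirewallStatusResponse>
 */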
0 : 1, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static int CheckStatus(struct upnphttp * h) { if (GETFLAG(IPV6FCFWDISABLEDMASK)) { SoapError(h, 702, "FirewallDisabled"); return 0; } else if(GETFLAG(IPV6FCINBOUNDDISALLOWEDMASK)) { SoapError(h, 703, "InboundPinholeNotAllowed"); return 0; } else return 1; } #if 0 static int connecthostport(const char * host, unsigned short port, char * result) { int s, n; char hostname[INET6_ADDRSTRLEN]; char port_str[8], ifname[8], tmp[4]; struct addrinfo *ai, *p; struct addrinfo hints; memset(&hints, 0, sizeof(hints)); /* hints.ai_flags = AI_ADDRCONFIG; */ #ifdef AI_NUMERICSERV hints.ai_flags = AI_NUMERICSERV; #endif hints.ai_socktype = SOCK_STREAM; hints.ai_family = AF_UNSPEC; /* AF_INET, AF_INET6 or AF_UNSPEC */ /* hints.ai_protocol = IPPROTO_TCP; */ snprintf(port_str, sizeof(port_str), "%hu", port); strcpy(hostname, host); if(!strncmp(host, "fe80", 4)) { printf("Using an linklocal address\n"); strcpy(ifname, "%"); snprintf(tmp, sizeof(tmp), "%d", linklocal_index); strcat(ifname, tmp); strcat(hostname, ifname); printf("host: %s\n", hostname); } n = getaddrinfo(hostname, port_str, &hints, &ai); if(n != 0) { fprintf(stderr, "getaddrinfo() error : %s\n", gai_strerror(n)); return -1; } s = -1; for(p = ai; p; p = p->ai_next) { #ifdef DEBUG char tmp_host[256]; char tmp_service[256]; printf("ai_family=%d ai_socktype=%d ai_protocol=%d ai_addrlen=%d\n ", p->ai_family, p->ai_socktype, p->ai_protocol, p->ai_addrlen); getnameinfo(p->ai_addr, p->ai_addrlen, tmp_host, sizeof(tmp_host), tmp_service, sizeof(tmp_service), NI_NUMERICHOST | NI_NUMERICSERV); printf(" host=%s service=%s\n", tmp_host, tmp_service); #endif inet_ntop(AF_INET6, &(((struct sockaddr_in6 *)p->ai_addr)->sin6_addr), result, INET6_ADDRSTRLEN); return 0; } freeaddrinfo(ai); } #endif /* Check the security policy right */ static int PinholeVerification(struct upnphttp * h, char * int_ip, unsigned short int_port) { int n; char senderAddr[INET6_ADDRSTRLEN]=""; struct addrinfo hints, *ai, *p; struct in6_addr result_ip; /* Pinhole InternalClient address must correspond to the action sender */ syslog(LOG_INFO, "Checking internal IP@ and port (Security policy purpose)"); hints.ai_socktype = SOCK_STREAM; hints.ai_family = AF_UNSPEC; /* if ip not valid assume hostname and convert */ if (inet_pton(AF_INET6, int_ip, &result_ip) <= 0) { n = getaddrinfo(int_ip, NULL, &hints, &ai); if(!n && ai->ai_family == AF_INET6) { for(p = ai; p; p = p->ai_next) { inet_ntop(AF_INET6, (struct in6_addr *) p, int_ip, sizeof(struct in6_addr)); result_ip = *((struct in6_addr *) p); /* TODO : deal with more than one ip per hostname */ break; } } else { syslog(LOG_ERR, "Failed to convert hostname '%s' to ip address", int_ip); SoapError(h, 402, "Invalid Args"); return -1; } freeaddrinfo(p); } if(inet_ntop(AF_INET6, &(h->clientaddr_v6), senderAddr, INET6_ADDRSTRLEN) == NULL) { syslog(LOG_ERR, "inet_ntop: %m"); } #ifdef DEBUG printf("\tPinholeVerification:\n\t\tCompare sender @: %s\n\t\t to intClient @: %s\n", senderAddr, int_ip); #endif if(strcmp(senderAddr, int_ip) != 0) if(h->clientaddr_v6.s6_addr != result_ip.s6_addr) { syslog(LOG_INFO, "Client %s tried to access pinhole for internal %s and is not authorized to do it", senderAddr, int_ip); SoapError(h, 606, "Action not authorized"); return 0; } /* Pinhole InternalPort must be greater than or equal to 1024 */ if (int_port < 1024) { syslog(LOG_INFO, "Client %s tried to access pinhole with port < 1024 and is not authorized to do it", senderAddr); SoapError(h, 606, "Action 
not authorized"); return 0; } return 1; } static void AddPinhole(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<UniqueID>%d</UniqueID>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; char * rem_host, * rem_port, * int_ip, * int_port, * protocol, * leaseTime; int uid = 0; unsigned short iport, rport; int ltime; long proto; char rem_ip[INET6_ADDRSTRLEN]; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); rem_host = GetValueFromNameValueList(&data, "RemoteHost"); rem_port = GetValueFromNameValueList(&data, "RemotePort"); int_ip = GetValueFromNameValueList(&data, "InternalClient"); int_port = GetValueFromNameValueList(&data, "InternalPort"); protocol = GetValueFromNameValueList(&data, "Protocol"); leaseTime = GetValueFromNameValueList(&data, "LeaseTime"); rport = (unsigned short)(rem_port ? atoi(rem_port) : 0); iport = (unsigned short)(int_port ? atoi(int_port) : 0); ltime = leaseTime ? atoi(leaseTime) : -1; errno = 0; proto = protocol ? strtol(protocol, NULL, 0) : -1; if(errno != 0 || proto > 65535 || proto < 0) { SoapError(h, 402, "Invalid Args"); goto clear_and_exit; } if(iport == 0) { SoapError(h, 706, "InternalPortWilcardingNotAllowed"); goto clear_and_exit; } /* In particular, [IGD2] RECOMMENDS that unauthenticated and * unauthorized control points are only allowed to invoke * this action with: * - InternalPort value greater than or equal to 1024, * - InternalClient value equals to the control point's IP address. * It is REQUIRED that InternalClient cannot be one of IPv6 * addresses used by the gateway. */ if(!int_ip || int_ip[0] == '\0' || 0 == strcmp(int_ip, "*")) { SoapError(h, 708, "WildCardNotPermittedInSrcIP"); goto clear_and_exit; } /* I guess it is useless to convert int_ip to literal ipv6 address */ if(rem_host) { /* trim */ while(isspace(rem_host[0])) rem_host++; } /* rem_host should be converted to literal ipv6 : */ if(rem_host && (rem_host[0] != '\0') && (rem_host[0] != '*')) { struct addrinfo *ai, *p; struct addrinfo hints; int err; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_INET6; /*hints.ai_flags = */ /* hints.ai_protocol = proto; */ err = getaddrinfo(rem_host, rem_port, &hints, &ai); if(err == 0) { /* take the 1st IPv6 address */ for(p = ai; p; p = p->ai_next) { if(p->ai_family == AF_INET6) { inet_ntop(AF_INET6, &(((struct sockaddr_in6 *)p->ai_addr)->sin6_addr), rem_ip, sizeof(rem_ip)); syslog(LOG_INFO, "resolved '%s' to '%s'", rem_host, rem_ip); rem_host = rem_ip; break; } } freeaddrinfo(ai); } else { syslog(LOG_WARNING, "AddPinhole : getaddrinfo(%s) : %s", rem_host, gai_strerror(err)); #if 0 SoapError(h, 402, "Invalid Args"); goto clear_and_exit; #endif } } if(proto == 65535) { SoapError(h, 707, "ProtocolWilcardingNotAllowed"); goto clear_and_exit; } if(proto != IPPROTO_UDP && proto != IPPROTO_TCP #ifdef IPPROTO_UDPITE && atoi(protocol) != IPPROTO_UDPLITE #endif ) { SoapError(h, 705, "ProtocolNotSupported"); goto clear_and_exit; } if(ltime < 1 || ltime > 86400) { syslog(LOG_WARNING, "%s: LeaseTime=%d not supported, (ip=%s)", action, ltime, int_ip); SoapError(h, 402, "Invalid Args"); goto clear_and_exit; } if(PinholeVerification(h, int_ip, iport) <= 0) goto clear_and_exit; syslog(LOG_INFO, "%s: (inbound) from [%s]:%hu to [%s]:%hu with proto %ld during %d sec", action, rem_host?rem_host:"any", rport, int_ip, iport, proto, ltime); /* In cases where the RemoteHost, RemotePort, 
InternalPort, * InternalClient and Protocol are the same than an existing pinhole, * but LeaseTime is different, the device MUST extend the existing * pinhole's lease time and return the UniqueID of the existing pinhole. */ r = upnp_add_inboundpinhole(rem_host, rport, int_ip, iport, proto, "IGD2 pinhole", ltime, &uid); switch(r) { case 1: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, uid, action); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -1: /* not permitted */ SoapError(h, 701, "PinholeSpaceExhausted"); break; default: SoapError(h, 501, "ActionFailed"); break; } /* 606 Action not authorized * 701 PinholeSpaceExhausted * 702 FirewallDisabled * 703 InboundPinholeNotAllowed * 705 ProtocolNotSupported * 706 InternalPortWildcardingNotAllowed * 707 ProtocolWildcardingNotAllowed * 708 WildCardNotPermittedInSrcIP */ clear_and_exit: ClearNameValueList(&data); } static void UpdatePinhole(struct upnphttp * h, const char * action, const char * ns) { #if 0 static const char resp[] = "<u:UpdatePinholeResponse " "xmlns:u=\"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1\">" "</u:UpdatePinholeResponse>"; #endif static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * uid_str, * leaseTime; char iaddr[INET6_ADDRSTRLEN]; unsigned short iport; int ltime; int uid; int n; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); uid_str = GetValueFromNameValueList(&data, "UniqueID"); leaseTime = GetValueFromNameValueList(&data, "NewLeaseTime"); uid = uid_str ? atoi(uid_str) : -1; ltime = leaseTime ? atoi(leaseTime) : -1; ClearNameValueList(&data); if(uid < 0 || uid > 65535 || ltime <= 0 || ltime > 86400) { SoapError(h, 402, "Invalid Args"); return; } /* Check that client is not updating an pinhole * it doesn't have access to, because of its public access */ n = upnp_get_pinhole_info(uid, NULL, 0, NULL, iaddr, sizeof(iaddr), &iport, NULL, /* proto */ NULL, 0, /* desc, desclen */ NULL, NULL); if (n >= 0) { if(PinholeVerification(h, iaddr, iport) <= 0) return; } else if(n == -2) { SoapError(h, 704, "NoSuchEntry"); return; } else { SoapError(h, 501, "ActionFailed"); return; } syslog(LOG_INFO, "%s: (inbound) updating lease duration to %d for pinhole with ID: %d", action, ltime, uid); n = upnp_update_inboundpinhole(uid, ltime); if(n == -1) SoapError(h, 704, "NoSuchEntry"); else if(n < 0) SoapError(h, 501, "ActionFailed"); else { bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } } static void GetOutboundPinholeTimeout(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<OutboundPinholeTimeout>%d</OutboundPinholeTimeout>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; char * int_ip, * int_port, * rem_host, * rem_port, * protocol; int opt=0; /*int proto=0;*/ unsigned short iport, rport; if (GETFLAG(IPV6FCFWDISABLEDMASK)) { SoapError(h, 702, "FirewallDisabled"); return; } ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); int_ip = GetValueFromNameValueList(&data, "InternalClient"); int_port = GetValueFromNameValueList(&data, "InternalPort"); rem_host = GetValueFromNameValueList(&data, "RemoteHost"); rem_port = GetValueFromNameValueList(&data, "RemotePort"); protocol = 
GetValueFromNameValueList(&data, "Protocol"); if (!int_port || !ext_port || !protocol) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } rport = (unsigned short)atoi(rem_port); iport = (unsigned short)atoi(int_port); /*proto = atoi(protocol);*/ syslog(LOG_INFO, "%s: retrieving timeout for outbound pinhole from [%s]:%hu to [%s]:%hu protocol %s", action, int_ip, iport,rem_host, rport, protocol); /* TODO */ r = -1;/*upnp_check_outbound_pinhole(proto, &opt);*/ switch(r) { case 1: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, opt, action); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -5: /* Protocol not supported */ SoapError(h, 705, "ProtocolNotSupported"); break; default: SoapError(h, 501, "ActionFailed"); } ClearNameValueList(&data); } static void DeletePinhole(struct upnphttp * h, const char * action, const char * ns) { int n; #if 0 static const char resp[] = "<u:DeletePinholeResponse " "xmlns:u=\"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1\">" "</u:DeletePinholeResponse>"; #endif static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * uid_str; char iaddr[INET6_ADDRSTRLEN]; int proto; unsigned short iport; unsigned int leasetime; int uid; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); uid_str = GetValueFromNameValueList(&data, "UniqueID"); uid = uid_str ? atoi(uid_str) : -1; ClearNameValueList(&data); if(uid < 0 || uid > 65535) { SoapError(h, 402, "Invalid Args"); return; } /* Check that client is not deleting an pinhole * it doesn't have access to, because of its public access */ n = upnp_get_pinhole_info(uid, NULL, 0, NULL, iaddr, sizeof(iaddr), &iport, &proto, NULL, 0, /* desc, desclen */ &leasetime, NULL); if (n >= 0) { if(PinholeVerification(h, iaddr, iport) <= 0) return; } else if(n == -2) { SoapError(h, 704, "NoSuchEntry"); return; } else { SoapError(h, 501, "ActionFailed"); return; } n = upnp_delete_inboundpinhole(uid); if(n < 0) { syslog(LOG_INFO, "%s: (inbound) failed to remove pinhole with ID: %d", action, uid); SoapError(h, 501, "ActionFailed"); return; } syslog(LOG_INFO, "%s: (inbound) pinhole with ID %d successfully removed", action, uid); bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void CheckPinholeWorking(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<IsWorking>%d</IsWorking>" "</u:%sResponse>"; char body[512]; int bodylen; int r; struct NameValueParserData data; const char * uid_str; int uid; char iaddr[INET6_ADDRSTRLEN]; unsigned short iport; unsigned int packets; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); uid_str = GetValueFromNameValueList(&data, "UniqueID"); uid = uid_str ? 
atoi(uid_str) : -1; ClearNameValueList(&data); if(uid < 0 || uid > 65535) { SoapError(h, 402, "Invalid Args"); return; } /* Check that client is not checking a pinhole * it doesn't have access to, because of its public access */ r = upnp_get_pinhole_info(uid, NULL, 0, NULL, iaddr, sizeof(iaddr), &iport, NULL, /* proto */ NULL, 0, /* desc, desclen */ NULL, &packets); if (r >= 0) { if(PinholeVerification(h, iaddr, iport) <= 0) return ; if(packets == 0) { SoapError(h, 709, "NoPacketSent"); return; } bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, 1, action); BuildSendAndCloseSoapResp(h, body, bodylen); } else if(r == -2) SoapError(h, 704, "NoSuchEntry"); else SoapError(h, 501, "ActionFailed"); } static void GetPinholePackets(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<PinholePackets>%u</PinholePackets>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * uid_str; int n; char iaddr[INET6_ADDRSTRLEN]; unsigned short iport; unsigned int packets = 0; int uid; int proto; unsigned int leasetime; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); uid_str = GetValueFromNameValueList(&data, "UniqueID"); uid = uid_str ? atoi(uid_str) : -1; ClearNameValueList(&data); if(uid < 0 || uid > 65535) { SoapError(h, 402, "Invalid Args"); return; } /* Check that client is not getting infos of a pinhole * it doesn't have access to, because of its public access */ n = upnp_get_pinhole_info(uid, NULL, 0, NULL, iaddr, sizeof(iaddr), &iport, &proto, NULL, 0, /* desc, desclen */ &leasetime, &packets); if (n >= 0) { if(PinholeVerification(h, iaddr, iport)<=0) return ; } #if 0 else if(r == -4 || r == -1) { SoapError(h, 704, "NoSuchEntry"); } #endif bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, packets, action); BuildSendAndCloseSoapResp(h, body, bodylen); } #endif #ifdef ENABLE_DP_SERVICE static void SendSetupMessage(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<OutMessage>%s</OutMessage>" "</u:%sResponse>"; char body[1024]; int bodylen; struct NameValueParserData data; const char * ProtocolType; /* string */ const char * InMessage; /* base64 */ const char * OutMessage = ""; /* base64 */ ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); ProtocolType = GetValueFromNameValueList(&data, "ProtocolType"); /* string */ InMessage = GetValueFromNameValueList(&data, "InMessage"); /* base64 */ if(ProtocolType == NULL || InMessage == NULL) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } /*if(strcmp(ProtocolType, "DeviceProtection:1") != 0)*/ if(strcmp(ProtocolType, "WPS") != 0) { ClearNameValueList(&data); SoapError(h, 600, "Argument Value Invalid"); /* 703 ? 
*/ return; } /* TODO : put here code for WPS */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:DeviceProtection:1"*/, OutMessage, action); BuildSendAndCloseSoapResp(h, body, bodylen); ClearNameValueList(&data); } static void GetSupportedProtocols(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<ProtocolList><![CDATA[%s]]></ProtocolList>" "</u:%sResponse>"; char body[1024]; int bodylen; const char * ProtocolList = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" "<SupportedProtocols xmlns=\"urn:schemas-upnp-org:gw:DeviceProtection\"" " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" " xsi:schemaLocation=\"urn:schemas-upnp-org:gw:DeviceProtection" " http://www.upnp.org/schemas/gw/DeviceProtection-v1.xsd\">" "<Introduction><Name>WPS</Name></Introduction>" "<Login><Name>PKCS5</Name></Login>" "</SupportedProtocols>"; bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:DeviceProtection:1"*/, ProtocolList, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetAssignedRoles(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<RoleList>%s</RoleList>" "</u:%sResponse>"; char body[1024]; int bodylen; const char * RoleList = "Public"; /* list of roles separated by spaces */ #ifdef ENABLE_HTTPS if(h->ssl != NULL) { /* we should get the Roles of the session (based on client certificate) */ X509 * peercert; peercert = SSL_get_peer_certificate(h->ssl); if(peercert != NULL) { RoleList = "Admin Basic"; X509_free(peercert); } } #endif bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:DeviceProtection:1"*/, RoleList, action); BuildSendAndCloseSoapResp(h, body, bodylen); } #endif /* Windows XP as client send the following requests : * GetConnectionTypeInfo * GetNATRSIPStatus * ? GetTotalBytesSent - WANCommonInterfaceConfig * ? GetTotalBytesReceived - idem * ? GetTotalPacketsSent - idem * ? GetTotalPacketsReceived - idem * GetCommonLinkProperties - idem * GetStatusInfo - WANIPConnection * GetExternalIPAddress * QueryStateVariable / ConnectionStatus! 
*/ static const struct { const char * methodName; void (*methodImpl)(struct upnphttp *, const char *, const char *); } soapMethods[] = { /* WANCommonInterfaceConfig */ { "QueryStateVariable", QueryStateVariable}, { "GetTotalBytesSent", GetTotalBytesSent}, { "GetTotalBytesReceived", GetTotalBytesReceived}, { "GetTotalPacketsSent", GetTotalPacketsSent}, { "GetTotalPacketsReceived", GetTotalPacketsReceived}, { "GetCommonLinkProperties", GetCommonLinkProperties}, { "GetStatusInfo", GetStatusInfo}, /* WANIPConnection */ { "GetConnectionTypeInfo", GetConnectionTypeInfo }, { "GetNATRSIPStatus", GetNATRSIPStatus}, { "GetExternalIPAddress", GetExternalIPAddress}, { "AddPortMapping", AddPortMapping}, { "DeletePortMapping", DeletePortMapping}, { "GetGenericPortMappingEntry", GetGenericPortMappingEntry}, { "GetSpecificPortMappingEntry", GetSpecificPortMappingEntry}, /* Required in WANIPConnection:2 */ { "SetConnectionType", SetConnectionType}, { "RequestConnection", RequestConnection}, { "ForceTermination", ForceTermination}, { "AddAnyPortMapping", AddAnyPortMapping}, { "DeletePortMappingRange", DeletePortMappingRange}, { "GetListOfPortMappings", GetListOfPortMappings}, #ifdef ENABLE_L3F_SERVICE /* Layer3Forwarding */ { "SetDefaultConnectionService", SetDefaultConnectionService}, { "GetDefaultConnectionService", GetDefaultConnectionService}, #endif #ifdef ENABLE_6FC_SERVICE /* WANIPv6FirewallControl */ { "GetFirewallStatus", GetFirewallStatus}, /* Required */ { "AddPinhole", AddPinhole}, /* Required */ { "UpdatePinhole", UpdatePinhole}, /* Required */ { "GetOutboundPinholeTimeout", GetOutboundPinholeTimeout}, /* Optional */ { "DeletePinhole", DeletePinhole}, /* Required */ { "CheckPinholeWorking", CheckPinholeWorking}, /* Optional */ { "GetPinholePackets", GetPinholePackets}, /* Required */ #endif #ifdef ENABLE_DP_SERVICE /* DeviceProtection */ { "SendSetupMessage", SendSetupMessage}, /* Required */ { "GetSupportedProtocols", GetSupportedProtocols}, /* Required */ { "GetAssignedRoles", GetAssignedRoles}, /* Required */ #endif { 0, 0 } }; void ExecuteSoapAction(struct upnphttp * h, const char * action, int n) { char * p; char * p2; int i, len, methodlen; char namespace[256]; /* SoapAction example : * urn:schemas-upnp-org:service:WANIPConnection:1#GetStatusInfo */ p = strchr(action, '#'); if(p && (p - action) < n) { for(i = 0; i < ((int)sizeof(namespace) - 1) && (action + i) < p; i++) namespace[i] = action[i]; namespace[i] = '\0'; p++; p2 = strchr(p, '"'); if(p2 && (p2 - action) <= n) methodlen = p2 - p; else methodlen = n - (p - action); /*syslog(LOG_DEBUG, "SoapMethod: %.*s %d %d %p %p %d", methodlen, p, methodlen, n, action, p, (int)(p - action));*/ for(i = 0; soapMethods[i].methodName; i++) { len = strlen(soapMethods[i].methodName); if((len == methodlen) && memcmp(p, soapMethods[i].methodName, len) == 0) { #ifdef DEBUG syslog(LOG_DEBUG, "Remote Call of SoapMethod '%s' %s", soapMethods[i].methodName, namespace); #endif /* DEBUG */ soapMethods[i].methodImpl(h, soapMethods[i].methodName, namespace); return; } } syslog(LOG_NOTICE, "SoapMethod: Unknown: %.*s %s", methodlen, p, namespace); } else { syslog(LOG_NOTICE, "cannot parse SoapAction"); } SoapError(h, 401, "Invalid Action"); } /* Standard Errors: * * errorCode errorDescription Description * -------- ---------------- ----------- * 401 Invalid Action No action by that name at this service. 
* 402 Invalid Args Could be any of the following: not enough in args, * too many in args, no in arg by that name, * one or more in args are of the wrong data type. * 403 Out of Sync Out of synchronization. * 501 Action Failed May be returned in current state of service * prevents invoking that action. * 600-699 TBD Common action errors. Defined by UPnP Forum * Technical Committee. * 700-799 TBD Action-specific errors for standard actions. * Defined by UPnP Forum working committee. * 800-899 TBD Action-specific errors for non-standard actions. * Defined by UPnP vendor. */ void SoapError(struct upnphttp * h, int errCode, const char * errDesc) { static const char resp[] = "<s:Envelope " "xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" " "s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">" "<s:Body>" "<s:Fault>" "<faultcode>s:Client</faultcode>" "<faultstring>UPnPError</faultstring>" "<detail>" "<UPnPError xmlns=\"urn:schemas-upnp-org:control-1-0\">" "<errorCode>%d</errorCode>" "<errorDescription>%s</errorDescription>" "</UPnPError>" "</detail>" "</s:Fault>" "</s:Body>" "</s:Envelope>"; char body[2048]; int bodylen; syslog(LOG_INFO, "Returning UPnPError %d: %s", errCode, errDesc); bodylen = snprintf(body, sizeof(body), resp, errCode, errDesc); BuildResp2_upnphttp(h, 500, "Internal Server Error", body, bodylen); SendRespAndClose_upnphttp(h); }
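/* Usage sketch (illustrative only, not part of the original source) :
 * a handler that detects a missing argument calls
 *     SoapError(h, 402, "Invalid Args");
 * which sends an HTTP "500 Internal Server Error" response whose body is the
 * s:Fault envelope built from the resp[] template above, containing e.g.
 *     <errorCode>402</errorCode>
 *     <errorDescription>Invalid Args</errorDescription>
 * and then closes the connection via SendRespAndClose_upnphttp(). */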
/* $Id: upnpsoap.c,v 1.151 2018/03/13 10:32:53 nanard Exp $ */ /* vim: tabstop=4 shiftwidth=4 noexpandtab * MiniUPnP project * http://miniupnp.free.fr/ or https://miniupnp.tuxfamily.org/ * (c) 2006-2018 Thomas Bernard * This software is subject to the conditions detailed * in the LICENCE file provided within the distribution */ #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <string.h> #include <errno.h> #include <sys/socket.h> #include <unistd.h> #include <syslog.h> #include <sys/types.h> #include <netinet/in.h> #include <arpa/inet.h> #include <netdb.h> #include <ctype.h> #include "macros.h" #include "config.h" #include "upnpglobalvars.h" #include "upnphttp.h" #include "upnpsoap.h" #include "upnpreplyparse.h" #include "upnpredirect.h" #include "upnppinhole.h" #include "getifaddr.h" #include "getifstats.h" #include "getconnstatus.h" #include "upnpurns.h" #include "upnputils.h" /* utility function */ static int is_numeric(const char * s) { while(*s) { if(*s < '0' || *s > '9') return 0; s++; } return 1; } static void BuildSendAndCloseSoapResp(struct upnphttp * h, const char * body, int bodylen) { static const char beforebody[] = "<?xml version=\"1.0\"?>\r\n" "<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" " "s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">" "<s:Body>"; static const char afterbody[] = "</s:Body>" "</s:Envelope>\r\n"; int r = BuildHeader_upnphttp(h, 200, "OK", sizeof(beforebody) - 1 + sizeof(afterbody) - 1 + bodylen ); if(r >= 0) { memcpy(h->res_buf + h->res_buflen, beforebody, sizeof(beforebody) - 1); h->res_buflen += sizeof(beforebody) - 1; memcpy(h->res_buf + h->res_buflen, body, bodylen); h->res_buflen += bodylen; memcpy(h->res_buf + h->res_buflen, afterbody, sizeof(afterbody) - 1); h->res_buflen += sizeof(afterbody) - 1; } else { BuildResp2_upnphttp(h, 500, "Internal Server Error", NULL, 0); } SendRespAndClose_upnphttp(h); } static void GetConnectionTypeInfo(struct upnphttp * h, const char * action, const char * ns) { #if 0 static const char resp[] = "<u:GetConnectionTypeInfoResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\">" "<NewConnectionType>IP_Routed</NewConnectionType>" "<NewPossibleConnectionTypes>IP_Routed</NewPossibleConnectionTypes>" "</u:GetConnectionTypeInfoResponse>"; #endif static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewConnectionType>IP_Routed</NewConnectionType>" "<NewPossibleConnectionTypes>IP_Routed</NewPossibleConnectionTypes>" "</u:%sResponse>"; char body[512]; int bodylen; bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } /* maximum value for a UPNP ui4 type variable */ #define UPNP_UI4_MAX (4294967295ul) static void GetTotalBytesSent(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewTotalBytesSent>%lu</NewTotalBytesSent>" "</u:%sResponse>"; char body[512]; int bodylen; struct ifdata data; r = getifstats(ext_if_name, &data); bodylen = snprintf(body, sizeof(body), resp, action, ns, /* was "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" */ #ifdef UPNP_STRICT r<0?0:(data.obytes & UPNP_UI4_MAX), action); #else /* UPNP_STRICT */ r<0?0:data.obytes, action); #endif /* UPNP_STRICT */ BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetTotalBytesReceived(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" 
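/* Worked example (added for clarity, not in the original source) : a UPnP
 * "ui4" state variable can only hold 0..4294967295.  With UPNP_STRICT, a
 * 64-bit counter such as data.obytes or data.ibytes equal to 5000000000 is
 * reported as 5000000000 & UPNP_UI4_MAX = 705032704, i.e. the value modulo
 * 2^32, matching the "rolls over to 0 after reaching (2^32)-1" behaviour
 * described below for TotalBytesReceived. */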
"<NewTotalBytesReceived>%lu</NewTotalBytesReceived>" "</u:%sResponse>"; char body[512]; int bodylen; struct ifdata data; r = getifstats(ext_if_name, &data); /* TotalBytesReceived * This variable represents the cumulative counter for total number of * bytes received downstream across all connection service instances on * WANDevice. The count rolls over to 0 after it reaching the maximum * value (2^32)-1. */ bodylen = snprintf(body, sizeof(body), resp, action, ns, /* was "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" */ #ifdef UPNP_STRICT r<0?0:(data.ibytes & UPNP_UI4_MAX), action); #else /* UPNP_STRICT */ r<0?0:data.ibytes, action); #endif /* UPNP_STRICT */ BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetTotalPacketsSent(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewTotalPacketsSent>%lu</NewTotalPacketsSent>" "</u:%sResponse>"; char body[512]; int bodylen; struct ifdata data; r = getifstats(ext_if_name, &data); bodylen = snprintf(body, sizeof(body), resp, action, ns,/*"urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1",*/ #ifdef UPNP_STRICT r<0?0:(data.opackets & UPNP_UI4_MAX), action); #else /* UPNP_STRICT */ r<0?0:data.opackets, action); #endif /* UPNP_STRICT */ BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetTotalPacketsReceived(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewTotalPacketsReceived>%lu</NewTotalPacketsReceived>" "</u:%sResponse>"; char body[512]; int bodylen; struct ifdata data; r = getifstats(ext_if_name, &data); bodylen = snprintf(body, sizeof(body), resp, action, ns, /* was "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" */ #ifdef UPNP_STRICT r<0?0:(data.ipackets & UPNP_UI4_MAX), action); #else /* UPNP_STRICT */ r<0?0:data.ipackets, action); #endif /* UPNP_STRICT */ BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetCommonLinkProperties(struct upnphttp * h, const char * action, const char * ns) { /* WANAccessType : set depending on the hardware : * DSL, POTS (plain old Telephone service), Cable, Ethernet */ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewWANAccessType>%s</NewWANAccessType>" "<NewLayer1UpstreamMaxBitRate>%lu</NewLayer1UpstreamMaxBitRate>" "<NewLayer1DownstreamMaxBitRate>%lu</NewLayer1DownstreamMaxBitRate>" "<NewPhysicalLinkStatus>%s</NewPhysicalLinkStatus>" "</u:%sResponse>"; char body[2048]; int bodylen; struct ifdata data; const char * status = "Up"; /* Up, Down (Required), * Initializing, Unavailable (Optional) */ const char * wan_access_type = "Cable"; /* DSL, POTS, Cable, Ethernet */ char ext_ip_addr[INET_ADDRSTRLEN]; if((downstream_bitrate == 0) || (upstream_bitrate == 0)) { if(getifstats(ext_if_name, &data) >= 0) { if(downstream_bitrate == 0) downstream_bitrate = data.baudrate; if(upstream_bitrate == 0) upstream_bitrate = data.baudrate; } } if(getifaddr(ext_if_name, ext_ip_addr, INET_ADDRSTRLEN, NULL, NULL) < 0) { status = "Down"; } bodylen = snprintf(body, sizeof(body), resp, action, ns, /* was "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1" */ wan_access_type, upstream_bitrate, downstream_bitrate, status, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetStatusInfo(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewConnectionStatus>%s</NewConnectionStatus>" 
"<NewLastConnectionError>ERROR_NONE</NewLastConnectionError>" "<NewUptime>%ld</NewUptime>" "</u:%sResponse>"; char body[512]; int bodylen; time_t uptime; const char * status; /* ConnectionStatus possible values : * Unconfigured, Connecting, Connected, PendingDisconnect, * Disconnecting, Disconnected */ status = get_wan_connection_status_str(ext_if_name); uptime = upnp_get_uptime(); bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ status, (long)uptime, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetNATRSIPStatus(struct upnphttp * h, const char * action, const char * ns) { #if 0 static const char resp[] = "<u:GetNATRSIPStatusResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\">" "<NewRSIPAvailable>0</NewRSIPAvailable>" "<NewNATEnabled>1</NewNATEnabled>" "</u:GetNATRSIPStatusResponse>"; UNUSED(action); #endif static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewRSIPAvailable>0</NewRSIPAvailable>" "<NewNATEnabled>1</NewNATEnabled>" "</u:%sResponse>"; char body[512]; int bodylen; /* 2.2.9. RSIPAvailable * This variable indicates if Realm-specific IP (RSIP) is available * as a feature on the InternetGatewayDevice. RSIP is being defined * in the NAT working group in the IETF to allow host-NATing using * a standard set of message exchanges. It also allows end-to-end * applications that otherwise break if NAT is introduced * (e.g. IPsec-based VPNs). * A gateway that does not support RSIP should set this variable to 0. */ bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetExternalIPAddress(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewExternalIPAddress>%s</NewExternalIPAddress>" "</u:%sResponse>"; char body[512]; int bodylen; char ext_ip_addr[INET_ADDRSTRLEN]; /* Does that method need to work with IPv6 ? 
* There is usually no NAT with IPv6 */ #ifndef MULTIPLE_EXTERNAL_IP struct in_addr addr; if(use_ext_ip_addr) { strncpy(ext_ip_addr, use_ext_ip_addr, INET_ADDRSTRLEN); ext_ip_addr[INET_ADDRSTRLEN - 1] = '\0'; } else if(getifaddr(ext_if_name, ext_ip_addr, INET_ADDRSTRLEN, &addr, NULL) < 0) { syslog(LOG_ERR, "Failed to get ip address for interface %s", ext_if_name); strncpy(ext_ip_addr, "0.0.0.0", INET_ADDRSTRLEN); } if (addr_is_reserved(&addr)) strncpy(ext_ip_addr, "0.0.0.0", INET_ADDRSTRLEN); #else struct lan_addr_s * lan_addr; strncpy(ext_ip_addr, "0.0.0.0", INET_ADDRSTRLEN); for(lan_addr = lan_addrs.lh_first; lan_addr != NULL; lan_addr = lan_addr->list.le_next) { if( (h->clientaddr.s_addr & lan_addr->mask.s_addr) == (lan_addr->addr.s_addr & lan_addr->mask.s_addr)) { strncpy(ext_ip_addr, lan_addr->ext_ip_str, INET_ADDRSTRLEN); break; } } #endif if (strcmp(ext_ip_addr, "0.0.0.0") == 0) { SoapError(h, 501, "Action Failed"); return; } bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ ext_ip_addr, action); BuildSendAndCloseSoapResp(h, body, bodylen); } /* AddPortMapping method of WANIPConnection Service * Ignored argument : NewEnabled */ static void AddPortMapping(struct upnphttp * h, const char * action, const char * ns) { int r; /*static const char resp[] = "<u:AddPortMappingResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\"/>";*/ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\"/>"; char body[512]; int bodylen; struct NameValueParserData data; char * int_ip, * int_port, * ext_port, * protocol, * desc; char * leaseduration_str; unsigned int leaseduration; char * r_host; unsigned short iport, eport; struct hostent *hp; /* getbyhostname() */ char ** ptr; /* getbyhostname() */ struct in_addr result_ip;/*unsigned char result_ip[16];*/ /* inet_pton() */ ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); int_ip = GetValueFromNameValueList(&data, "NewInternalClient"); if (int_ip) { /* trim */ while(int_ip[0] == ' ') int_ip++; } #ifdef UPNP_STRICT if (!int_ip || int_ip[0] == '\0') { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #endif /* IGD 2 MUST support both wildcard and specific IP address values * for RemoteHost (only the wildcard value was REQUIRED in release 1.0) */ r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif #endif #ifndef UPNP_STRICT /* if <NewInternalClient> arg is empty, use client address * see https://github.com/miniupnp/miniupnp/issues/236 */ if (!int_ip || int_ip[0] == '\0') { int_ip = h->clientaddr_str; memcpy(&result_ip, &(h->clientaddr), sizeof(struct in_addr)); } else #endif /* if ip not valid assume hostname and convert */ if (inet_pton(AF_INET, int_ip, &result_ip) <= 0) { hp = gethostbyname(int_ip); if(hp && hp->h_addrtype == AF_INET) { for(ptr = hp->h_addr_list; ptr && *ptr; ptr++) { int_ip = inet_ntoa(*((struct in_addr *) *ptr)); result_ip = *((struct in_addr *) *ptr); /* TODO : deal with more than one ip per hostname */ break; } } else { syslog(LOG_ERR, "Failed to convert hostname '%s' to ip address", int_ip); ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } } /* check if NewInternalAddress is the client address */ if(GETFLAG(SECUREMODEMASK)) { if(h->clientaddr.s_addr != result_ip.s_addr) { syslog(LOG_INFO, "Client %s tried to redirect port to 
%s", inet_ntoa(h->clientaddr), int_ip); ClearNameValueList(&data); SoapError(h, 718, "ConflictInMappingEntry"); return; } } int_port = GetValueFromNameValueList(&data, "NewInternalPort"); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); desc = GetValueFromNameValueList(&data, "NewPortMappingDescription"); leaseduration_str = GetValueFromNameValueList(&data, "NewLeaseDuration"); if (!int_port || !ext_port || !protocol) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } eport = (unsigned short)atoi(ext_port); iport = (unsigned short)atoi(int_port); if (strcmp(ext_port, "*") == 0 || eport == 0) { ClearNameValueList(&data); SoapError(h, 716, "Wildcard not permited in ExtPort"); return; } leaseduration = leaseduration_str ? atoi(leaseduration_str) : 0; #ifdef IGD_V2 /* PortMappingLeaseDuration can be either a value between 1 and * 604800 seconds or the zero value (for infinite lease time). * Note that an infinite lease time can be only set by out-of-band * mechanisms like WWW-administration, remote management or local * management. * If a control point uses the value 0 to indicate an infinite lease * time mapping, it is REQUIRED that gateway uses the maximum value * instead (e.g. 604800 seconds) */ if(leaseduration == 0 || leaseduration > 604800) leaseduration = 604800; #endif syslog(LOG_INFO, "%s: ext port %hu to %s:%hu protocol %s for: %s leaseduration=%u rhost=%s", action, eport, int_ip, iport, protocol, desc, leaseduration, r_host ? r_host : "NULL"); r = upnp_redirect(r_host, eport, int_ip, iport, protocol, desc, leaseduration); ClearNameValueList(&data); /* possible error codes for AddPortMapping : * 402 - Invalid Args * 501 - Action Failed * 715 - Wildcard not permited in SrcAddr * 716 - Wildcard not permited in ExtPort * 718 - ConflictInMappingEntry * 724 - SamePortValuesRequired (deprecated in IGD v2) * 725 - OnlyPermanentLeasesSupported The NAT implementation only supports permanent lease times on port mappings (deprecated in IGD v2) * 726 - RemoteHostOnlySupportsWildcard RemoteHost must be a wildcard and cannot be a specific IP address or DNS name (deprecated in IGD v2) * 727 - ExternalPortOnlySupportsWildcard ExternalPort must be a wildcard and cannot be a specific port value (deprecated in IGD v2) * 728 - NoPortMapsAvailable There are not enough free ports available to complete the mapping (added in IGD v2) * 729 - ConflictWithOtherMechanisms (added in IGD v2) */ switch(r) { case 0: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*SERVICE_TYPE_WANIPC*/); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -4: #ifdef IGD_V2 SoapError(h, 729, "ConflictWithOtherMechanisms"); break; #endif /* IGD_V2 */ case -2: /* already redirected */ case -3: /* not permitted */ SoapError(h, 718, "ConflictInMappingEntry"); break; default: SoapError(h, 501, "ActionFailed"); } } /* AddAnyPortMapping was added in WANIPConnection v2 */ static void AddAnyPortMapping(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewReservedPort>%hu</NewReservedPort>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * int_ip, * int_port, * ext_port, * protocol, * desc; const char * r_host; unsigned short iport, eport; const char * leaseduration_str; unsigned int leaseduration; struct hostent *hp; /* getbyhostname() */ char ** ptr; /* getbyhostname() */ struct in_addr 
result_ip;/*unsigned char result_ip[16];*/ /* inet_pton() */ ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); int_port = GetValueFromNameValueList(&data, "NewInternalPort"); int_ip = GetValueFromNameValueList(&data, "NewInternalClient"); /* NewEnabled */ desc = GetValueFromNameValueList(&data, "NewPortMappingDescription"); leaseduration_str = GetValueFromNameValueList(&data, "NewLeaseDuration"); leaseduration = leaseduration_str ? atoi(leaseduration_str) : 0; if(leaseduration == 0) leaseduration = 604800; if (!int_ip || !ext_port || !int_port) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } eport = (unsigned short)atoi(ext_port); iport = (unsigned short)atoi(int_port); if(iport == 0 || !is_numeric(ext_port)) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif #endif /* if ip not valid assume hostname and convert */ if (inet_pton(AF_INET, int_ip, &result_ip) <= 0) { hp = gethostbyname(int_ip); if(hp && hp->h_addrtype == AF_INET) { for(ptr = hp->h_addr_list; ptr && *ptr; ptr++) { int_ip = inet_ntoa(*((struct in_addr *) *ptr)); result_ip = *((struct in_addr *) *ptr); /* TODO : deal with more than one ip per hostname */ break; } } else { syslog(LOG_ERR, "Failed to convert hostname '%s' to ip address", int_ip); ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } } /* check if NewInternalAddress is the client address */ if(GETFLAG(SECUREMODEMASK)) { if(h->clientaddr.s_addr != result_ip.s_addr) { syslog(LOG_INFO, "Client %s tried to redirect port to %s", inet_ntoa(h->clientaddr), int_ip); ClearNameValueList(&data); SoapError(h, 606, "Action not authorized"); return; } } /* TODO : accept a different external port * have some smart strategy to choose the port */ for(;;) { r = upnp_redirect(r_host, eport, int_ip, iport, protocol, desc, leaseduration); if(r==-2 && eport < 65535) { eport++; } else { break; } } ClearNameValueList(&data); switch(r) { case 0: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ eport, action); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -2: /* already redirected */ SoapError(h, 718, "ConflictInMappingEntry"); break; case -3: /* not permitted */ SoapError(h, 606, "Action not authorized"); break; default: SoapError(h, 501, "ActionFailed"); } } static void GetSpecificPortMappingEntry(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewInternalPort>%u</NewInternalPort>" "<NewInternalClient>%s</NewInternalClient>" "<NewEnabled>1</NewEnabled>" "<NewPortMappingDescription>%s</NewPortMappingDescription>" "<NewLeaseDuration>%u</NewLeaseDuration>" "</u:%sResponse>"; char body[1024]; int bodylen; struct NameValueParserData data; const char * r_host, * ext_port, * protocol; unsigned short eport, iport; char int_ip[32]; char desc[64]; unsigned int leaseduration = 0; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol 
= GetValueFromNameValueList(&data, "NewProtocol"); #ifdef UPNP_STRICT if(!ext_port || !protocol || !r_host) #else if(!ext_port || !protocol) #endif { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif #endif eport = (unsigned short)atoi(ext_port); if(eport == 0) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } /* TODO : add r_host as an input parameter ... * We prevent several Port Mapping with same external port * but different remoteHost to be set up, so that is not * a priority. */ r = upnp_get_redirection_infos(eport, protocol, &iport, int_ip, sizeof(int_ip), desc, sizeof(desc), NULL, 0, &leaseduration); if(r < 0) { SoapError(h, 714, "NoSuchEntryInArray"); } else { syslog(LOG_INFO, "%s: rhost='%s' %s %s found => %s:%u desc='%s'", action, r_host ? r_host : "NULL", ext_port, protocol, int_ip, (unsigned int)iport, desc); bodylen = snprintf(body, sizeof(body), resp, action, ns/*SERVICE_TYPE_WANIPC*/, (unsigned int)iport, int_ip, desc, leaseduration, action); BuildSendAndCloseSoapResp(h, body, bodylen); } ClearNameValueList(&data); } static void DeletePortMapping(struct upnphttp * h, const char * action, const char * ns) { int r; /*static const char resp[] = "<u:DeletePortMappingResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\">" "</u:DeletePortMappingResponse>";*/ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * ext_port, * protocol; unsigned short eport; #ifdef UPNP_STRICT const char * r_host; #endif /* UPNP_STRICT */ ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); ext_port = GetValueFromNameValueList(&data, "NewExternalPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); #ifdef UPNP_STRICT r_host = GetValueFromNameValueList(&data, "NewRemoteHost"); #endif /* UPNP_STRICT */ #ifdef UPNP_STRICT if(!ext_port || !protocol || !r_host) #else if(!ext_port || !protocol) #endif /* UPNP_STRICT */ { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #ifndef SUPPORT_REMOTEHOST #ifdef UPNP_STRICT if (r_host && (r_host[0] != '\0') && (0 != strcmp(r_host, "*"))) { ClearNameValueList(&data); SoapError(h, 726, "RemoteHostOnlySupportsWildcard"); return; } #endif /* UPNP_STRICT */ #endif /* SUPPORT_REMOTEHOST */ eport = (unsigned short)atoi(ext_port); if(eport == 0) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } syslog(LOG_INFO, "%s: external port: %hu, protocol: %s", action, eport, protocol); /* if in secure mode, check the IP * Removing a redirection is not a security threat, * just an annoyance for the user using it. So this is not * a priority. 
*/ if(GETFLAG(SECUREMODEMASK)) { char int_ip[32]; struct in_addr int_ip_addr; unsigned short iport; unsigned int leaseduration = 0; r = upnp_get_redirection_infos(eport, protocol, &iport, int_ip, sizeof(int_ip), NULL, 0, NULL, 0, &leaseduration); if(r >= 0) { if(inet_pton(AF_INET, int_ip, &int_ip_addr) > 0) { if(h->clientaddr.s_addr != int_ip_addr.s_addr) { SoapError(h, 606, "Action not authorized"); /*SoapError(h, 714, "NoSuchEntryInArray");*/ ClearNameValueList(&data); return; } } } } r = upnp_delete_redirection(eport, protocol); if(r < 0) { SoapError(h, 714, "NoSuchEntryInArray"); } else { bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } ClearNameValueList(&data); } /* DeletePortMappingRange was added in IGD spec v2 */ static void DeletePortMappingRange(struct upnphttp * h, const char * action, const char * ns) { int r = -1; /*static const char resp[] = "<u:DeletePortMappingRangeResponse " "xmlns:u=\"" SERVICE_TYPE_WANIPC "\">" "</u:DeletePortMappingRangeResponse>";*/ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * protocol; const char * startport_s, * endport_s; unsigned short startport, endport; /*int manage;*/ unsigned short * port_list; unsigned int i, number = 0; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); startport_s = GetValueFromNameValueList(&data, "NewStartPort"); endport_s = GetValueFromNameValueList(&data, "NewEndPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); /*manage = atoi(GetValueFromNameValueList(&data, "NewManage"));*/ if(startport_s == NULL || endport_s == NULL || protocol == NULL || !is_numeric(startport_s) || !is_numeric(endport_s)) { SoapError(h, 402, "Invalid Args"); ClearNameValueList(&data); return; } startport = (unsigned short)atoi(startport_s); endport = (unsigned short)atoi(endport_s); /* possible errors : 606 - Action not authorized 730 - PortMappingNotFound 733 - InconsistentParameter */ if(startport > endport) { SoapError(h, 733, "InconsistentParameter"); ClearNameValueList(&data); return; } syslog(LOG_INFO, "%s: deleting external ports: %hu-%hu, protocol: %s", action, startport, endport, protocol); port_list = upnp_get_portmappings_in_range(startport, endport, protocol, &number); if(number == 0) { SoapError(h, 730, "PortMappingNotFound"); ClearNameValueList(&data); free(port_list); return; } for(i = 0; i < number; i++) { r = upnp_delete_redirection(port_list[i], protocol); syslog(LOG_INFO, "%s: deleting external port: %hu, protocol: %s: %s", action, port_list[i], protocol, r < 0 ? 
"failed" : "ok"); } free(port_list); bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); ClearNameValueList(&data); } static void GetGenericPortMappingEntry(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewRemoteHost>%s</NewRemoteHost>" "<NewExternalPort>%u</NewExternalPort>" "<NewProtocol>%s</NewProtocol>" "<NewInternalPort>%u</NewInternalPort>" "<NewInternalClient>%s</NewInternalClient>" "<NewEnabled>1</NewEnabled>" "<NewPortMappingDescription>%s</NewPortMappingDescription>" "<NewLeaseDuration>%u</NewLeaseDuration>" "</u:%sResponse>"; long int index = 0; unsigned short eport, iport; const char * m_index; char * endptr; char protocol[8], iaddr[32]; char desc[64]; char rhost[40]; unsigned int leaseduration = 0; struct NameValueParserData data; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); m_index = GetValueFromNameValueList(&data, "NewPortMappingIndex"); if(!m_index) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } errno = 0; /* To distinguish success/failure after call */ index = strtol(m_index, &endptr, 10); if((errno == ERANGE && (index == LONG_MAX || index == LONG_MIN)) || (errno != 0 && index == 0) || (m_index == endptr)) { /* should condition (*endptr != '\0') be also an error ? */ if(m_index == endptr) syslog(LOG_WARNING, "%s: no digits were found in <%s>", "GetGenericPortMappingEntry", "NewPortMappingIndex"); else syslog(LOG_WARNING, "%s: strtol('%s'): %m", "GetGenericPortMappingEntry", m_index); ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } syslog(LOG_INFO, "%s: index=%d", action, (int)index); rhost[0] = '\0'; r = upnp_get_redirection_infos_by_index((int)index, &eport, protocol, &iport, iaddr, sizeof(iaddr), desc, sizeof(desc), rhost, sizeof(rhost), &leaseduration); if(r < 0) { SoapError(h, 713, "SpecifiedArrayIndexInvalid"); } else { int bodylen; char body[2048]; bodylen = snprintf(body, sizeof(body), resp, action, ns, /*SERVICE_TYPE_WANIPC,*/ rhost, (unsigned int)eport, protocol, (unsigned int)iport, iaddr, desc, leaseduration, action); BuildSendAndCloseSoapResp(h, body, bodylen); } ClearNameValueList(&data); } /* GetListOfPortMappings was added in the IGD v2 specification */ static void GetListOfPortMappings(struct upnphttp * h, const char * action, const char * ns) { static const char resp_start[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<NewPortListing><![CDATA["; static const char resp_end[] = "]]></NewPortListing>" "</u:%sResponse>"; static const char list_start[] = "<p:PortMappingList xmlns:p=\"urn:schemas-upnp-org:gw:WANIPConnection\"" " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" " xsi:schemaLocation=\"urn:schemas-upnp-org:gw:WANIPConnection" " http://www.upnp.org/schemas/gw/WANIPConnection-v2.xsd\">"; static const char list_end[] = "</p:PortMappingList>"; static const char entry[] = "<p:PortMappingEntry>" "<p:NewRemoteHost>%s</p:NewRemoteHost>" "<p:NewExternalPort>%hu</p:NewExternalPort>" "<p:NewProtocol>%s</p:NewProtocol>" "<p:NewInternalPort>%hu</p:NewInternalPort>" "<p:NewInternalClient>%s</p:NewInternalClient>" "<p:NewEnabled>1</p:NewEnabled>" "<p:NewDescription>%s</p:NewDescription>" "<p:NewLeaseTime>%u</p:NewLeaseTime>" "</p:PortMappingEntry>"; char * body; size_t bodyalloc; int bodylen; int r = -1; unsigned short iport; char int_ip[32]; char desc[64]; char rhost[64]; unsigned int leaseduration = 0; struct NameValueParserData data; 
const char * startport_s, * endport_s; unsigned short startport, endport; const char * protocol; /*int manage;*/ const char * number_s; int number; unsigned short * port_list; unsigned int i, list_size = 0; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); startport_s = GetValueFromNameValueList(&data, "NewStartPort"); endport_s = GetValueFromNameValueList(&data, "NewEndPort"); protocol = GetValueFromNameValueList(&data, "NewProtocol"); /*manage_s = GetValueFromNameValueList(&data, "NewManage");*/ number_s = GetValueFromNameValueList(&data, "NewNumberOfPorts"); if(startport_s == NULL || endport_s == NULL || protocol == NULL || number_s == NULL || !is_numeric(number_s) || !is_numeric(startport_s) || !is_numeric(endport_s)) { SoapError(h, 402, "Invalid Args"); ClearNameValueList(&data); return; } startport = (unsigned short)atoi(startport_s); endport = (unsigned short)atoi(endport_s); /*manage = atoi(manage_s);*/ number = atoi(number_s); if(number == 0) number = 1000; /* return up to 1000 mappings by default */ if(startport > endport) { SoapError(h, 733, "InconsistentParameter"); ClearNameValueList(&data); return; } /* build the PortMappingList xml document : <p:PortMappingList xmlns:p="urn:schemas-upnp-org:gw:WANIPConnection" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:schemas-upnp-org:gw:WANIPConnection http://www.upnp.org/schemas/gw/WANIPConnection-v2.xsd"> <p:PortMappingEntry> <p:NewRemoteHost>202.233.2.1</p:NewRemoteHost> <p:NewExternalPort>2345</p:NewExternalPort> <p:NewProtocol>TCP</p:NewProtocol> <p:NewInternalPort>2345</p:NewInternalPort> <p:NewInternalClient>192.168.1.137</p:NewInternalClient> <p:NewEnabled>1</p:NewEnabled> <p:NewDescription>dooom</p:NewDescription> <p:NewLeaseTime>345</p:NewLeaseTime> </p:PortMappingEntry> </p:PortMappingList> */ bodyalloc = 4096; body = malloc(bodyalloc); if(!body) { ClearNameValueList(&data); SoapError(h, 501, "ActionFailed"); return; } bodylen = snprintf(body, bodyalloc, resp_start, action, ns/*SERVICE_TYPE_WANIPC*/); if(bodylen < 0) { SoapError(h, 501, "ActionFailed"); free(body); return; } memcpy(body+bodylen, list_start, sizeof(list_start)); bodylen += (sizeof(list_start) - 1); port_list = upnp_get_portmappings_in_range(startport, endport, protocol, &list_size); /* loop through port mappings */ for(i = 0; number > 0 && i < list_size; i++) { /* have a margin of 1024 bytes to store the new entry */ if((unsigned int)bodylen + 1024 > bodyalloc) { char * body_sav = body; bodyalloc += 4096; body = realloc(body, bodyalloc); if(!body) { syslog(LOG_CRIT, "realloc(%p, %u) FAILED", body_sav, (unsigned)bodyalloc); ClearNameValueList(&data); SoapError(h, 501, "ActionFailed"); free(body_sav); free(port_list); return; } } rhost[0] = '\0'; r = upnp_get_redirection_infos(port_list[i], protocol, &iport, int_ip, sizeof(int_ip), desc, sizeof(desc), rhost, sizeof(rhost), &leaseduration); if(r == 0) { bodylen += snprintf(body+bodylen, bodyalloc-bodylen, entry, rhost, port_list[i], protocol, iport, int_ip, desc, leaseduration); number--; } } free(port_list); port_list = NULL; if((bodylen + sizeof(list_end) + 1024) > bodyalloc) { char * body_sav = body; bodyalloc += (sizeof(list_end) + 1024); body = realloc(body, bodyalloc); if(!body) { syslog(LOG_CRIT, "realloc(%p, %u) FAILED", body_sav, (unsigned)bodyalloc); ClearNameValueList(&data); SoapError(h, 501, "ActionFailed"); free(body_sav); return; } } memcpy(body+bodylen, list_end, sizeof(list_end)); bodylen += (sizeof(list_end) - 1); bodylen += 
snprintf(body+bodylen, bodyalloc-bodylen, resp_end, action); BuildSendAndCloseSoapResp(h, body, bodylen); free(body); ClearNameValueList(&data); } #ifdef ENABLE_L3F_SERVICE static void SetDefaultConnectionService(struct upnphttp * h, const char * action, const char * ns) { /*static const char resp[] = "<u:SetDefaultConnectionServiceResponse " "xmlns:u=\"urn:schemas-upnp-org:service:Layer3Forwarding:1\">" "</u:SetDefaultConnectionServiceResponse>";*/ static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; char * p; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); p = GetValueFromNameValueList(&data, "NewDefaultConnectionService"); if(p) { /* 720 InvalidDeviceUUID * 721 InvalidServiceID * 723 InvalidConnServiceSelection */ #ifdef UPNP_STRICT char * service; service = strchr(p, ','); if(0 != memcmp(uuidvalue_wcd, p, sizeof("uuid:00000000-0000-0000-0000-000000000000") - 1)) { SoapError(h, 720, "InvalidDeviceUUID"); } else if(service == NULL || 0 != strcmp(service+1, SERVICE_ID_WANIPC)) { SoapError(h, 721, "InvalidServiceID"); } else #endif { syslog(LOG_INFO, "%s(%s) : Ignored", action, p); bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } } else { /* missing argument */ SoapError(h, 402, "Invalid Args"); } ClearNameValueList(&data); } static void GetDefaultConnectionService(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" #ifdef IGD_V2 "<NewDefaultConnectionService>%s:WANConnectionDevice:2," #else "<NewDefaultConnectionService>%s:WANConnectionDevice:1," #endif SERVICE_ID_WANIPC "</NewDefaultConnectionService>" "</u:%sResponse>"; /* example from UPnP_IGD_Layer3Forwarding 1.0.pdf : * uuid:44f5824f-c57d-418c-a131-f22b34e14111:WANConnectionDevice:1, * urn:upnp-org:serviceId:WANPPPConn1 */ char body[1024]; int bodylen; /* namespace : urn:schemas-upnp-org:service:Layer3Forwarding:1 */ bodylen = snprintf(body, sizeof(body), resp, action, ns, uuidvalue_wcd, action); BuildSendAndCloseSoapResp(h, body, bodylen); } #endif /* Added for compliance with WANIPConnection v2 */ static void SetConnectionType(struct upnphttp * h, const char * action, const char * ns) { #ifdef UPNP_STRICT const char * connection_type; #endif /* UPNP_STRICT */ struct NameValueParserData data; UNUSED(action); UNUSED(ns); ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); #ifdef UPNP_STRICT connection_type = GetValueFromNameValueList(&data, "NewConnectionType"); if(!connection_type) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } #endif /* UPNP_STRICT */ /* Unconfigured, IP_Routed, IP_Bridged */ ClearNameValueList(&data); /* always return a ReadOnly error */ SoapError(h, 731, "ReadOnly"); } /* Added for compliance with WANIPConnection v2 */ static void RequestConnection(struct upnphttp * h, const char * action, const char * ns) { UNUSED(action); UNUSED(ns); SoapError(h, 606, "Action not authorized"); } /* Added for compliance with WANIPConnection v2 */ static void ForceTermination(struct upnphttp * h, const char * action, const char * ns) { UNUSED(action); UNUSED(ns); SoapError(h, 606, "Action not authorized"); } /* If a control point calls QueryStateVariable on a state variable that is not buffered in memory within (or otherwise available from) the service, the service must return a SOAP fault with an errorCode of 404 Invalid Var. 
QueryStateVariable remains useful as a limited test tool but may not be part of some future versions of UPnP. */ static void QueryStateVariable(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<return>%s</return>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * var_name; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); /*var_name = GetValueFromNameValueList(&data, "QueryStateVariable"); */ /*var_name = GetValueFromNameValueListIgnoreNS(&data, "varName");*/ var_name = GetValueFromNameValueList(&data, "varName"); /*syslog(LOG_INFO, "QueryStateVariable(%.40s)", var_name); */ if(!var_name) { SoapError(h, 402, "Invalid Args"); } else if(strcmp(var_name, "ConnectionStatus") == 0) { const char * status; status = get_wan_connection_status_str(ext_if_name); bodylen = snprintf(body, sizeof(body), resp, action, ns,/*"urn:schemas-upnp-org:control-1-0",*/ status, action); BuildSendAndCloseSoapResp(h, body, bodylen); } #if 0 /* not useful */ else if(strcmp(var_name, "ConnectionType") == 0) { bodylen = snprintf(body, sizeof(body), resp, "IP_Routed"); BuildSendAndCloseSoapResp(h, body, bodylen); } else if(strcmp(var_name, "LastConnectionError") == 0) { bodylen = snprintf(body, sizeof(body), resp, "ERROR_NONE"); BuildSendAndCloseSoapResp(h, body, bodylen); } #endif else if(strcmp(var_name, "PortMappingNumberOfEntries") == 0) { char strn[10]; snprintf(strn, sizeof(strn), "%i", upnp_get_portmapping_number_of_entries()); bodylen = snprintf(body, sizeof(body), resp, action, ns,/*"urn:schemas-upnp-org:control-1-0",*/ strn, action); BuildSendAndCloseSoapResp(h, body, bodylen); } else { syslog(LOG_NOTICE, "%s: Unknown: %s", action, var_name?var_name:""); SoapError(h, 404, "Invalid Var"); } ClearNameValueList(&data); } #ifdef ENABLE_6FC_SERVICE #ifndef ENABLE_IPV6 #error "ENABLE_6FC_SERVICE needs ENABLE_IPV6" #endif /* WANIPv6FirewallControl actions */ static void GetFirewallStatus(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<FirewallEnabled>%d</FirewallEnabled>" "<InboundPinholeAllowed>%d</InboundPinholeAllowed>" "</u:%sResponse>"; char body[512]; int bodylen; bodylen = snprintf(body, sizeof(body), resp, action, ns, /*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1",*/ GETFLAG(IPV6FCFWDISABLEDMASK) ? 0 : 1, GETFLAG(IPV6FCINBOUNDDISALLOWEDMASK) ? 
0 : 1, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static int CheckStatus(struct upnphttp * h) { if (GETFLAG(IPV6FCFWDISABLEDMASK)) { SoapError(h, 702, "FirewallDisabled"); return 0; } else if(GETFLAG(IPV6FCINBOUNDDISALLOWEDMASK)) { SoapError(h, 703, "InboundPinholeNotAllowed"); return 0; } else return 1; } #if 0 static int connecthostport(const char * host, unsigned short port, char * result) { int s, n; char hostname[INET6_ADDRSTRLEN]; char port_str[8], ifname[8], tmp[4]; struct addrinfo *ai, *p; struct addrinfo hints; memset(&hints, 0, sizeof(hints)); /* hints.ai_flags = AI_ADDRCONFIG; */ #ifdef AI_NUMERICSERV hints.ai_flags = AI_NUMERICSERV; #endif hints.ai_socktype = SOCK_STREAM; hints.ai_family = AF_UNSPEC; /* AF_INET, AF_INET6 or AF_UNSPEC */ /* hints.ai_protocol = IPPROTO_TCP; */ snprintf(port_str, sizeof(port_str), "%hu", port); strcpy(hostname, host); if(!strncmp(host, "fe80", 4)) { printf("Using an linklocal address\n"); strcpy(ifname, "%"); snprintf(tmp, sizeof(tmp), "%d", linklocal_index); strcat(ifname, tmp); strcat(hostname, ifname); printf("host: %s\n", hostname); } n = getaddrinfo(hostname, port_str, &hints, &ai); if(n != 0) { fprintf(stderr, "getaddrinfo() error : %s\n", gai_strerror(n)); return -1; } s = -1; for(p = ai; p; p = p->ai_next) { #ifdef DEBUG char tmp_host[256]; char tmp_service[256]; printf("ai_family=%d ai_socktype=%d ai_protocol=%d ai_addrlen=%d\n ", p->ai_family, p->ai_socktype, p->ai_protocol, p->ai_addrlen); getnameinfo(p->ai_addr, p->ai_addrlen, tmp_host, sizeof(tmp_host), tmp_service, sizeof(tmp_service), NI_NUMERICHOST | NI_NUMERICSERV); printf(" host=%s service=%s\n", tmp_host, tmp_service); #endif inet_ntop(AF_INET6, &(((struct sockaddr_in6 *)p->ai_addr)->sin6_addr), result, INET6_ADDRSTRLEN); return 0; } freeaddrinfo(ai); } #endif /* Check the security policy right */ static int PinholeVerification(struct upnphttp * h, char * int_ip, unsigned short int_port) { int n; char senderAddr[INET6_ADDRSTRLEN]=""; struct addrinfo hints, *ai, *p; struct in6_addr result_ip; /* Pinhole InternalClient address must correspond to the action sender */ syslog(LOG_INFO, "Checking internal IP@ and port (Security policy purpose)"); hints.ai_socktype = SOCK_STREAM; hints.ai_family = AF_UNSPEC; /* if ip not valid assume hostname and convert */ if (inet_pton(AF_INET6, int_ip, &result_ip) <= 0) { n = getaddrinfo(int_ip, NULL, &hints, &ai); if(!n && ai->ai_family == AF_INET6) { for(p = ai; p; p = p->ai_next) { inet_ntop(AF_INET6, (struct in6_addr *) p, int_ip, sizeof(struct in6_addr)); result_ip = *((struct in6_addr *) p); /* TODO : deal with more than one ip per hostname */ break; } } else { syslog(LOG_ERR, "Failed to convert hostname '%s' to ip address", int_ip); SoapError(h, 402, "Invalid Args"); return -1; } freeaddrinfo(p); } if(inet_ntop(AF_INET6, &(h->clientaddr_v6), senderAddr, INET6_ADDRSTRLEN) == NULL) { syslog(LOG_ERR, "inet_ntop: %m"); } #ifdef DEBUG printf("\tPinholeVerification:\n\t\tCompare sender @: %s\n\t\t to intClient @: %s\n", senderAddr, int_ip); #endif if(strcmp(senderAddr, int_ip) != 0) if(h->clientaddr_v6.s6_addr != result_ip.s6_addr) { syslog(LOG_INFO, "Client %s tried to access pinhole for internal %s and is not authorized to do it", senderAddr, int_ip); SoapError(h, 606, "Action not authorized"); return 0; } /* Pinhole InternalPort must be greater than or equal to 1024 */ if (int_port < 1024) { syslog(LOG_INFO, "Client %s tried to access pinhole with port < 1024 and is not authorized to do it", senderAddr); SoapError(h, 606, "Action 
not authorized"); return 0; } return 1; } static void AddPinhole(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<UniqueID>%d</UniqueID>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; char * rem_host, * rem_port, * int_ip, * int_port, * protocol, * leaseTime; int uid = 0; unsigned short iport, rport; int ltime; long proto; char rem_ip[INET6_ADDRSTRLEN]; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); rem_host = GetValueFromNameValueList(&data, "RemoteHost"); rem_port = GetValueFromNameValueList(&data, "RemotePort"); int_ip = GetValueFromNameValueList(&data, "InternalClient"); int_port = GetValueFromNameValueList(&data, "InternalPort"); protocol = GetValueFromNameValueList(&data, "Protocol"); leaseTime = GetValueFromNameValueList(&data, "LeaseTime"); rport = (unsigned short)(rem_port ? atoi(rem_port) : 0); iport = (unsigned short)(int_port ? atoi(int_port) : 0); ltime = leaseTime ? atoi(leaseTime) : -1; errno = 0; proto = protocol ? strtol(protocol, NULL, 0) : -1; if(errno != 0 || proto > 65535 || proto < 0) { SoapError(h, 402, "Invalid Args"); goto clear_and_exit; } if(iport == 0) { SoapError(h, 706, "InternalPortWilcardingNotAllowed"); goto clear_and_exit; } /* In particular, [IGD2] RECOMMENDS that unauthenticated and * unauthorized control points are only allowed to invoke * this action with: * - InternalPort value greater than or equal to 1024, * - InternalClient value equals to the control point's IP address. * It is REQUIRED that InternalClient cannot be one of IPv6 * addresses used by the gateway. */ if(!int_ip || int_ip[0] == '\0' || 0 == strcmp(int_ip, "*")) { SoapError(h, 708, "WildCardNotPermittedInSrcIP"); goto clear_and_exit; } /* I guess it is useless to convert int_ip to literal ipv6 address */ if(rem_host) { /* trim */ while(isspace(rem_host[0])) rem_host++; } /* rem_host should be converted to literal ipv6 : */ if(rem_host && (rem_host[0] != '\0') && (rem_host[0] != '*')) { struct addrinfo *ai, *p; struct addrinfo hints; int err; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_INET6; /*hints.ai_flags = */ /* hints.ai_protocol = proto; */ err = getaddrinfo(rem_host, rem_port, &hints, &ai); if(err == 0) { /* take the 1st IPv6 address */ for(p = ai; p; p = p->ai_next) { if(p->ai_family == AF_INET6) { inet_ntop(AF_INET6, &(((struct sockaddr_in6 *)p->ai_addr)->sin6_addr), rem_ip, sizeof(rem_ip)); syslog(LOG_INFO, "resolved '%s' to '%s'", rem_host, rem_ip); rem_host = rem_ip; break; } } freeaddrinfo(ai); } else { syslog(LOG_WARNING, "AddPinhole : getaddrinfo(%s) : %s", rem_host, gai_strerror(err)); #if 0 SoapError(h, 402, "Invalid Args"); goto clear_and_exit; #endif } } if(proto == 65535) { SoapError(h, 707, "ProtocolWilcardingNotAllowed"); goto clear_and_exit; } if(proto != IPPROTO_UDP && proto != IPPROTO_TCP #ifdef IPPROTO_UDPITE && atoi(protocol) != IPPROTO_UDPLITE #endif ) { SoapError(h, 705, "ProtocolNotSupported"); goto clear_and_exit; } if(ltime < 1 || ltime > 86400) { syslog(LOG_WARNING, "%s: LeaseTime=%d not supported, (ip=%s)", action, ltime, int_ip); SoapError(h, 402, "Invalid Args"); goto clear_and_exit; } if(PinholeVerification(h, int_ip, iport) <= 0) goto clear_and_exit; syslog(LOG_INFO, "%s: (inbound) from [%s]:%hu to [%s]:%hu with proto %ld during %d sec", action, rem_host?rem_host:"any", rport, int_ip, iport, proto, ltime); /* In cases where the RemoteHost, RemotePort, 
InternalPort, * InternalClient and Protocol are the same than an existing pinhole, * but LeaseTime is different, the device MUST extend the existing * pinhole's lease time and return the UniqueID of the existing pinhole. */ r = upnp_add_inboundpinhole(rem_host, rport, int_ip, iport, proto, "IGD2 pinhole", ltime, &uid); switch(r) { case 1: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, uid, action); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -1: /* not permitted */ SoapError(h, 701, "PinholeSpaceExhausted"); break; default: SoapError(h, 501, "ActionFailed"); break; } /* 606 Action not authorized * 701 PinholeSpaceExhausted * 702 FirewallDisabled * 703 InboundPinholeNotAllowed * 705 ProtocolNotSupported * 706 InternalPortWildcardingNotAllowed * 707 ProtocolWildcardingNotAllowed * 708 WildCardNotPermittedInSrcIP */ clear_and_exit: ClearNameValueList(&data); } static void UpdatePinhole(struct upnphttp * h, const char * action, const char * ns) { #if 0 static const char resp[] = "<u:UpdatePinholeResponse " "xmlns:u=\"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1\">" "</u:UpdatePinholeResponse>"; #endif static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * uid_str, * leaseTime; char iaddr[INET6_ADDRSTRLEN]; unsigned short iport; int ltime; int uid; int n; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); uid_str = GetValueFromNameValueList(&data, "UniqueID"); leaseTime = GetValueFromNameValueList(&data, "NewLeaseTime"); uid = uid_str ? atoi(uid_str) : -1; ltime = leaseTime ? atoi(leaseTime) : -1; ClearNameValueList(&data); if(uid < 0 || uid > 65535 || ltime <= 0 || ltime > 86400) { SoapError(h, 402, "Invalid Args"); return; } /* Check that client is not updating an pinhole * it doesn't have access to, because of its public access */ n = upnp_get_pinhole_info(uid, NULL, 0, NULL, iaddr, sizeof(iaddr), &iport, NULL, /* proto */ NULL, 0, /* desc, desclen */ NULL, NULL); if (n >= 0) { if(PinholeVerification(h, iaddr, iport) <= 0) return; } else if(n == -2) { SoapError(h, 704, "NoSuchEntry"); return; } else { SoapError(h, 501, "ActionFailed"); return; } syslog(LOG_INFO, "%s: (inbound) updating lease duration to %d for pinhole with ID: %d", action, ltime, uid); n = upnp_update_inboundpinhole(uid, ltime); if(n == -1) SoapError(h, 704, "NoSuchEntry"); else if(n < 0) SoapError(h, 501, "ActionFailed"); else { bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } } static void GetOutboundPinholeTimeout(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<OutboundPinholeTimeout>%d</OutboundPinholeTimeout>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; char * int_ip, * int_port, * rem_host, * rem_port, * protocol; int opt=0; /*int proto=0;*/ unsigned short iport, rport; if (GETFLAG(IPV6FCFWDISABLEDMASK)) { SoapError(h, 702, "FirewallDisabled"); return; } ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); int_ip = GetValueFromNameValueList(&data, "InternalClient"); int_port = GetValueFromNameValueList(&data, "InternalPort"); rem_host = GetValueFromNameValueList(&data, "RemoteHost"); rem_port = GetValueFromNameValueList(&data, "RemotePort"); protocol = 
GetValueFromNameValueList(&data, "Protocol"); if (!int_port || !rem_port || !protocol) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } rport = (unsigned short)atoi(rem_port); iport = (unsigned short)atoi(int_port); /*proto = atoi(protocol);*/ syslog(LOG_INFO, "%s: retrieving timeout for outbound pinhole from [%s]:%hu to [%s]:%hu protocol %s", action, int_ip, iport,rem_host, rport, protocol); /* TODO */ r = -1;/*upnp_check_outbound_pinhole(proto, &opt);*/ switch(r) { case 1: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, opt, action); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -5: /* Protocol not supported */ SoapError(h, 705, "ProtocolNotSupported"); break; default: SoapError(h, 501, "ActionFailed"); } ClearNameValueList(&data); } static void DeletePinhole(struct upnphttp * h, const char * action, const char * ns) { int n; #if 0 static const char resp[] = "<u:DeletePinholeResponse " "xmlns:u=\"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1\">" "</u:DeletePinholeResponse>"; #endif static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * uid_str; char iaddr[INET6_ADDRSTRLEN]; int proto; unsigned short iport; unsigned int leasetime; int uid; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); uid_str = GetValueFromNameValueList(&data, "UniqueID"); uid = uid_str ? atoi(uid_str) : -1; ClearNameValueList(&data); if(uid < 0 || uid > 65535) { SoapError(h, 402, "Invalid Args"); return; } /* Check that client is not deleting an pinhole * it doesn't have access to, because of its public access */ n = upnp_get_pinhole_info(uid, NULL, 0, NULL, iaddr, sizeof(iaddr), &iport, &proto, NULL, 0, /* desc, desclen */ &leasetime, NULL); if (n >= 0) { if(PinholeVerification(h, iaddr, iport) <= 0) return; } else if(n == -2) { SoapError(h, 704, "NoSuchEntry"); return; } else { SoapError(h, 501, "ActionFailed"); return; } n = upnp_delete_inboundpinhole(uid); if(n < 0) { syslog(LOG_INFO, "%s: (inbound) failed to remove pinhole with ID: %d", action, uid); SoapError(h, 501, "ActionFailed"); return; } syslog(LOG_INFO, "%s: (inbound) pinhole with ID %d successfully removed", action, uid); bodylen = snprintf(body, sizeof(body), resp, action, ns, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void CheckPinholeWorking(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<IsWorking>%d</IsWorking>" "</u:%sResponse>"; char body[512]; int bodylen; int r; struct NameValueParserData data; const char * uid_str; int uid; char iaddr[INET6_ADDRSTRLEN]; unsigned short iport; unsigned int packets; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); uid_str = GetValueFromNameValueList(&data, "UniqueID"); uid = uid_str ? 
atoi(uid_str) : -1; ClearNameValueList(&data); if(uid < 0 || uid > 65535) { SoapError(h, 402, "Invalid Args"); return; } /* Check that client is not checking a pinhole * it doesn't have access to, because of its public access */ r = upnp_get_pinhole_info(uid, NULL, 0, NULL, iaddr, sizeof(iaddr), &iport, NULL, /* proto */ NULL, 0, /* desc, desclen */ NULL, &packets); if (r >= 0) { if(PinholeVerification(h, iaddr, iport) <= 0) return ; if(packets == 0) { SoapError(h, 709, "NoPacketSent"); return; } bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, 1, action); BuildSendAndCloseSoapResp(h, body, bodylen); } else if(r == -2) SoapError(h, 704, "NoSuchEntry"); else SoapError(h, 501, "ActionFailed"); } static void GetPinholePackets(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<PinholePackets>%u</PinholePackets>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; const char * uid_str; int n; char iaddr[INET6_ADDRSTRLEN]; unsigned short iport; unsigned int packets = 0; int uid; int proto; unsigned int leasetime; if(CheckStatus(h)==0) return; ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); uid_str = GetValueFromNameValueList(&data, "UniqueID"); uid = uid_str ? atoi(uid_str) : -1; ClearNameValueList(&data); if(uid < 0 || uid > 65535) { SoapError(h, 402, "Invalid Args"); return; } /* Check that client is not getting infos of a pinhole * it doesn't have access to, because of its public access */ n = upnp_get_pinhole_info(uid, NULL, 0, NULL, iaddr, sizeof(iaddr), &iport, &proto, NULL, 0, /* desc, desclen */ &leasetime, &packets); if (n >= 0) { if(PinholeVerification(h, iaddr, iport)<=0) return ; } #if 0 else if(r == -4 || r == -1) { SoapError(h, 704, "NoSuchEntry"); } #endif bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, packets, action); BuildSendAndCloseSoapResp(h, body, bodylen); } #endif #ifdef ENABLE_DP_SERVICE static void SendSetupMessage(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<OutMessage>%s</OutMessage>" "</u:%sResponse>"; char body[1024]; int bodylen; struct NameValueParserData data; const char * ProtocolType; /* string */ const char * InMessage; /* base64 */ const char * OutMessage = ""; /* base64 */ ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); ProtocolType = GetValueFromNameValueList(&data, "ProtocolType"); /* string */ InMessage = GetValueFromNameValueList(&data, "InMessage"); /* base64 */ if(ProtocolType == NULL || InMessage == NULL) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } /*if(strcmp(ProtocolType, "DeviceProtection:1") != 0)*/ if(strcmp(ProtocolType, "WPS") != 0) { ClearNameValueList(&data); SoapError(h, 600, "Argument Value Invalid"); /* 703 ? 
*/ return; } /* TODO : put here code for WPS */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:DeviceProtection:1"*/, OutMessage, action); BuildSendAndCloseSoapResp(h, body, bodylen); ClearNameValueList(&data); } static void GetSupportedProtocols(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<ProtocolList><![CDATA[%s]]></ProtocolList>" "</u:%sResponse>"; char body[1024]; int bodylen; const char * ProtocolList = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" "<SupportedProtocols xmlns=\"urn:schemas-upnp-org:gw:DeviceProtection\"" " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" " xsi:schemaLocation=\"urn:schemas-upnp-org:gw:DeviceProtection" " http://www.upnp.org/schemas/gw/DeviceProtection-v1.xsd\">" "<Introduction><Name>WPS</Name></Introduction>" "<Login><Name>PKCS5</Name></Login>" "</SupportedProtocols>"; bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:DeviceProtection:1"*/, ProtocolList, action); BuildSendAndCloseSoapResp(h, body, bodylen); } static void GetAssignedRoles(struct upnphttp * h, const char * action, const char * ns) { static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<RoleList>%s</RoleList>" "</u:%sResponse>"; char body[1024]; int bodylen; const char * RoleList = "Public"; /* list of roles separated by spaces */ #ifdef ENABLE_HTTPS if(h->ssl != NULL) { /* we should get the Roles of the session (based on client certificate) */ X509 * peercert; peercert = SSL_get_peer_certificate(h->ssl); if(peercert != NULL) { RoleList = "Admin Basic"; X509_free(peercert); } } #endif bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:DeviceProtection:1"*/, RoleList, action); BuildSendAndCloseSoapResp(h, body, bodylen); } #endif /* Windows XP as client send the following requests : * GetConnectionTypeInfo * GetNATRSIPStatus * ? GetTotalBytesSent - WANCommonInterfaceConfig * ? GetTotalBytesReceived - idem * ? GetTotalPacketsSent - idem * ? GetTotalPacketsReceived - idem * GetCommonLinkProperties - idem * GetStatusInfo - WANIPConnection * GetExternalIPAddress * QueryStateVariable / ConnectionStatus! 
*/ static const struct { const char * methodName; void (*methodImpl)(struct upnphttp *, const char *, const char *); } soapMethods[] = { /* WANCommonInterfaceConfig */ { "QueryStateVariable", QueryStateVariable}, { "GetTotalBytesSent", GetTotalBytesSent}, { "GetTotalBytesReceived", GetTotalBytesReceived}, { "GetTotalPacketsSent", GetTotalPacketsSent}, { "GetTotalPacketsReceived", GetTotalPacketsReceived}, { "GetCommonLinkProperties", GetCommonLinkProperties}, { "GetStatusInfo", GetStatusInfo}, /* WANIPConnection */ { "GetConnectionTypeInfo", GetConnectionTypeInfo }, { "GetNATRSIPStatus", GetNATRSIPStatus}, { "GetExternalIPAddress", GetExternalIPAddress}, { "AddPortMapping", AddPortMapping}, { "DeletePortMapping", DeletePortMapping}, { "GetGenericPortMappingEntry", GetGenericPortMappingEntry}, { "GetSpecificPortMappingEntry", GetSpecificPortMappingEntry}, /* Required in WANIPConnection:2 */ { "SetConnectionType", SetConnectionType}, { "RequestConnection", RequestConnection}, { "ForceTermination", ForceTermination}, { "AddAnyPortMapping", AddAnyPortMapping}, { "DeletePortMappingRange", DeletePortMappingRange}, { "GetListOfPortMappings", GetListOfPortMappings}, #ifdef ENABLE_L3F_SERVICE /* Layer3Forwarding */ { "SetDefaultConnectionService", SetDefaultConnectionService}, { "GetDefaultConnectionService", GetDefaultConnectionService}, #endif #ifdef ENABLE_6FC_SERVICE /* WANIPv6FirewallControl */ { "GetFirewallStatus", GetFirewallStatus}, /* Required */ { "AddPinhole", AddPinhole}, /* Required */ { "UpdatePinhole", UpdatePinhole}, /* Required */ { "GetOutboundPinholeTimeout", GetOutboundPinholeTimeout}, /* Optional */ { "DeletePinhole", DeletePinhole}, /* Required */ { "CheckPinholeWorking", CheckPinholeWorking}, /* Optional */ { "GetPinholePackets", GetPinholePackets}, /* Required */ #endif #ifdef ENABLE_DP_SERVICE /* DeviceProtection */ { "SendSetupMessage", SendSetupMessage}, /* Required */ { "GetSupportedProtocols", GetSupportedProtocols}, /* Required */ { "GetAssignedRoles", GetAssignedRoles}, /* Required */ #endif { 0, 0 } }; void ExecuteSoapAction(struct upnphttp * h, const char * action, int n) { char * p; char * p2; int i, len, methodlen; char namespace[256]; /* SoapAction example : * urn:schemas-upnp-org:service:WANIPConnection:1#GetStatusInfo */ p = strchr(action, '#'); if(p && (p - action) < n) { for(i = 0; i < ((int)sizeof(namespace) - 1) && (action + i) < p; i++) namespace[i] = action[i]; namespace[i] = '\0'; p++; p2 = strchr(p, '"'); if(p2 && (p2 - action) <= n) methodlen = p2 - p; else methodlen = n - (p - action); /*syslog(LOG_DEBUG, "SoapMethod: %.*s %d %d %p %p %d", methodlen, p, methodlen, n, action, p, (int)(p - action));*/ for(i = 0; soapMethods[i].methodName; i++) { len = strlen(soapMethods[i].methodName); if((len == methodlen) && memcmp(p, soapMethods[i].methodName, len) == 0) { #ifdef DEBUG syslog(LOG_DEBUG, "Remote Call of SoapMethod '%s' %s", soapMethods[i].methodName, namespace); #endif /* DEBUG */ soapMethods[i].methodImpl(h, soapMethods[i].methodName, namespace); return; } } syslog(LOG_NOTICE, "SoapMethod: Unknown: %.*s %s", methodlen, p, namespace); } else { syslog(LOG_NOTICE, "cannot parse SoapAction"); } SoapError(h, 401, "Invalid Action"); } /* Standard Errors: * * errorCode errorDescription Description * -------- ---------------- ----------- * 401 Invalid Action No action by that name at this service. 
* 402 Invalid Args Could be any of the following: not enough in args, * too many in args, no in arg by that name, * one or more in args are of the wrong data type. * 403 Out of Sync Out of synchronization. * 501 Action Failed May be returned in current state of service * prevents invoking that action. * 600-699 TBD Common action errors. Defined by UPnP Forum * Technical Committee. * 700-799 TBD Action-specific errors for standard actions. * Defined by UPnP Forum working committee. * 800-899 TBD Action-specific errors for non-standard actions. * Defined by UPnP vendor. */ void SoapError(struct upnphttp * h, int errCode, const char * errDesc) { static const char resp[] = "<s:Envelope " "xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" " "s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">" "<s:Body>" "<s:Fault>" "<faultcode>s:Client</faultcode>" "<faultstring>UPnPError</faultstring>" "<detail>" "<UPnPError xmlns=\"urn:schemas-upnp-org:control-1-0\">" "<errorCode>%d</errorCode>" "<errorDescription>%s</errorDescription>" "</UPnPError>" "</detail>" "</s:Fault>" "</s:Body>" "</s:Envelope>"; char body[2048]; int bodylen; syslog(LOG_INFO, "Returning UPnPError %d: %s", errCode, errDesc); bodylen = snprintf(body, sizeof(body), resp, errCode, errDesc); BuildResp2_upnphttp(h, 500, "Internal Server Error", body, bodylen); SendRespAndClose_upnphttp(h); }
GetOutboundPinholeTimeout(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<OutboundPinholeTimeout>%d</OutboundPinholeTimeout>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; char * int_ip, * int_port, * rem_host, * rem_port, * protocol; int opt=0; /*int proto=0;*/ unsigned short iport, rport; if (GETFLAG(IPV6FCFWDISABLEDMASK)) { SoapError(h, 702, "FirewallDisabled"); return; } ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); int_ip = GetValueFromNameValueList(&data, "InternalClient"); int_port = GetValueFromNameValueList(&data, "InternalPort"); rem_host = GetValueFromNameValueList(&data, "RemoteHost"); rem_port = GetValueFromNameValueList(&data, "RemotePort"); protocol = GetValueFromNameValueList(&data, "Protocol"); if (!int_port || !ext_port || !protocol) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } rport = (unsigned short)atoi(rem_port); iport = (unsigned short)atoi(int_port); /*proto = atoi(protocol);*/ syslog(LOG_INFO, "%s: retrieving timeout for outbound pinhole from [%s]:%hu to [%s]:%hu protocol %s", action, int_ip, iport,rem_host, rport, protocol); /* TODO */ r = -1;/*upnp_check_outbound_pinhole(proto, &opt);*/ switch(r) { case 1: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, opt, action); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -5: /* Protocol not supported */ SoapError(h, 705, "ProtocolNotSupported"); break; default: SoapError(h, 501, "ActionFailed"); } ClearNameValueList(&data); }
GetOutboundPinholeTimeout(struct upnphttp * h, const char * action, const char * ns) { int r; static const char resp[] = "<u:%sResponse " "xmlns:u=\"%s\">" "<OutboundPinholeTimeout>%d</OutboundPinholeTimeout>" "</u:%sResponse>"; char body[512]; int bodylen; struct NameValueParserData data; char * int_ip, * int_port, * rem_host, * rem_port, * protocol; int opt=0; /*int proto=0;*/ unsigned short iport, rport; if (GETFLAG(IPV6FCFWDISABLEDMASK)) { SoapError(h, 702, "FirewallDisabled"); return; } ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data); int_ip = GetValueFromNameValueList(&data, "InternalClient"); int_port = GetValueFromNameValueList(&data, "InternalPort"); rem_host = GetValueFromNameValueList(&data, "RemoteHost"); rem_port = GetValueFromNameValueList(&data, "RemotePort"); protocol = GetValueFromNameValueList(&data, "Protocol"); if (!int_port || !rem_port || !protocol) { ClearNameValueList(&data); SoapError(h, 402, "Invalid Args"); return; } rport = (unsigned short)atoi(rem_port); iport = (unsigned short)atoi(int_port); /*proto = atoi(protocol);*/ syslog(LOG_INFO, "%s: retrieving timeout for outbound pinhole from [%s]:%hu to [%s]:%hu protocol %s", action, int_ip, iport,rem_host, rport, protocol); /* TODO */ r = -1;/*upnp_check_outbound_pinhole(proto, &opt);*/ switch(r) { case 1: /* success */ bodylen = snprintf(body, sizeof(body), resp, action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/, opt, action); BuildSendAndCloseSoapResp(h, body, bodylen); break; case -5: /* Protocol not supported */ SoapError(h, 705, "ProtocolNotSupported"); break; default: SoapError(h, 501, "ActionFailed"); } ClearNameValueList(&data); }
{'added': [(1853, '\tif (!int_port || !rem_port || !protocol)')], 'deleted': [(1853, '\tif (!int_port || !ext_port || !protocol)')]}
1
1
1,785
10,156
https://github.com/miniupnp/miniupnp
CVE-2019-12108
['CWE-476']
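The miniupnpd record above (CVE-2019-12108, CWE-476) turns on a single guard in GetOutboundPinholeTimeout: the pre-fix check tests ext_port instead of rem_port, so a SOAP request that omits the RemotePort argument slips past validation and the function goes on to call atoi(rem_port) with a NULL pointer, which matches the record's NULL-pointer-dereference label. The standalone sketch below illustrates the hardened pattern only; get_arg() is a hypothetical stand-in for GetValueFromNameValueList() (which returns NULL for a missing tag), and none of this is miniupnpd code.

/* Minimal standalone sketch (not miniupnpd code): the guard must test the
 * same pointers that are later handed to atoi().  get_arg() stands in for
 * GetValueFromNameValueList(), returning NULL when a tag is absent. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical lookup: knows two tags, returns NULL for anything else. */
static const char *get_arg(const char *name)
{
    if (strcmp(name, "InternalPort") == 0) return "2345";
    if (strcmp(name, "Protocol") == 0)     return "6";
    return NULL;                  /* RemotePort is missing from the request */
}

int main(void)
{
    const char *int_port = get_arg("InternalPort");
    const char *rem_port = get_arg("RemotePort");   /* NULL here */
    const char *protocol = get_arg("Protocol");

    /* Post-fix style guard: every pointer later fed to atoi() is checked.
     * The pre-fix guard tested a different variable instead of rem_port,
     * so execution reached atoi(rem_port) with a NULL pointer. */
    if (!int_port || !rem_port || !protocol) {
        fprintf(stderr, "402 Invalid Args\n");
        return 1;
    }

    unsigned short iport = (unsigned short)atoi(int_port);
    unsigned short rport = (unsigned short)atoi(rem_port);
    printf("iport=%hu rport=%hu proto=%s\n", iport, rport, protocol);
    return 0;
}

Rejecting the request with a 402 fault before any conversion is the same convention the surrounding actions already follow for their own string arguments (DeletePortMappingRange and GetListOfPortMappings both validate startport_s/endport_s before calling atoi).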
imap.c
imap_subscribe
/* * Copyright (C) 1996-1998,2012 Michael R. Elkins <me@mutt.org> * Copyright (C) 1996-1999 Brandon Long <blong@fiction.net> * Copyright (C) 1999-2009,2012,2017 Brendan Cully <brendan@kublai.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Support for IMAP4rev1, with the occasional nod to IMAP 4. */ #if HAVE_CONFIG_H # include "config.h" #endif #include "mutt.h" #include "mx.h" #include "mailbox.h" #include "globals.h" #include "sort.h" #include "browser.h" #include "imap_private.h" #if defined(USE_SSL) # include "mutt_ssl.h" #endif #include "buffy.h" #if USE_HCACHE #include "hcache.h" #endif #include <unistd.h> #include <ctype.h> #include <string.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> /* imap forward declarations */ static char* imap_get_flags (LIST** hflags, char* s); static int imap_check_capabilities (IMAP_DATA* idata); static void imap_set_flag (IMAP_DATA* idata, int aclbit, int flag, const char* str, char* flags, size_t flsize); /* imap_access: Check permissions on an IMAP mailbox. * TODO: ACL checks. Right now we assume if it exists we can * mess with it. */ int imap_access (const char* path) { IMAP_DATA* idata; IMAP_MBOX mx; char buf[LONG_STRING]; char mailbox[LONG_STRING]; char mbox[LONG_STRING]; int rc; if (imap_parse_path (path, &mx)) return -1; if (!(idata = imap_conn_find (&mx.account, option (OPTIMAPPASSIVE) ? 
MUTT_IMAP_CONN_NONEW : 0))) { FREE (&mx.mbox); return -1; } imap_fix_path (idata, mx.mbox, mailbox, sizeof (mailbox)); if (!*mailbox) strfcpy (mailbox, "INBOX", sizeof (mailbox)); /* we may already be in the folder we're checking */ if (!ascii_strcmp(idata->mailbox, mx.mbox)) { FREE (&mx.mbox); return 0; } FREE (&mx.mbox); if (imap_mboxcache_get (idata, mailbox, 0)) { dprint (3, (debugfile, "imap_access: found %s in cache\n", mailbox)); return 0; } imap_munge_mbox_name (idata, mbox, sizeof (mbox), mailbox); if (mutt_bit_isset (idata->capabilities, IMAP4REV1)) snprintf (buf, sizeof (buf), "STATUS %s (UIDVALIDITY)", mbox); else if (mutt_bit_isset (idata->capabilities, STATUS)) snprintf (buf, sizeof (buf), "STATUS %s (UID-VALIDITY)", mbox); else { dprint (2, (debugfile, "imap_access: STATUS not supported?\n")); return -1; } if ((rc = imap_exec (idata, buf, IMAP_CMD_FAIL_OK)) < 0) { dprint (1, (debugfile, "imap_access: Can't check STATUS of %s\n", mbox)); return rc; } return 0; } int imap_create_mailbox (IMAP_DATA* idata, char* mailbox) { char buf[LONG_STRING], mbox[LONG_STRING]; imap_munge_mbox_name (idata, mbox, sizeof (mbox), mailbox); snprintf (buf, sizeof (buf), "CREATE %s", mbox); if (imap_exec (idata, buf, 0) != 0) { mutt_error (_("CREATE failed: %s"), imap_cmd_trailer (idata)); return -1; } return 0; } int imap_rename_mailbox (IMAP_DATA* idata, IMAP_MBOX* mx, const char* newname) { char oldmbox[LONG_STRING]; char newmbox[LONG_STRING]; char buf[LONG_STRING]; imap_munge_mbox_name (idata, oldmbox, sizeof (oldmbox), mx->mbox); imap_munge_mbox_name (idata, newmbox, sizeof (newmbox), newname); snprintf (buf, sizeof (buf), "RENAME %s %s", oldmbox, newmbox); if (imap_exec (idata, buf, 0) != 0) return -1; return 0; } int imap_delete_mailbox (CONTEXT* ctx, IMAP_MBOX mx) { char buf[LONG_STRING], mbox[LONG_STRING]; IMAP_DATA *idata; if (!ctx || !ctx->data) { if (!(idata = imap_conn_find (&mx.account, option (OPTIMAPPASSIVE) ? MUTT_IMAP_CONN_NONEW : 0))) { FREE (&mx.mbox); return -1; } } else { idata = ctx->data; } imap_munge_mbox_name (idata, mbox, sizeof (mbox), mx.mbox); snprintf (buf, sizeof (buf), "DELETE %s", mbox); if (imap_exec ((IMAP_DATA*) idata, buf, 0) != 0) return -1; return 0; } /* imap_logout_all: close all open connections. Quick and dirty until we can * make sure we've got all the context we need. */ void imap_logout_all (void) { CONNECTION* conn; CONNECTION* tmp; conn = mutt_socket_head (); while (conn) { tmp = conn->next; if (conn->account.type == MUTT_ACCT_TYPE_IMAP && conn->fd >= 0) { mutt_message (_("Closing connection to %s..."), conn->account.host); imap_logout ((IMAP_DATA**) (void*) &conn->data); mutt_clear_error (); mutt_socket_free (conn); } conn = tmp; } } /* imap_read_literal: read bytes bytes from server into file. Not explicitly * buffered, relies on FILE buffering. NOTE: strips \r from \r\n. * Apparently even literals use \r\n-terminated strings ?! 
*/ int imap_read_literal (FILE* fp, IMAP_DATA* idata, unsigned int bytes, progress_t* pbar) { unsigned int pos; char c; int r = 0; dprint (2, (debugfile, "imap_read_literal: reading %ld bytes\n", bytes)); for (pos = 0; pos < bytes; pos++) { if (mutt_socket_readchar (idata->conn, &c) != 1) { dprint (1, (debugfile, "imap_read_literal: error during read, %ld bytes read\n", pos)); idata->status = IMAP_FATAL; return -1; } #if 1 if (r == 1 && c != '\n') fputc ('\r', fp); if (c == '\r') { r = 1; continue; } else r = 0; #endif fputc (c, fp); if (pbar && !(pos % 1024)) mutt_progress_update (pbar, pos, -1); #ifdef DEBUG if (debuglevel >= IMAP_LOG_LTRL) fputc (c, debugfile); #endif } return 0; } /* imap_expunge_mailbox: Purge IMAP portion of expunged messages from the * context. Must not be done while something has a handle on any headers * (eg inside pager or editor). That is, check IMAP_REOPEN_ALLOW. */ void imap_expunge_mailbox (IMAP_DATA* idata) { HEADER* h; int i, cacheno; short old_sort; #ifdef USE_HCACHE idata->hcache = imap_hcache_open (idata, NULL); #endif old_sort = Sort; Sort = SORT_ORDER; mutt_sort_headers (idata->ctx, 0); for (i = 0; i < idata->ctx->msgcount; i++) { h = idata->ctx->hdrs[i]; if (h->index == INT_MAX) { dprint (2, (debugfile, "Expunging message UID %u.\n", HEADER_DATA (h)->uid)); h->active = 0; idata->ctx->size -= h->content->length; imap_cache_del (idata, h); #if USE_HCACHE imap_hcache_del (idata, HEADER_DATA(h)->uid); #endif /* free cached body from disk, if necessary */ cacheno = HEADER_DATA(h)->uid % IMAP_CACHE_LEN; if (idata->cache[cacheno].uid == HEADER_DATA(h)->uid && idata->cache[cacheno].path) { unlink (idata->cache[cacheno].path); FREE (&idata->cache[cacheno].path); } int_hash_delete (idata->uid_hash, HEADER_DATA(h)->uid, h, NULL); imap_free_header_data ((IMAP_HEADER_DATA**)&h->data); } else { h->index = i; /* Mutt has several places where it turns off h->active as a * hack. For example to avoid FLAG updates, or to exclude from * imap_exec_msgset. * * Unfortunately, when a reopen is allowed and the IMAP_EXPUNGE_PENDING * flag becomes set (e.g. a flag update to a modified header), * this function will be called by imap_cmd_finish(). * * The mx_update_tables() will free and remove these "inactive" headers, * despite that an EXPUNGE was not received for them. * This would result in memory leaks and segfaults due to dangling * pointers in the msn_index and uid_hash. * * So this is another hack to work around the hacks. We don't want to * remove the messages, so make sure active is on. */ h->active = 1; } } #if USE_HCACHE imap_hcache_close (idata); #endif /* We may be called on to expunge at any time. We can't rely on the caller * to always know to rethread */ mx_update_tables (idata->ctx, 0); Sort = old_sort; mutt_sort_headers (idata->ctx, 1); } /* imap_check_capabilities: make sure we can log in to this server. */ static int imap_check_capabilities (IMAP_DATA* idata) { if (imap_exec (idata, "CAPABILITY", 0) != 0) { imap_error ("imap_check_capabilities", idata->buf); return -1; } if (!(mutt_bit_isset(idata->capabilities,IMAP4) ||mutt_bit_isset(idata->capabilities,IMAP4REV1))) { mutt_error _("This IMAP server is ancient. Mutt does not work with it."); mutt_sleep (2); /* pause a moment to let the user see the error */ return -1; } return 0; } /* imap_conn_find: Find an open IMAP connection matching account, or open * a new one if none can be found. 
*/ IMAP_DATA* imap_conn_find (const ACCOUNT* account, int flags) { CONNECTION* conn = NULL; ACCOUNT* creds = NULL; IMAP_DATA* idata = NULL; int new = 0; while ((conn = mutt_conn_find (conn, account))) { if (!creds) creds = &conn->account; else memcpy (&conn->account, creds, sizeof (ACCOUNT)); idata = (IMAP_DATA*)conn->data; if (flags & MUTT_IMAP_CONN_NONEW) { if (!idata) { /* This should only happen if we've come to the end of the list */ mutt_socket_free (conn); return NULL; } else if (idata->state < IMAP_AUTHENTICATED) continue; } if (flags & MUTT_IMAP_CONN_NOSELECT && idata && idata->state >= IMAP_SELECTED) continue; if (idata && idata->status == IMAP_FATAL) continue; break; } if (!conn) return NULL; /* this happens when the initial connection fails */ if (!idata) { /* The current connection is a new connection */ if (! (idata = imap_new_idata ())) { mutt_socket_free (conn); return NULL; } conn->data = idata; idata->conn = conn; new = 1; } if (idata->state == IMAP_DISCONNECTED) imap_open_connection (idata); if (idata->state == IMAP_CONNECTED) { if (!imap_authenticate (idata)) { idata->state = IMAP_AUTHENTICATED; FREE (&idata->capstr); new = 1; if (idata->conn->ssf) dprint (2, (debugfile, "Communication encrypted at %d bits\n", idata->conn->ssf)); } else mutt_account_unsetpass (&idata->conn->account); } if (new && idata->state == IMAP_AUTHENTICATED) { /* capabilities may have changed */ imap_exec (idata, "CAPABILITY", IMAP_CMD_QUEUE); /* enable RFC6855, if the server supports that */ if (mutt_bit_isset (idata->capabilities, ENABLE)) imap_exec (idata, "ENABLE UTF8=ACCEPT", IMAP_CMD_QUEUE); /* get root delimiter, '/' as default */ idata->delim = '/'; imap_exec (idata, "LIST \"\" \"\"", IMAP_CMD_QUEUE); if (option (OPTIMAPCHECKSUBSCRIBED)) imap_exec (idata, "LSUB \"\" \"*\"", IMAP_CMD_QUEUE); /* we may need the root delimiter before we open a mailbox */ imap_exec (idata, NULL, IMAP_CMD_FAIL_OK); } return idata; } int imap_open_connection (IMAP_DATA* idata) { char buf[LONG_STRING]; if (mutt_socket_open (idata->conn) < 0) return -1; idata->state = IMAP_CONNECTED; if (imap_cmd_step (idata) != IMAP_CMD_OK) { imap_close_connection (idata); return -1; } if (ascii_strncasecmp ("* OK", idata->buf, 4) == 0) { if (ascii_strncasecmp ("* OK [CAPABILITY", idata->buf, 16) && imap_check_capabilities (idata)) goto bail; #if defined(USE_SSL) /* Attempt STARTTLS if available and desired. */ if (!idata->conn->ssf && (option(OPTSSLFORCETLS) || mutt_bit_isset (idata->capabilities, STARTTLS))) { int rc; if (option(OPTSSLFORCETLS)) rc = MUTT_YES; else if ((rc = query_quadoption (OPT_SSLSTARTTLS, _("Secure connection with TLS?"))) == -1) goto err_close_conn; if (rc == MUTT_YES) { if ((rc = imap_exec (idata, "STARTTLS", IMAP_CMD_FAIL_OK)) == -1) goto bail; if (rc != -2) { if (mutt_ssl_starttls (idata->conn)) { mutt_error (_("Could not negotiate TLS connection")); mutt_sleep (1); goto err_close_conn; } else { /* RFC 2595 demands we recheck CAPABILITY after TLS completes. */ if (imap_exec (idata, "CAPABILITY", 0)) goto bail; } } } } if (option(OPTSSLFORCETLS) && ! 
idata->conn->ssf) { mutt_error _("Encrypted connection unavailable"); mutt_sleep (1); goto err_close_conn; } #endif } else if (ascii_strncasecmp ("* PREAUTH", idata->buf, 9) == 0) { idata->state = IMAP_AUTHENTICATED; if (imap_check_capabilities (idata) != 0) goto bail; FREE (&idata->capstr); } else { imap_error ("imap_open_connection()", buf); goto bail; } return 0; #if defined(USE_SSL) err_close_conn: imap_close_connection (idata); #endif bail: FREE (&idata->capstr); return -1; } void imap_close_connection(IMAP_DATA* idata) { if (idata->state != IMAP_DISCONNECTED) { mutt_socket_close (idata->conn); idata->state = IMAP_DISCONNECTED; } idata->seqno = idata->nextcmd = idata->lastcmd = idata->status = 0; memset (idata->cmds, 0, sizeof (IMAP_COMMAND) * idata->cmdslots); } /* imap_get_flags: Make a simple list out of a FLAGS response. * return stream following FLAGS response */ static char* imap_get_flags (LIST** hflags, char* s) { LIST* flags; char* flag_word; char ctmp; /* sanity-check string */ if (ascii_strncasecmp ("FLAGS", s, 5) != 0) { dprint (1, (debugfile, "imap_get_flags: not a FLAGS response: %s\n", s)); return NULL; } s += 5; SKIPWS(s); if (*s != '(') { dprint (1, (debugfile, "imap_get_flags: bogus FLAGS response: %s\n", s)); return NULL; } /* create list, update caller's flags handle */ flags = mutt_new_list(); *hflags = flags; while (*s && *s != ')') { s++; SKIPWS(s); flag_word = s; while (*s && (*s != ')') && !ISSPACE (*s)) s++; ctmp = *s; *s = '\0'; if (*flag_word) mutt_add_list (flags, flag_word); *s = ctmp; } /* note bad flags response */ if (*s != ')') { dprint (1, (debugfile, "imap_get_flags: Unterminated FLAGS response: %s\n", s)); mutt_free_list (hflags); return NULL; } s++; return s; } static int imap_open_mailbox (CONTEXT* ctx) { IMAP_DATA *idata; IMAP_STATUS* status; char buf[LONG_STRING]; char bufout[LONG_STRING]; int count = 0; IMAP_MBOX mx, pmx; int rc; if (imap_parse_path (ctx->path, &mx)) { mutt_error (_("%s is an invalid IMAP path"), ctx->path); return -1; } /* we require a connection which isn't currently in IMAP_SELECTED state */ if (!(idata = imap_conn_find (&(mx.account), MUTT_IMAP_CONN_NOSELECT))) goto fail_noidata; if (idata->state < IMAP_AUTHENTICATED) goto fail; /* once again the context is new */ ctx->data = idata; /* Clean up path and replace the one in the ctx */ imap_fix_path (idata, mx.mbox, buf, sizeof (buf)); if (!*buf) strfcpy (buf, "INBOX", sizeof (buf)); FREE(&(idata->mailbox)); idata->mailbox = safe_strdup (buf); imap_qualify_path (buf, sizeof (buf), &mx, idata->mailbox); FREE (&(ctx->path)); FREE (&(ctx->realpath)); ctx->path = safe_strdup (buf); ctx->realpath = safe_strdup (ctx->path); idata->ctx = ctx; /* clear mailbox status */ idata->status = 0; memset (idata->ctx->rights, 0, sizeof (idata->ctx->rights)); idata->newMailCount = 0; idata->max_msn = 0; mutt_message (_("Selecting %s..."), idata->mailbox); imap_munge_mbox_name (idata, buf, sizeof(buf), idata->mailbox); /* pipeline ACL test */ if (mutt_bit_isset (idata->capabilities, ACL)) { snprintf (bufout, sizeof (bufout), "MYRIGHTS %s", buf); imap_exec (idata, bufout, IMAP_CMD_QUEUE); } /* assume we have all rights if ACL is unavailable */ else { mutt_bit_set (idata->ctx->rights, MUTT_ACL_LOOKUP); mutt_bit_set (idata->ctx->rights, MUTT_ACL_READ); mutt_bit_set (idata->ctx->rights, MUTT_ACL_SEEN); mutt_bit_set (idata->ctx->rights, MUTT_ACL_WRITE); mutt_bit_set (idata->ctx->rights, MUTT_ACL_INSERT); mutt_bit_set (idata->ctx->rights, MUTT_ACL_POST); mutt_bit_set (idata->ctx->rights, 
MUTT_ACL_CREATE); mutt_bit_set (idata->ctx->rights, MUTT_ACL_DELETE); } /* pipeline the postponed count if possible */ pmx.mbox = NULL; if (mx_is_imap (Postponed) && !imap_parse_path (Postponed, &pmx) && mutt_account_match (&pmx.account, &mx.account)) imap_status (Postponed, 1); FREE (&pmx.mbox); snprintf (bufout, sizeof (bufout), "%s %s", ctx->readonly ? "EXAMINE" : "SELECT", buf); idata->state = IMAP_SELECTED; imap_cmd_start (idata, bufout); status = imap_mboxcache_get (idata, idata->mailbox, 1); do { char *pc; if ((rc = imap_cmd_step (idata)) != IMAP_CMD_CONTINUE) break; pc = idata->buf + 2; /* Obtain list of available flags here, may be overridden by a * PERMANENTFLAGS tag in the OK response */ if (ascii_strncasecmp ("FLAGS", pc, 5) == 0) { /* don't override PERMANENTFLAGS */ if (!idata->flags) { dprint (3, (debugfile, "Getting mailbox FLAGS\n")); if ((pc = imap_get_flags (&(idata->flags), pc)) == NULL) goto fail; } } /* PERMANENTFLAGS are massaged to look like FLAGS, then override FLAGS */ else if (ascii_strncasecmp ("OK [PERMANENTFLAGS", pc, 18) == 0) { dprint (3, (debugfile, "Getting mailbox PERMANENTFLAGS\n")); /* safe to call on NULL */ mutt_free_list (&(idata->flags)); /* skip "OK [PERMANENT" so syntax is the same as FLAGS */ pc += 13; if ((pc = imap_get_flags (&(idata->flags), pc)) == NULL) goto fail; } /* save UIDVALIDITY for the header cache */ else if (ascii_strncasecmp ("OK [UIDVALIDITY", pc, 14) == 0) { dprint (3, (debugfile, "Getting mailbox UIDVALIDITY\n")); pc += 3; pc = imap_next_word (pc); if (mutt_atoui (pc, &idata->uid_validity) < 0) goto fail; status->uidvalidity = idata->uid_validity; } else if (ascii_strncasecmp ("OK [UIDNEXT", pc, 11) == 0) { dprint (3, (debugfile, "Getting mailbox UIDNEXT\n")); pc += 3; pc = imap_next_word (pc); if (mutt_atoui (pc, &idata->uidnext) < 0) goto fail; status->uidnext = idata->uidnext; } else { pc = imap_next_word (pc); if (!ascii_strncasecmp ("EXISTS", pc, 6)) { count = idata->newMailCount; idata->newMailCount = 0; } } } while (rc == IMAP_CMD_CONTINUE); if (rc == IMAP_CMD_NO) { char *s; s = imap_next_word (idata->buf); /* skip seq */ s = imap_next_word (s); /* Skip response */ mutt_error ("%s", s); mutt_sleep (2); goto fail; } if (rc != IMAP_CMD_OK) goto fail; /* check for READ-ONLY notification */ if (!ascii_strncasecmp (imap_get_qualifier (idata->buf), "[READ-ONLY]", 11) \ && !mutt_bit_isset (idata->capabilities, ACL)) { dprint (2, (debugfile, "Mailbox is read-only.\n")); ctx->readonly = 1; } #ifdef DEBUG /* dump the mailbox flags we've found */ if (debuglevel > 2) { if (!idata->flags) dprint (3, (debugfile, "No folder flags found\n")); else { LIST* t = idata->flags; dprint (3, (debugfile, "Mailbox flags: ")); t = t->next; while (t) { dprint (3, (debugfile, "[%s] ", t->data)); t = t->next; } dprint (3, (debugfile, "\n")); } } #endif if (!(mutt_bit_isset(idata->ctx->rights, MUTT_ACL_DELETE) || mutt_bit_isset(idata->ctx->rights, MUTT_ACL_SEEN) || mutt_bit_isset(idata->ctx->rights, MUTT_ACL_WRITE) || mutt_bit_isset(idata->ctx->rights, MUTT_ACL_INSERT))) ctx->readonly = 1; ctx->hdrmax = count; ctx->hdrs = safe_calloc (count, sizeof (HEADER *)); ctx->v2r = safe_calloc (count, sizeof (int)); ctx->msgcount = 0; if (count && (imap_read_headers (idata, 1, count) < 0)) { mutt_error _("Error opening mailbox"); mutt_sleep (1); goto fail; } dprint (2, (debugfile, "imap_open_mailbox: msgcount is %d\n", ctx->msgcount)); FREE (&mx.mbox); return 0; fail: if (idata->state == IMAP_SELECTED) idata->state = IMAP_AUTHENTICATED; fail_noidata: FREE 
(&mx.mbox); return -1; } static int imap_open_mailbox_append (CONTEXT *ctx, int flags) { IMAP_DATA *idata; char buf[LONG_STRING]; char mailbox[LONG_STRING]; IMAP_MBOX mx; int rc; if (imap_parse_path (ctx->path, &mx)) return -1; /* in APPEND mode, we appear to hijack an existing IMAP connection - * ctx is brand new and mostly empty */ if (!(idata = imap_conn_find (&(mx.account), 0))) { FREE (&mx.mbox); return -1; } ctx->data = idata; imap_fix_path (idata, mx.mbox, mailbox, sizeof (mailbox)); if (!*mailbox) strfcpy (mailbox, "INBOX", sizeof (mailbox)); FREE (&mx.mbox); if ((rc = imap_access (ctx->path)) == 0) return 0; if (rc == -1) return -1; snprintf (buf, sizeof (buf), _("Create %s?"), mailbox); if (option (OPTCONFIRMCREATE) && mutt_yesorno (buf, 1) < 1) return -1; if (imap_create_mailbox (idata, mailbox) < 0) return -1; return 0; } /* imap_logout: Gracefully log out of server. */ void imap_logout (IMAP_DATA** idata) { /* we set status here to let imap_handle_untagged know we _expect_ to * receive a bye response (so it doesn't freak out and close the conn) */ (*idata)->status = IMAP_BYE; imap_cmd_start (*idata, "LOGOUT"); if (ImapPollTimeout <= 0 || mutt_socket_poll ((*idata)->conn, ImapPollTimeout) != 0) { while (imap_cmd_step (*idata) == IMAP_CMD_CONTINUE) ; } mutt_socket_close ((*idata)->conn); imap_free_idata (idata); } static int imap_open_new_message (MESSAGE *msg, CONTEXT *dest, HEADER *hdr) { char tmp[_POSIX_PATH_MAX]; mutt_mktemp (tmp, sizeof (tmp)); if ((msg->fp = safe_fopen (tmp, "w")) == NULL) { mutt_perror (tmp); return (-1); } msg->path = safe_strdup(tmp); return 0; } /* imap_set_flag: append str to flags if we currently have permission * according to aclbit */ static void imap_set_flag (IMAP_DATA* idata, int aclbit, int flag, const char *str, char *flags, size_t flsize) { if (mutt_bit_isset (idata->ctx->rights, aclbit)) if (flag && imap_has_flag (idata->flags, str)) safe_strcat (flags, flsize, str); } /* imap_has_flag: do a caseless comparison of the flag against a flag list, * return 1 if found or flag list has '\*', 0 otherwise */ int imap_has_flag (LIST* flag_list, const char* flag) { if (!flag_list) return 0; flag_list = flag_list->next; while (flag_list) { if (!ascii_strncasecmp (flag_list->data, flag, strlen (flag_list->data))) return 1; if (!ascii_strncmp (flag_list->data, "\\*", strlen (flag_list->data))) return 1; flag_list = flag_list->next; } return 0; } /* Note: headers must be in SORT_ORDER. See imap_exec_msgset for args. * Pos is an opaque pointer a la strtok. It should be 0 at first call. 
*/ static int imap_make_msg_set (IMAP_DATA* idata, BUFFER* buf, int flag, int changed, int invert, int* pos) { HEADER** hdrs = idata->ctx->hdrs; int count = 0; /* number of messages in message set */ int match = 0; /* whether current message matches flag condition */ unsigned int setstart = 0; /* start of current message range */ int n; int started = 0; hdrs = idata->ctx->hdrs; for (n = *pos; n < idata->ctx->msgcount && buf->dptr - buf->data < IMAP_MAX_CMDLEN; n++) { match = 0; /* don't include pending expunged messages */ if (hdrs[n]->active) switch (flag) { case MUTT_DELETED: if (hdrs[n]->deleted != HEADER_DATA(hdrs[n])->deleted) match = invert ^ hdrs[n]->deleted; break; case MUTT_FLAG: if (hdrs[n]->flagged != HEADER_DATA(hdrs[n])->flagged) match = invert ^ hdrs[n]->flagged; break; case MUTT_OLD: if (hdrs[n]->old != HEADER_DATA(hdrs[n])->old) match = invert ^ hdrs[n]->old; break; case MUTT_READ: if (hdrs[n]->read != HEADER_DATA(hdrs[n])->read) match = invert ^ hdrs[n]->read; break; case MUTT_REPLIED: if (hdrs[n]->replied != HEADER_DATA(hdrs[n])->replied) match = invert ^ hdrs[n]->replied; break; case MUTT_TAG: if (hdrs[n]->tagged) match = 1; break; case MUTT_TRASH: if (hdrs[n]->deleted && !hdrs[n]->purge) match = 1; break; } if (match && (!changed || hdrs[n]->changed)) { count++; if (setstart == 0) { setstart = HEADER_DATA (hdrs[n])->uid; if (started == 0) { mutt_buffer_printf (buf, "%u", HEADER_DATA (hdrs[n])->uid); started = 1; } else mutt_buffer_printf (buf, ",%u", HEADER_DATA (hdrs[n])->uid); } /* tie up if the last message also matches */ else if (n == idata->ctx->msgcount-1) mutt_buffer_printf (buf, ":%u", HEADER_DATA (hdrs[n])->uid); } /* End current set if message doesn't match or we've reached the end * of the mailbox via inactive messages following the last match. */ else if (setstart && (hdrs[n]->active || n == idata->ctx->msgcount-1)) { if (HEADER_DATA (hdrs[n-1])->uid > setstart) mutt_buffer_printf (buf, ":%u", HEADER_DATA (hdrs[n-1])->uid); setstart = 0; } } *pos = n; return count; } /* Prepares commands for all messages matching conditions (must be flushed * with imap_exec) * Params: * idata: IMAP_DATA containing context containing header set * pre, post: commands are of the form "%s %s %s %s", tag, * pre, message set, post * flag: enum of flag type on which to filter * changed: include only changed messages in message set * invert: invert sense of flag, eg MUTT_READ matches unread messages * Returns: number of matched messages, or -1 on failure */ int imap_exec_msgset (IMAP_DATA* idata, const char* pre, const char* post, int flag, int changed, int invert) { HEADER** hdrs = NULL; short oldsort; BUFFER* cmd; int pos; int rc; int count = 0; if (! (cmd = mutt_buffer_new ())) { dprint (1, (debugfile, "imap_exec_msgset: unable to allocate buffer\n")); return -1; } /* We make a copy of the headers just in case resorting doesn't give exactly the original order (duplicate messages?), because other parts of the ctx are tied to the header order. This may be overkill. 
*/ oldsort = Sort; if (Sort != SORT_ORDER) { hdrs = idata->ctx->hdrs; idata->ctx->hdrs = safe_malloc (idata->ctx->msgcount * sizeof (HEADER*)); memcpy (idata->ctx->hdrs, hdrs, idata->ctx->msgcount * sizeof (HEADER*)); Sort = SORT_ORDER; qsort (idata->ctx->hdrs, idata->ctx->msgcount, sizeof (HEADER*), mutt_get_sort_func (SORT_ORDER)); } pos = 0; do { cmd->dptr = cmd->data; mutt_buffer_printf (cmd, "%s ", pre); rc = imap_make_msg_set (idata, cmd, flag, changed, invert, &pos); if (rc > 0) { mutt_buffer_printf (cmd, " %s", post); if (imap_exec (idata, cmd->data, IMAP_CMD_QUEUE)) { rc = -1; goto out; } count += rc; } } while (rc > 0); rc = count; out: mutt_buffer_free (&cmd); if (oldsort != Sort) { Sort = oldsort; FREE (&idata->ctx->hdrs); idata->ctx->hdrs = hdrs; } return rc; } /* returns 0 if mutt's flags match cached server flags: * EXCLUDING the deleted flag. */ static int compare_flags_for_copy (HEADER* h) { IMAP_HEADER_DATA* hd = (IMAP_HEADER_DATA*)h->data; if (h->read != hd->read) return 1; if (h->old != hd->old) return 1; if (h->flagged != hd->flagged) return 1; if (h->replied != hd->replied) return 1; return 0; } /* Update the IMAP server to reflect the flags for a single message before * performing a "UID COPY". * NOTE: This does not sync the "deleted" flag state, because it is not * desirable to propagate that flag into the copy. */ int imap_sync_message_for_copy (IMAP_DATA *idata, HEADER *hdr, BUFFER *cmd, int *err_continue) { char flags[LONG_STRING]; char uid[11]; if (!compare_flags_for_copy (hdr)) { if (hdr->deleted == HEADER_DATA(hdr)->deleted) hdr->changed = 0; return 0; } snprintf (uid, sizeof (uid), "%u", HEADER_DATA(hdr)->uid); cmd->dptr = cmd->data; mutt_buffer_addstr (cmd, "UID STORE "); mutt_buffer_addstr (cmd, uid); flags[0] = '\0'; imap_set_flag (idata, MUTT_ACL_SEEN, hdr->read, "\\Seen ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, hdr->old, "Old ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, hdr->flagged, "\\Flagged ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, hdr->replied, "\\Answered ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_DELETE, HEADER_DATA(hdr)->deleted, "\\Deleted ", flags, sizeof (flags)); /* now make sure we don't lose custom tags */ if (mutt_bit_isset (idata->ctx->rights, MUTT_ACL_WRITE)) imap_add_keywords (flags, hdr, idata->flags, sizeof (flags)); mutt_remove_trailing_ws (flags); /* UW-IMAP is OK with null flags, Cyrus isn't. 
The only solution is to * explicitly revoke all system flags (if we have permission) */ if (!*flags) { imap_set_flag (idata, MUTT_ACL_SEEN, 1, "\\Seen ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, 1, "Old ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, 1, "\\Flagged ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, 1, "\\Answered ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_DELETE, !HEADER_DATA(hdr)->deleted, "\\Deleted ", flags, sizeof (flags)); mutt_remove_trailing_ws (flags); mutt_buffer_addstr (cmd, " -FLAGS.SILENT ("); } else mutt_buffer_addstr (cmd, " FLAGS.SILENT ("); mutt_buffer_addstr (cmd, flags); mutt_buffer_addstr (cmd, ")"); /* dumb hack for bad UW-IMAP 4.7 servers spurious FLAGS updates */ hdr->active = 0; /* after all this it's still possible to have no flags, if you * have no ACL rights */ if (*flags && (imap_exec (idata, cmd->data, 0) != 0) && err_continue && (*err_continue != MUTT_YES)) { *err_continue = imap_continue ("imap_sync_message: STORE failed", idata->buf); if (*err_continue != MUTT_YES) { hdr->active = 1; return -1; } } hdr->active = 1; if (hdr->deleted == HEADER_DATA(hdr)->deleted) hdr->changed = 0; return 0; } static int sync_helper (IMAP_DATA* idata, int right, int flag, const char* name) { int count = 0; int rc; char buf[LONG_STRING]; if (!idata->ctx) return -1; if (!mutt_bit_isset (idata->ctx->rights, right)) return 0; if (right == MUTT_ACL_WRITE && !imap_has_flag (idata->flags, name)) return 0; snprintf (buf, sizeof(buf), "+FLAGS.SILENT (%s)", name); if ((rc = imap_exec_msgset (idata, "UID STORE", buf, flag, 1, 0)) < 0) return rc; count += rc; buf[0] = '-'; if ((rc = imap_exec_msgset (idata, "UID STORE", buf, flag, 1, 1)) < 0) return rc; count += rc; return count; } /* update the IMAP server to reflect message changes done within mutt. * Arguments * ctx: the current context * expunge: 0 or 1 - do expunge? */ int imap_sync_mailbox (CONTEXT* ctx, int expunge, int* index_hint) { IMAP_DATA* idata; CONTEXT* appendctx = NULL; HEADER* h; HEADER** hdrs = NULL; int oldsort; int n; int rc; idata = (IMAP_DATA*) ctx->data; if (idata->state < IMAP_SELECTED) { dprint (2, (debugfile, "imap_sync_mailbox: no mailbox selected\n")); return -1; } /* This function is only called when the calling code expects the context * to be changed. */ imap_allow_reopen (ctx); if ((rc = imap_check_mailbox (ctx, index_hint, 0)) != 0) return rc; /* if we are expunging anyway, we can do deleted messages very quickly... */ if (expunge && mutt_bit_isset (ctx->rights, MUTT_ACL_DELETE)) { if ((rc = imap_exec_msgset (idata, "UID STORE", "+FLAGS.SILENT (\\Deleted)", MUTT_DELETED, 1, 0)) < 0) { mutt_error (_("Expunge failed")); mutt_sleep (1); goto out; } if (rc > 0) { /* mark these messages as unchanged so second pass ignores them. Done * here so BOGUS UW-IMAP 4.7 SILENT FLAGS updates are ignored. 
*/ for (n = 0; n < ctx->msgcount; n++) if (ctx->hdrs[n]->deleted && ctx->hdrs[n]->changed) ctx->hdrs[n]->active = 0; mutt_message (_("Marking %d messages deleted..."), rc); } } #if USE_HCACHE idata->hcache = imap_hcache_open (idata, NULL); #endif /* save messages with real (non-flag) changes */ for (n = 0; n < ctx->msgcount; n++) { h = ctx->hdrs[n]; if (h->deleted) { imap_cache_del (idata, h); #if USE_HCACHE imap_hcache_del (idata, HEADER_DATA(h)->uid); #endif } if (h->active && h->changed) { #if USE_HCACHE imap_hcache_put (idata, h); #endif /* if the message has been rethreaded or attachments have been deleted * we delete the message and reupload it. * This works better if we're expunging, of course. */ if ((h->env && (h->env->refs_changed || h->env->irt_changed)) || h->attach_del || h->xlabel_changed) { mutt_message (_("Saving changed messages... [%d/%d]"), n+1, ctx->msgcount); if (!appendctx) appendctx = mx_open_mailbox (ctx->path, MUTT_APPEND | MUTT_QUIET, NULL); if (!appendctx) dprint (1, (debugfile, "imap_sync_mailbox: Error opening mailbox in append mode\n")); else _mutt_save_message (h, appendctx, 1, 0, 0); h->xlabel_changed = 0; } } } #if USE_HCACHE imap_hcache_close (idata); #endif /* presort here to avoid doing 10 resorts in imap_exec_msgset */ oldsort = Sort; if (Sort != SORT_ORDER) { hdrs = ctx->hdrs; ctx->hdrs = safe_malloc (ctx->msgcount * sizeof (HEADER*)); memcpy (ctx->hdrs, hdrs, ctx->msgcount * sizeof (HEADER*)); Sort = SORT_ORDER; qsort (ctx->hdrs, ctx->msgcount, sizeof (HEADER*), mutt_get_sort_func (SORT_ORDER)); } rc = sync_helper (idata, MUTT_ACL_DELETE, MUTT_DELETED, "\\Deleted"); if (rc >= 0) rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_FLAG, "\\Flagged"); if (rc >= 0) rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_OLD, "Old"); if (rc >= 0) rc |= sync_helper (idata, MUTT_ACL_SEEN, MUTT_READ, "\\Seen"); if (rc >= 0) rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_REPLIED, "\\Answered"); if (oldsort != Sort) { Sort = oldsort; FREE (&ctx->hdrs); ctx->hdrs = hdrs; } /* Flush the queued flags if any were changed in sync_helper. */ if (rc > 0) if (imap_exec (idata, NULL, 0) != IMAP_CMD_OK) rc = -1; if (rc < 0) { if (ctx->closing) { if (mutt_yesorno (_("Error saving flags. Close anyway?"), 0) == MUTT_YES) { rc = 0; idata->state = IMAP_AUTHENTICATED; goto out; } } else mutt_error _("Error saving flags"); rc = -1; goto out; } /* Update local record of server state to reflect the synchronization just * completed. imap_read_headers always overwrites hcache-origin flags, so * there is no need to mutate the hcache after flag-only changes. */ for (n = 0; n < ctx->msgcount; n++) { HEADER_DATA(ctx->hdrs[n])->deleted = ctx->hdrs[n]->deleted; HEADER_DATA(ctx->hdrs[n])->flagged = ctx->hdrs[n]->flagged; HEADER_DATA(ctx->hdrs[n])->old = ctx->hdrs[n]->old; HEADER_DATA(ctx->hdrs[n])->read = ctx->hdrs[n]->read; HEADER_DATA(ctx->hdrs[n])->replied = ctx->hdrs[n]->replied; ctx->hdrs[n]->changed = 0; } ctx->changed = 0; /* We must send an EXPUNGE command if we're not closing. 
*/ if (expunge && !(ctx->closing) && mutt_bit_isset(ctx->rights, MUTT_ACL_DELETE)) { mutt_message _("Expunging messages from server..."); /* Set expunge bit so we don't get spurious reopened messages */ idata->reopen |= IMAP_EXPUNGE_EXPECTED; if (imap_exec (idata, "EXPUNGE", 0) != 0) { idata->reopen &= ~IMAP_EXPUNGE_EXPECTED; imap_error (_("imap_sync_mailbox: EXPUNGE failed"), idata->buf); rc = -1; goto out; } idata->reopen &= ~IMAP_EXPUNGE_EXPECTED; } if (expunge && ctx->closing) { imap_exec (idata, "CLOSE", IMAP_CMD_QUEUE); idata->state = IMAP_AUTHENTICATED; } if (option (OPTMESSAGECACHECLEAN)) imap_cache_clean (idata); rc = 0; out: if (appendctx) { mx_fastclose_mailbox (appendctx); FREE (&appendctx); } return rc; } /* imap_close_mailbox: clean up IMAP data in CONTEXT */ int imap_close_mailbox (CONTEXT* ctx) { IMAP_DATA* idata; int i; idata = (IMAP_DATA*) ctx->data; /* Check to see if the mailbox is actually open */ if (!idata) return 0; /* imap_open_mailbox_append() borrows the IMAP_DATA temporarily, * just for the connection, but does not set idata->ctx to the * open-append ctx. * * So when these are equal, it means we are actually closing the * mailbox and should clean up idata. Otherwise, we don't want to * touch idata - it's still being used. */ if (ctx == idata->ctx) { if (idata->status != IMAP_FATAL && idata->state >= IMAP_SELECTED) { /* mx_close_mailbox won't sync if there are no deleted messages * and the mailbox is unchanged, so we may have to close here */ if (!ctx->deleted) imap_exec (idata, "CLOSE", IMAP_CMD_QUEUE); idata->state = IMAP_AUTHENTICATED; } idata->reopen &= IMAP_REOPEN_ALLOW; FREE (&(idata->mailbox)); mutt_free_list (&idata->flags); idata->ctx = NULL; hash_destroy (&idata->uid_hash, NULL); FREE (&idata->msn_index); idata->msn_index_size = 0; idata->max_msn = 0; for (i = 0; i < IMAP_CACHE_LEN; i++) { if (idata->cache[i].path) { unlink (idata->cache[i].path); FREE (&idata->cache[i].path); } } mutt_bcache_close (&idata->bcache); } /* free IMAP part of headers */ for (i = 0; i < ctx->msgcount; i++) /* mailbox may not have fully loaded */ if (ctx->hdrs[i] && ctx->hdrs[i]->data) imap_free_header_data ((IMAP_HEADER_DATA**)&(ctx->hdrs[i]->data)); return 0; } /* use the NOOP or IDLE command to poll for new mail * * return values: * MUTT_REOPENED mailbox has been externally modified * MUTT_NEW_MAIL new mail has arrived! * 0 no change * -1 error */ int imap_check_mailbox (CONTEXT *ctx, int *index_hint, int force) { /* overload keyboard timeout to avoid many mailbox checks in a row. * Most users don't like having to wait exactly when they press a key. */ IMAP_DATA* idata; int result = 0; idata = (IMAP_DATA*) ctx->data; /* try IDLE first, unless force is set */ if (!force && option (OPTIMAPIDLE) && mutt_bit_isset (idata->capabilities, IDLE) && (idata->state != IMAP_IDLE || time(NULL) >= idata->lastread + ImapKeepalive)) { if (imap_cmd_idle (idata) < 0) return -1; } if (idata->state == IMAP_IDLE) { while ((result = mutt_socket_poll (idata->conn, 0)) > 0) { if (imap_cmd_step (idata) != IMAP_CMD_CONTINUE) { dprint (1, (debugfile, "Error reading IDLE response\n")); return -1; } } if (result < 0) { dprint (1, (debugfile, "Poll failed, disabling IDLE\n")); mutt_bit_unset (idata->capabilities, IDLE); } } if ((force || (idata->state != IMAP_IDLE && time(NULL) >= idata->lastread + Timeout)) && imap_exec (idata, "NOOP", IMAP_CMD_POLL) != 0) return -1; /* We call this even when we haven't run NOOP in case we have pending * changes to process, since we can reopen here. 
*/ imap_cmd_finish (idata); if (idata->check_status & IMAP_EXPUNGE_PENDING) result = MUTT_REOPENED; else if (idata->check_status & IMAP_NEWMAIL_PENDING) result = MUTT_NEW_MAIL; else if (idata->check_status & IMAP_FLAGS_PENDING) result = MUTT_FLAGS; idata->check_status = 0; return result; } static int imap_check_mailbox_reopen (CONTEXT *ctx, int *index_hint) { int rc; imap_allow_reopen (ctx); rc = imap_check_mailbox (ctx, index_hint, 0); imap_disallow_reopen (ctx); return rc; } /* split path into (idata,mailbox name) */ static int imap_get_mailbox (const char* path, IMAP_DATA** hidata, char* buf, size_t blen) { IMAP_MBOX mx; if (imap_parse_path (path, &mx)) { dprint (1, (debugfile, "imap_get_mailbox: Error parsing %s\n", path)); return -1; } if (!(*hidata = imap_conn_find (&(mx.account), option (OPTIMAPPASSIVE) ? MUTT_IMAP_CONN_NONEW : 0)) || (*hidata)->state < IMAP_AUTHENTICATED) { FREE (&mx.mbox); return -1; } imap_fix_path (*hidata, mx.mbox, buf, blen); if (!*buf) strfcpy (buf, "INBOX", blen); FREE (&mx.mbox); return 0; } /* check for new mail in any subscribed mailboxes. Given a list of mailboxes * rather than called once for each so that it can batch the commands and * save on round trips. Returns number of mailboxes with new mail. */ int imap_buffy_check (int force, int check_stats) { IMAP_DATA* idata; IMAP_DATA* lastdata = NULL; BUFFY* mailbox; char name[LONG_STRING]; char command[LONG_STRING]; char munged[LONG_STRING]; int buffies = 0; for (mailbox = Incoming; mailbox; mailbox = mailbox->next) { /* Init newly-added mailboxes */ if (! mailbox->magic) { if (mx_is_imap (mailbox->path)) mailbox->magic = MUTT_IMAP; } if (mailbox->magic != MUTT_IMAP) continue; if (imap_get_mailbox (mailbox->path, &idata, name, sizeof (name)) < 0) { mailbox->new = 0; continue; } /* Don't issue STATUS on the selected mailbox, it will be NOOPed or * IDLEd elsewhere. * idata->mailbox may be NULL for connections other than the current * mailbox's, and shouldn't expand to INBOX in that case. #3216. */ if (idata->mailbox && !imap_mxcmp (name, idata->mailbox)) { mailbox->new = 0; continue; } if (!mutt_bit_isset (idata->capabilities, IMAP4REV1) && !mutt_bit_isset (idata->capabilities, STATUS)) { dprint (2, (debugfile, "Server doesn't support STATUS\n")); continue; } if (lastdata && idata != lastdata) { /* Send commands to previous server. Sorting the buffy list * may prevent some infelicitous interleavings */ if (imap_exec (lastdata, NULL, IMAP_CMD_FAIL_OK | IMAP_CMD_POLL) == -1) dprint (1, (debugfile, "Error polling mailboxes\n")); lastdata = NULL; } if (!lastdata) lastdata = idata; imap_munge_mbox_name (idata, munged, sizeof (munged), name); if (check_stats) snprintf (command, sizeof (command), "STATUS %s (UIDNEXT UIDVALIDITY UNSEEN RECENT MESSAGES)", munged); else snprintf (command, sizeof (command), "STATUS %s (UIDNEXT UIDVALIDITY UNSEEN RECENT)", munged); if (imap_exec (idata, command, IMAP_CMD_QUEUE | IMAP_CMD_POLL) < 0) { dprint (1, (debugfile, "Error queueing command\n")); return 0; } } if (lastdata && (imap_exec (lastdata, NULL, IMAP_CMD_FAIL_OK | IMAP_CMD_POLL) == -1)) { dprint (1, (debugfile, "Error polling mailboxes\n")); return 0; } /* collect results */ for (mailbox = Incoming; mailbox; mailbox = mailbox->next) { if (mailbox->magic == MUTT_IMAP && mailbox->new) buffies++; } return buffies; } /* imap_status: returns count of messages in mailbox, or -1 on error. 
* if queue != 0, queue the command and expect it to have been run * on the next call (for pipelining the postponed count) */ int imap_status (char* path, int queue) { static int queued = 0; IMAP_DATA *idata; char buf[LONG_STRING]; char mbox[LONG_STRING]; IMAP_STATUS* status; if (imap_get_mailbox (path, &idata, buf, sizeof (buf)) < 0) return -1; /* We are in the folder we're polling - just return the mailbox count. * * Note that imap_mxcmp() converts NULL to "INBOX", so we need to * make sure the idata really is open to a folder. */ if (idata->ctx && !imap_mxcmp (buf, idata->mailbox)) return idata->ctx->msgcount; else if (mutt_bit_isset(idata->capabilities,IMAP4REV1) || mutt_bit_isset(idata->capabilities,STATUS)) { imap_munge_mbox_name (idata, mbox, sizeof(mbox), buf); snprintf (buf, sizeof (buf), "STATUS %s (%s)", mbox, "MESSAGES"); imap_unmunge_mbox_name (idata, mbox); } else /* Server does not support STATUS, and this is not the current mailbox. * There is no lightweight way to check recent arrivals */ return -1; if (queue) { imap_exec (idata, buf, IMAP_CMD_QUEUE); queued = 1; return 0; } else if (!queued) imap_exec (idata, buf, 0); queued = 0; if ((status = imap_mboxcache_get (idata, mbox, 0))) return status->messages; return 0; } /* return cached mailbox stats or NULL if create is 0 */ IMAP_STATUS* imap_mboxcache_get (IMAP_DATA* idata, const char* mbox, int create) { LIST* cur; IMAP_STATUS* status; IMAP_STATUS scache; #ifdef USE_HCACHE header_cache_t *hc = NULL; unsigned int *uidvalidity = NULL; unsigned int *uidnext = NULL; #endif for (cur = idata->mboxcache; cur; cur = cur->next) { status = (IMAP_STATUS*)cur->data; if (!imap_mxcmp (mbox, status->name)) return status; } status = NULL; /* lame */ if (create) { memset (&scache, 0, sizeof (scache)); scache.name = (char*)mbox; idata->mboxcache = mutt_add_list_n (idata->mboxcache, &scache, sizeof (scache)); status = imap_mboxcache_get (idata, mbox, 0); status->name = safe_strdup (mbox); } #ifdef USE_HCACHE hc = imap_hcache_open (idata, mbox); if (hc) { uidvalidity = mutt_hcache_fetch_raw (hc, "/UIDVALIDITY", imap_hcache_keylen); uidnext = mutt_hcache_fetch_raw (hc, "/UIDNEXT", imap_hcache_keylen); if (uidvalidity) { if (!status) { mutt_hcache_free ((void **)&uidvalidity); mutt_hcache_free ((void **)&uidnext); mutt_hcache_close (hc); return imap_mboxcache_get (idata, mbox, 1); } status->uidvalidity = *uidvalidity; status->uidnext = uidnext ? 
*uidnext: 0; dprint (3, (debugfile, "mboxcache: hcache uidvalidity %u, uidnext %u\n", status->uidvalidity, status->uidnext)); } mutt_hcache_free ((void **)&uidvalidity); mutt_hcache_free ((void **)&uidnext); mutt_hcache_close (hc); } #endif return status; } void imap_mboxcache_free (IMAP_DATA* idata) { LIST* cur; IMAP_STATUS* status; for (cur = idata->mboxcache; cur; cur = cur->next) { status = (IMAP_STATUS*)cur->data; FREE (&status->name); } mutt_free_list (&idata->mboxcache); } /* returns number of patterns in the search that should be done server-side * (eg are full-text) */ static int do_search (const pattern_t* search, int allpats) { int rc = 0; const pattern_t* pat; for (pat = search; pat; pat = pat->next) { switch (pat->op) { case MUTT_BODY: case MUTT_HEADER: case MUTT_WHOLE_MSG: if (pat->stringmatch) rc++; break; default: if (pat->child && do_search (pat->child, 1)) rc++; } if (!allpats) break; } return rc; } /* convert mutt pattern_t to IMAP SEARCH command containing only elements * that require full-text search (mutt already has what it needs for most * match types, and does a better job (eg server doesn't support regexps). */ static int imap_compile_search (const pattern_t* pat, BUFFER* buf) { if (! do_search (pat, 0)) return 0; if (pat->not) mutt_buffer_addstr (buf, "NOT "); if (pat->child) { int clauses; if ((clauses = do_search (pat->child, 1)) > 0) { const pattern_t* clause = pat->child; mutt_buffer_addch (buf, '('); while (clauses) { if (do_search (clause, 0)) { if (pat->op == MUTT_OR && clauses > 1) mutt_buffer_addstr (buf, "OR "); clauses--; if (imap_compile_search (clause, buf) < 0) return -1; if (clauses) mutt_buffer_addch (buf, ' '); } clause = clause->next; } mutt_buffer_addch (buf, ')'); } } else { char term[STRING]; char *delim; switch (pat->op) { case MUTT_HEADER: mutt_buffer_addstr (buf, "HEADER "); /* extract header name */ if (! 
(delim = strchr (pat->p.str, ':'))) { mutt_error (_("Header search without header name: %s"), pat->p.str); return -1; } *delim = '\0'; imap_quote_string (term, sizeof (term), pat->p.str); mutt_buffer_addstr (buf, term); mutt_buffer_addch (buf, ' '); /* and field */ *delim = ':'; delim++; SKIPWS(delim); imap_quote_string (term, sizeof (term), delim); mutt_buffer_addstr (buf, term); break; case MUTT_BODY: mutt_buffer_addstr (buf, "BODY "); imap_quote_string (term, sizeof (term), pat->p.str); mutt_buffer_addstr (buf, term); break; case MUTT_WHOLE_MSG: mutt_buffer_addstr (buf, "TEXT "); imap_quote_string (term, sizeof (term), pat->p.str); mutt_buffer_addstr (buf, term); break; } } return 0; } int imap_search (CONTEXT* ctx, const pattern_t* pat) { BUFFER buf; IMAP_DATA* idata = (IMAP_DATA*)ctx->data; int i; for (i = 0; i < ctx->msgcount; i++) ctx->hdrs[i]->matched = 0; if (!do_search (pat, 1)) return 0; mutt_buffer_init (&buf); mutt_buffer_addstr (&buf, "UID SEARCH "); if (imap_compile_search (pat, &buf) < 0) { FREE (&buf.data); return -1; } if (imap_exec (idata, buf.data, 0) < 0) { FREE (&buf.data); return -1; } FREE (&buf.data); return 0; } int imap_subscribe (char *path, int subscribe) { IMAP_DATA *idata; char buf[LONG_STRING]; char mbox[LONG_STRING]; char errstr[STRING]; BUFFER err, token; IMAP_MBOX mx; if (!mx_is_imap (path) || imap_parse_path (path, &mx) || !mx.mbox) { mutt_error (_("Bad mailbox name")); return -1; } if (!(idata = imap_conn_find (&(mx.account), 0))) goto fail; imap_fix_path (idata, mx.mbox, buf, sizeof (buf)); if (!*buf) strfcpy (buf, "INBOX", sizeof (buf)); if (option (OPTIMAPCHECKSUBSCRIBED)) { mutt_buffer_init (&token); mutt_buffer_init (&err); err.data = errstr; err.dsize = sizeof (errstr); snprintf (mbox, sizeof (mbox), "%smailboxes \"%s\"", subscribe ? "" : "un", path); if (mutt_parse_rc_line (mbox, &token, &err)) dprint (1, (debugfile, "Error adding subscribed mailbox: %s\n", errstr)); FREE (&token.data); } if (subscribe) mutt_message (_("Subscribing to %s..."), buf); else mutt_message (_("Unsubscribing from %s..."), buf); imap_munge_mbox_name (idata, mbox, sizeof(mbox), buf); snprintf (buf, sizeof (buf), "%sSUBSCRIBE %s", subscribe ? "" : "UN", mbox); if (imap_exec (idata, buf, 0) < 0) goto fail; imap_unmunge_mbox_name(idata, mx.mbox); if (subscribe) mutt_message (_("Subscribed to %s"), mx.mbox); else mutt_message (_("Unsubscribed from %s"), mx.mbox); FREE (&mx.mbox); return 0; fail: FREE (&mx.mbox); return -1; } /* trim dest to the length of the longest prefix it shares with src, * returning the length of the trimmed string */ static size_t longest_common_prefix (char *dest, const char* src, size_t start, size_t dlen) { size_t pos = start; while (pos < dlen && dest[pos] && dest[pos] == src[pos]) pos++; dest[pos] = '\0'; return pos; } /* look for IMAP URLs to complete from defined mailboxes. Could be extended * to complete over open connections and account/folder hooks too. 
*/ static int imap_complete_hosts (char *dest, size_t len) { BUFFY* mailbox; CONNECTION* conn; int rc = -1; size_t matchlen; matchlen = mutt_strlen (dest); for (mailbox = Incoming; mailbox; mailbox = mailbox->next) { if (!mutt_strncmp (dest, mailbox->path, matchlen)) { if (rc) { strfcpy (dest, mailbox->path, len); rc = 0; } else longest_common_prefix (dest, mailbox->path, matchlen, len); } } for (conn = mutt_socket_head (); conn; conn = conn->next) { ciss_url_t url; char urlstr[LONG_STRING]; if (conn->account.type != MUTT_ACCT_TYPE_IMAP) continue; mutt_account_tourl (&conn->account, &url); /* FIXME: how to handle multiple users on the same host? */ url.user = NULL; url.path = NULL; url_ciss_tostring (&url, urlstr, sizeof (urlstr), 0); if (!mutt_strncmp (dest, urlstr, matchlen)) { if (rc) { strfcpy (dest, urlstr, len); rc = 0; } else longest_common_prefix (dest, urlstr, matchlen, len); } } return rc; } /* imap_complete: given a partial IMAP folder path, return a string which * adds as much to the path as is unique */ int imap_complete(char* dest, size_t dlen, char* path) { IMAP_DATA* idata; char list[LONG_STRING]; char buf[LONG_STRING]; IMAP_LIST listresp; char completion[LONG_STRING]; int clen; size_t matchlen = 0; int completions = 0; IMAP_MBOX mx; int rc; if (imap_parse_path (path, &mx)) { strfcpy (dest, path, dlen); return imap_complete_hosts (dest, dlen); } /* don't open a new socket just for completion. Instead complete over * known mailboxes/hooks/etc */ if (!(idata = imap_conn_find (&(mx.account), MUTT_IMAP_CONN_NONEW))) { FREE (&mx.mbox); strfcpy (dest, path, dlen); return imap_complete_hosts (dest, dlen); } /* reformat path for IMAP list, and append wildcard */ /* don't use INBOX in place of "" */ if (mx.mbox && mx.mbox[0]) imap_fix_path (idata, mx.mbox, list, sizeof(list)); else list[0] = '\0'; /* fire off command */ snprintf (buf, sizeof(buf), "%s \"\" \"%s%%\"", option (OPTIMAPLSUB) ? "LSUB" : "LIST", list); imap_cmd_start (idata, buf); /* and see what the results are */ strfcpy (completion, NONULL(mx.mbox), sizeof(completion)); idata->cmdtype = IMAP_CT_LIST; idata->cmddata = &listresp; do { listresp.name = NULL; rc = imap_cmd_step (idata); if (rc == IMAP_CMD_CONTINUE && listresp.name) { /* if the folder isn't selectable, append delimiter to force browse * to enter it on second tab. */ if (listresp.noselect) { clen = strlen(listresp.name); listresp.name[clen++] = listresp.delim; listresp.name[clen] = '\0'; } /* copy in first word */ if (!completions) { strfcpy (completion, listresp.name, sizeof(completion)); matchlen = strlen (completion); completions++; continue; } matchlen = longest_common_prefix (completion, listresp.name, 0, matchlen); completions++; } } while (rc == IMAP_CMD_CONTINUE); idata->cmddata = NULL; if (completions) { /* reformat output */ imap_qualify_path (dest, dlen, &mx, completion); mutt_pretty_mailbox (dest, dlen); FREE (&mx.mbox); return 0; } return -1; } /* imap_fast_trash: use server COPY command to copy deleted * messages to the trash folder. 
* Return codes: * -1: error * 0: success * 1: non-fatal error - try fetch/append */ int imap_fast_trash (CONTEXT* ctx, char* dest) { IMAP_DATA* idata; char mbox[LONG_STRING]; char mmbox[LONG_STRING]; char prompt[LONG_STRING]; int n, rc; IMAP_MBOX mx; int triedcreate = 0; BUFFER *sync_cmd = NULL; int err_continue = MUTT_NO; idata = (IMAP_DATA*) ctx->data; if (imap_parse_path (dest, &mx)) { dprint (1, (debugfile, "imap_fast_trash: bad destination %s\n", dest)); return -1; } /* check that the save-to folder is in the same account */ if (!mutt_account_match (&(CTX_DATA->conn->account), &(mx.account))) { dprint (3, (debugfile, "imap_fast_trash: %s not same server as %s\n", dest, ctx->path)); return 1; } imap_fix_path (idata, mx.mbox, mbox, sizeof (mbox)); if (!*mbox) strfcpy (mbox, "INBOX", sizeof (mbox)); imap_munge_mbox_name (idata, mmbox, sizeof (mmbox), mbox); sync_cmd = mutt_buffer_new (); for (n = 0; n < ctx->msgcount; n++) { if (ctx->hdrs[n]->active && ctx->hdrs[n]->changed && ctx->hdrs[n]->deleted && !ctx->hdrs[n]->purge) { rc = imap_sync_message_for_copy (idata, ctx->hdrs[n], sync_cmd, &err_continue); if (rc < 0) { dprint (1, (debugfile, "imap_fast_trash: could not sync\n")); goto out; } } } /* loop in case of TRYCREATE */ do { rc = imap_exec_msgset (idata, "UID COPY", mmbox, MUTT_TRASH, 0, 0); if (!rc) { dprint (1, (debugfile, "imap_fast_trash: No messages to trash\n")); rc = -1; goto out; } else if (rc < 0) { dprint (1, (debugfile, "could not queue copy\n")); goto out; } else mutt_message (_("Copying %d messages to %s..."), rc, mbox); /* let's get it on */ rc = imap_exec (idata, NULL, IMAP_CMD_FAIL_OK); if (rc == -2) { if (triedcreate) { dprint (1, (debugfile, "Already tried to create mailbox %s\n", mbox)); break; } /* bail out if command failed for reasons other than nonexistent target */ if (ascii_strncasecmp (imap_get_qualifier (idata->buf), "[TRYCREATE]", 11)) break; dprint (3, (debugfile, "imap_fast_trash: server suggests TRYCREATE\n")); snprintf (prompt, sizeof (prompt), _("Create %s?"), mbox); if (option (OPTCONFIRMCREATE) && mutt_yesorno (prompt, 1) < 1) { mutt_clear_error (); goto out; } if (imap_create_mailbox (idata, mbox) < 0) break; triedcreate = 1; } } while (rc == -2); if (rc != 0) { imap_error ("imap_fast_trash", idata->buf); goto out; } rc = 0; out: mutt_buffer_free (&sync_cmd); FREE (&mx.mbox); return rc < 0 ? -1 : rc; } struct mx_ops mx_imap_ops = { .open = imap_open_mailbox, .open_append = imap_open_mailbox_append, .close = imap_close_mailbox, .open_msg = imap_fetch_message, .close_msg = imap_close_message, .commit_msg = imap_commit_message, .open_new_msg = imap_open_new_message, .check = imap_check_mailbox_reopen, .sync = NULL, /* imap syncing is handled by imap_sync_mailbox */ };
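The mx_imap_ops table above is how the IMAP driver plugs into mutt's generic mailbox layer: callers operate on a CONTEXT through function pointers instead of naming imap_* functions directly (note that .sync is left NULL because IMAP syncing is driven by imap_sync_mailbox itself). The fragment below is a minimal, hypothetical sketch of that dispatch pattern; the struct layout and helper name are assumptions made for illustration only and are not the real mx.h declaration.

/* Hypothetical, simplified sketch -- NOT the real mx.h API.
 * It models only the fields visible in the mx_imap_ops initializer above,
 * to show how a caller can stay mailbox-type-agnostic. */
struct mailbox_ops_sketch {
  int (*open)  (CONTEXT *ctx);
  int (*check) (CONTEXT *ctx, int *index_hint);
  int (*close) (CONTEXT *ctx);
};

/* Dispatch through the table: the caller never references imap_* directly.
 * check() returns MUTT_REOPENED / MUTT_NEW_MAIL / 0 / -1, per
 * imap_check_mailbox above. */
static int poll_and_close_sketch (const struct mailbox_ops_sketch *ops, CONTEXT *ctx)
{
  int hint = 0;

  if (ops->open (ctx) < 0)
    return -1;
  if (ops->check (ctx, &hint) < 0)
    return -1;
  return ops->close (ctx);
}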
/* * Copyright (C) 1996-1998,2012 Michael R. Elkins <me@mutt.org> * Copyright (C) 1996-1999 Brandon Long <blong@fiction.net> * Copyright (C) 1999-2009,2012,2017 Brendan Cully <brendan@kublai.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Support for IMAP4rev1, with the occasional nod to IMAP 4. */ #if HAVE_CONFIG_H # include "config.h" #endif #include "mutt.h" #include "mx.h" #include "mailbox.h" #include "globals.h" #include "sort.h" #include "browser.h" #include "imap_private.h" #if defined(USE_SSL) # include "mutt_ssl.h" #endif #include "buffy.h" #if USE_HCACHE #include "hcache.h" #endif #include <unistd.h> #include <ctype.h> #include <string.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> /* imap forward declarations */ static char* imap_get_flags (LIST** hflags, char* s); static int imap_check_capabilities (IMAP_DATA* idata); static void imap_set_flag (IMAP_DATA* idata, int aclbit, int flag, const char* str, char* flags, size_t flsize); /* imap_access: Check permissions on an IMAP mailbox. * TODO: ACL checks. Right now we assume if it exists we can * mess with it. */ int imap_access (const char* path) { IMAP_DATA* idata; IMAP_MBOX mx; char buf[LONG_STRING]; char mailbox[LONG_STRING]; char mbox[LONG_STRING]; int rc; if (imap_parse_path (path, &mx)) return -1; if (!(idata = imap_conn_find (&mx.account, option (OPTIMAPPASSIVE) ? 
MUTT_IMAP_CONN_NONEW : 0))) { FREE (&mx.mbox); return -1; } imap_fix_path (idata, mx.mbox, mailbox, sizeof (mailbox)); if (!*mailbox) strfcpy (mailbox, "INBOX", sizeof (mailbox)); /* we may already be in the folder we're checking */ if (!ascii_strcmp(idata->mailbox, mx.mbox)) { FREE (&mx.mbox); return 0; } FREE (&mx.mbox); if (imap_mboxcache_get (idata, mailbox, 0)) { dprint (3, (debugfile, "imap_access: found %s in cache\n", mailbox)); return 0; } imap_munge_mbox_name (idata, mbox, sizeof (mbox), mailbox); if (mutt_bit_isset (idata->capabilities, IMAP4REV1)) snprintf (buf, sizeof (buf), "STATUS %s (UIDVALIDITY)", mbox); else if (mutt_bit_isset (idata->capabilities, STATUS)) snprintf (buf, sizeof (buf), "STATUS %s (UID-VALIDITY)", mbox); else { dprint (2, (debugfile, "imap_access: STATUS not supported?\n")); return -1; } if ((rc = imap_exec (idata, buf, IMAP_CMD_FAIL_OK)) < 0) { dprint (1, (debugfile, "imap_access: Can't check STATUS of %s\n", mbox)); return rc; } return 0; } int imap_create_mailbox (IMAP_DATA* idata, char* mailbox) { char buf[LONG_STRING], mbox[LONG_STRING]; imap_munge_mbox_name (idata, mbox, sizeof (mbox), mailbox); snprintf (buf, sizeof (buf), "CREATE %s", mbox); if (imap_exec (idata, buf, 0) != 0) { mutt_error (_("CREATE failed: %s"), imap_cmd_trailer (idata)); return -1; } return 0; } int imap_rename_mailbox (IMAP_DATA* idata, IMAP_MBOX* mx, const char* newname) { char oldmbox[LONG_STRING]; char newmbox[LONG_STRING]; char buf[LONG_STRING]; imap_munge_mbox_name (idata, oldmbox, sizeof (oldmbox), mx->mbox); imap_munge_mbox_name (idata, newmbox, sizeof (newmbox), newname); snprintf (buf, sizeof (buf), "RENAME %s %s", oldmbox, newmbox); if (imap_exec (idata, buf, 0) != 0) return -1; return 0; } int imap_delete_mailbox (CONTEXT* ctx, IMAP_MBOX mx) { char buf[LONG_STRING], mbox[LONG_STRING]; IMAP_DATA *idata; if (!ctx || !ctx->data) { if (!(idata = imap_conn_find (&mx.account, option (OPTIMAPPASSIVE) ? MUTT_IMAP_CONN_NONEW : 0))) { FREE (&mx.mbox); return -1; } } else { idata = ctx->data; } imap_munge_mbox_name (idata, mbox, sizeof (mbox), mx.mbox); snprintf (buf, sizeof (buf), "DELETE %s", mbox); if (imap_exec ((IMAP_DATA*) idata, buf, 0) != 0) return -1; return 0; } /* imap_logout_all: close all open connections. Quick and dirty until we can * make sure we've got all the context we need. */ void imap_logout_all (void) { CONNECTION* conn; CONNECTION* tmp; conn = mutt_socket_head (); while (conn) { tmp = conn->next; if (conn->account.type == MUTT_ACCT_TYPE_IMAP && conn->fd >= 0) { mutt_message (_("Closing connection to %s..."), conn->account.host); imap_logout ((IMAP_DATA**) (void*) &conn->data); mutt_clear_error (); mutt_socket_free (conn); } conn = tmp; } } /* imap_read_literal: read bytes bytes from server into file. Not explicitly * buffered, relies on FILE buffering. NOTE: strips \r from \r\n. * Apparently even literals use \r\n-terminated strings ?! 
*/ int imap_read_literal (FILE* fp, IMAP_DATA* idata, unsigned int bytes, progress_t* pbar) { unsigned int pos; char c; int r = 0; dprint (2, (debugfile, "imap_read_literal: reading %ld bytes\n", bytes)); for (pos = 0; pos < bytes; pos++) { if (mutt_socket_readchar (idata->conn, &c) != 1) { dprint (1, (debugfile, "imap_read_literal: error during read, %ld bytes read\n", pos)); idata->status = IMAP_FATAL; return -1; } #if 1 if (r == 1 && c != '\n') fputc ('\r', fp); if (c == '\r') { r = 1; continue; } else r = 0; #endif fputc (c, fp); if (pbar && !(pos % 1024)) mutt_progress_update (pbar, pos, -1); #ifdef DEBUG if (debuglevel >= IMAP_LOG_LTRL) fputc (c, debugfile); #endif } return 0; } /* imap_expunge_mailbox: Purge IMAP portion of expunged messages from the * context. Must not be done while something has a handle on any headers * (eg inside pager or editor). That is, check IMAP_REOPEN_ALLOW. */ void imap_expunge_mailbox (IMAP_DATA* idata) { HEADER* h; int i, cacheno; short old_sort; #ifdef USE_HCACHE idata->hcache = imap_hcache_open (idata, NULL); #endif old_sort = Sort; Sort = SORT_ORDER; mutt_sort_headers (idata->ctx, 0); for (i = 0; i < idata->ctx->msgcount; i++) { h = idata->ctx->hdrs[i]; if (h->index == INT_MAX) { dprint (2, (debugfile, "Expunging message UID %u.\n", HEADER_DATA (h)->uid)); h->active = 0; idata->ctx->size -= h->content->length; imap_cache_del (idata, h); #if USE_HCACHE imap_hcache_del (idata, HEADER_DATA(h)->uid); #endif /* free cached body from disk, if necessary */ cacheno = HEADER_DATA(h)->uid % IMAP_CACHE_LEN; if (idata->cache[cacheno].uid == HEADER_DATA(h)->uid && idata->cache[cacheno].path) { unlink (idata->cache[cacheno].path); FREE (&idata->cache[cacheno].path); } int_hash_delete (idata->uid_hash, HEADER_DATA(h)->uid, h, NULL); imap_free_header_data ((IMAP_HEADER_DATA**)&h->data); } else { h->index = i; /* Mutt has several places where it turns off h->active as a * hack. For example to avoid FLAG updates, or to exclude from * imap_exec_msgset. * * Unfortunately, when a reopen is allowed and the IMAP_EXPUNGE_PENDING * flag becomes set (e.g. a flag update to a modified header), * this function will be called by imap_cmd_finish(). * * The mx_update_tables() will free and remove these "inactive" headers, * despite that an EXPUNGE was not received for them. * This would result in memory leaks and segfaults due to dangling * pointers in the msn_index and uid_hash. * * So this is another hack to work around the hacks. We don't want to * remove the messages, so make sure active is on. */ h->active = 1; } } #if USE_HCACHE imap_hcache_close (idata); #endif /* We may be called on to expunge at any time. We can't rely on the caller * to always know to rethread */ mx_update_tables (idata->ctx, 0); Sort = old_sort; mutt_sort_headers (idata->ctx, 1); } /* imap_check_capabilities: make sure we can log in to this server. */ static int imap_check_capabilities (IMAP_DATA* idata) { if (imap_exec (idata, "CAPABILITY", 0) != 0) { imap_error ("imap_check_capabilities", idata->buf); return -1; } if (!(mutt_bit_isset(idata->capabilities,IMAP4) ||mutt_bit_isset(idata->capabilities,IMAP4REV1))) { mutt_error _("This IMAP server is ancient. Mutt does not work with it."); mutt_sleep (2); /* pause a moment to let the user see the error */ return -1; } return 0; } /* imap_conn_find: Find an open IMAP connection matching account, or open * a new one if none can be found. 
*/ IMAP_DATA* imap_conn_find (const ACCOUNT* account, int flags) { CONNECTION* conn = NULL; ACCOUNT* creds = NULL; IMAP_DATA* idata = NULL; int new = 0; while ((conn = mutt_conn_find (conn, account))) { if (!creds) creds = &conn->account; else memcpy (&conn->account, creds, sizeof (ACCOUNT)); idata = (IMAP_DATA*)conn->data; if (flags & MUTT_IMAP_CONN_NONEW) { if (!idata) { /* This should only happen if we've come to the end of the list */ mutt_socket_free (conn); return NULL; } else if (idata->state < IMAP_AUTHENTICATED) continue; } if (flags & MUTT_IMAP_CONN_NOSELECT && idata && idata->state >= IMAP_SELECTED) continue; if (idata && idata->status == IMAP_FATAL) continue; break; } if (!conn) return NULL; /* this happens when the initial connection fails */ if (!idata) { /* The current connection is a new connection */ if (! (idata = imap_new_idata ())) { mutt_socket_free (conn); return NULL; } conn->data = idata; idata->conn = conn; new = 1; } if (idata->state == IMAP_DISCONNECTED) imap_open_connection (idata); if (idata->state == IMAP_CONNECTED) { if (!imap_authenticate (idata)) { idata->state = IMAP_AUTHENTICATED; FREE (&idata->capstr); new = 1; if (idata->conn->ssf) dprint (2, (debugfile, "Communication encrypted at %d bits\n", idata->conn->ssf)); } else mutt_account_unsetpass (&idata->conn->account); } if (new && idata->state == IMAP_AUTHENTICATED) { /* capabilities may have changed */ imap_exec (idata, "CAPABILITY", IMAP_CMD_QUEUE); /* enable RFC6855, if the server supports that */ if (mutt_bit_isset (idata->capabilities, ENABLE)) imap_exec (idata, "ENABLE UTF8=ACCEPT", IMAP_CMD_QUEUE); /* get root delimiter, '/' as default */ idata->delim = '/'; imap_exec (idata, "LIST \"\" \"\"", IMAP_CMD_QUEUE); if (option (OPTIMAPCHECKSUBSCRIBED)) imap_exec (idata, "LSUB \"\" \"*\"", IMAP_CMD_QUEUE); /* we may need the root delimiter before we open a mailbox */ imap_exec (idata, NULL, IMAP_CMD_FAIL_OK); } return idata; } int imap_open_connection (IMAP_DATA* idata) { char buf[LONG_STRING]; if (mutt_socket_open (idata->conn) < 0) return -1; idata->state = IMAP_CONNECTED; if (imap_cmd_step (idata) != IMAP_CMD_OK) { imap_close_connection (idata); return -1; } if (ascii_strncasecmp ("* OK", idata->buf, 4) == 0) { if (ascii_strncasecmp ("* OK [CAPABILITY", idata->buf, 16) && imap_check_capabilities (idata)) goto bail; #if defined(USE_SSL) /* Attempt STARTTLS if available and desired. */ if (!idata->conn->ssf && (option(OPTSSLFORCETLS) || mutt_bit_isset (idata->capabilities, STARTTLS))) { int rc; if (option(OPTSSLFORCETLS)) rc = MUTT_YES; else if ((rc = query_quadoption (OPT_SSLSTARTTLS, _("Secure connection with TLS?"))) == -1) goto err_close_conn; if (rc == MUTT_YES) { if ((rc = imap_exec (idata, "STARTTLS", IMAP_CMD_FAIL_OK)) == -1) goto bail; if (rc != -2) { if (mutt_ssl_starttls (idata->conn)) { mutt_error (_("Could not negotiate TLS connection")); mutt_sleep (1); goto err_close_conn; } else { /* RFC 2595 demands we recheck CAPABILITY after TLS completes. */ if (imap_exec (idata, "CAPABILITY", 0)) goto bail; } } } } if (option(OPTSSLFORCETLS) && ! 
idata->conn->ssf) { mutt_error _("Encrypted connection unavailable"); mutt_sleep (1); goto err_close_conn; } #endif } else if (ascii_strncasecmp ("* PREAUTH", idata->buf, 9) == 0) { idata->state = IMAP_AUTHENTICATED; if (imap_check_capabilities (idata) != 0) goto bail; FREE (&idata->capstr); } else { imap_error ("imap_open_connection()", buf); goto bail; } return 0; #if defined(USE_SSL) err_close_conn: imap_close_connection (idata); #endif bail: FREE (&idata->capstr); return -1; } void imap_close_connection(IMAP_DATA* idata) { if (idata->state != IMAP_DISCONNECTED) { mutt_socket_close (idata->conn); idata->state = IMAP_DISCONNECTED; } idata->seqno = idata->nextcmd = idata->lastcmd = idata->status = 0; memset (idata->cmds, 0, sizeof (IMAP_COMMAND) * idata->cmdslots); } /* imap_get_flags: Make a simple list out of a FLAGS response. * return stream following FLAGS response */ static char* imap_get_flags (LIST** hflags, char* s) { LIST* flags; char* flag_word; char ctmp; /* sanity-check string */ if (ascii_strncasecmp ("FLAGS", s, 5) != 0) { dprint (1, (debugfile, "imap_get_flags: not a FLAGS response: %s\n", s)); return NULL; } s += 5; SKIPWS(s); if (*s != '(') { dprint (1, (debugfile, "imap_get_flags: bogus FLAGS response: %s\n", s)); return NULL; } /* create list, update caller's flags handle */ flags = mutt_new_list(); *hflags = flags; while (*s && *s != ')') { s++; SKIPWS(s); flag_word = s; while (*s && (*s != ')') && !ISSPACE (*s)) s++; ctmp = *s; *s = '\0'; if (*flag_word) mutt_add_list (flags, flag_word); *s = ctmp; } /* note bad flags response */ if (*s != ')') { dprint (1, (debugfile, "imap_get_flags: Unterminated FLAGS response: %s\n", s)); mutt_free_list (hflags); return NULL; } s++; return s; } static int imap_open_mailbox (CONTEXT* ctx) { IMAP_DATA *idata; IMAP_STATUS* status; char buf[LONG_STRING]; char bufout[LONG_STRING]; int count = 0; IMAP_MBOX mx, pmx; int rc; if (imap_parse_path (ctx->path, &mx)) { mutt_error (_("%s is an invalid IMAP path"), ctx->path); return -1; } /* we require a connection which isn't currently in IMAP_SELECTED state */ if (!(idata = imap_conn_find (&(mx.account), MUTT_IMAP_CONN_NOSELECT))) goto fail_noidata; if (idata->state < IMAP_AUTHENTICATED) goto fail; /* once again the context is new */ ctx->data = idata; /* Clean up path and replace the one in the ctx */ imap_fix_path (idata, mx.mbox, buf, sizeof (buf)); if (!*buf) strfcpy (buf, "INBOX", sizeof (buf)); FREE(&(idata->mailbox)); idata->mailbox = safe_strdup (buf); imap_qualify_path (buf, sizeof (buf), &mx, idata->mailbox); FREE (&(ctx->path)); FREE (&(ctx->realpath)); ctx->path = safe_strdup (buf); ctx->realpath = safe_strdup (ctx->path); idata->ctx = ctx; /* clear mailbox status */ idata->status = 0; memset (idata->ctx->rights, 0, sizeof (idata->ctx->rights)); idata->newMailCount = 0; idata->max_msn = 0; mutt_message (_("Selecting %s..."), idata->mailbox); imap_munge_mbox_name (idata, buf, sizeof(buf), idata->mailbox); /* pipeline ACL test */ if (mutt_bit_isset (idata->capabilities, ACL)) { snprintf (bufout, sizeof (bufout), "MYRIGHTS %s", buf); imap_exec (idata, bufout, IMAP_CMD_QUEUE); } /* assume we have all rights if ACL is unavailable */ else { mutt_bit_set (idata->ctx->rights, MUTT_ACL_LOOKUP); mutt_bit_set (idata->ctx->rights, MUTT_ACL_READ); mutt_bit_set (idata->ctx->rights, MUTT_ACL_SEEN); mutt_bit_set (idata->ctx->rights, MUTT_ACL_WRITE); mutt_bit_set (idata->ctx->rights, MUTT_ACL_INSERT); mutt_bit_set (idata->ctx->rights, MUTT_ACL_POST); mutt_bit_set (idata->ctx->rights, 
MUTT_ACL_CREATE); mutt_bit_set (idata->ctx->rights, MUTT_ACL_DELETE); } /* pipeline the postponed count if possible */ pmx.mbox = NULL; if (mx_is_imap (Postponed) && !imap_parse_path (Postponed, &pmx) && mutt_account_match (&pmx.account, &mx.account)) imap_status (Postponed, 1); FREE (&pmx.mbox); snprintf (bufout, sizeof (bufout), "%s %s", ctx->readonly ? "EXAMINE" : "SELECT", buf); idata->state = IMAP_SELECTED; imap_cmd_start (idata, bufout); status = imap_mboxcache_get (idata, idata->mailbox, 1); do { char *pc; if ((rc = imap_cmd_step (idata)) != IMAP_CMD_CONTINUE) break; pc = idata->buf + 2; /* Obtain list of available flags here, may be overridden by a * PERMANENTFLAGS tag in the OK response */ if (ascii_strncasecmp ("FLAGS", pc, 5) == 0) { /* don't override PERMANENTFLAGS */ if (!idata->flags) { dprint (3, (debugfile, "Getting mailbox FLAGS\n")); if ((pc = imap_get_flags (&(idata->flags), pc)) == NULL) goto fail; } } /* PERMANENTFLAGS are massaged to look like FLAGS, then override FLAGS */ else if (ascii_strncasecmp ("OK [PERMANENTFLAGS", pc, 18) == 0) { dprint (3, (debugfile, "Getting mailbox PERMANENTFLAGS\n")); /* safe to call on NULL */ mutt_free_list (&(idata->flags)); /* skip "OK [PERMANENT" so syntax is the same as FLAGS */ pc += 13; if ((pc = imap_get_flags (&(idata->flags), pc)) == NULL) goto fail; } /* save UIDVALIDITY for the header cache */ else if (ascii_strncasecmp ("OK [UIDVALIDITY", pc, 14) == 0) { dprint (3, (debugfile, "Getting mailbox UIDVALIDITY\n")); pc += 3; pc = imap_next_word (pc); if (mutt_atoui (pc, &idata->uid_validity) < 0) goto fail; status->uidvalidity = idata->uid_validity; } else if (ascii_strncasecmp ("OK [UIDNEXT", pc, 11) == 0) { dprint (3, (debugfile, "Getting mailbox UIDNEXT\n")); pc += 3; pc = imap_next_word (pc); if (mutt_atoui (pc, &idata->uidnext) < 0) goto fail; status->uidnext = idata->uidnext; } else { pc = imap_next_word (pc); if (!ascii_strncasecmp ("EXISTS", pc, 6)) { count = idata->newMailCount; idata->newMailCount = 0; } } } while (rc == IMAP_CMD_CONTINUE); if (rc == IMAP_CMD_NO) { char *s; s = imap_next_word (idata->buf); /* skip seq */ s = imap_next_word (s); /* Skip response */ mutt_error ("%s", s); mutt_sleep (2); goto fail; } if (rc != IMAP_CMD_OK) goto fail; /* check for READ-ONLY notification */ if (!ascii_strncasecmp (imap_get_qualifier (idata->buf), "[READ-ONLY]", 11) \ && !mutt_bit_isset (idata->capabilities, ACL)) { dprint (2, (debugfile, "Mailbox is read-only.\n")); ctx->readonly = 1; } #ifdef DEBUG /* dump the mailbox flags we've found */ if (debuglevel > 2) { if (!idata->flags) dprint (3, (debugfile, "No folder flags found\n")); else { LIST* t = idata->flags; dprint (3, (debugfile, "Mailbox flags: ")); t = t->next; while (t) { dprint (3, (debugfile, "[%s] ", t->data)); t = t->next; } dprint (3, (debugfile, "\n")); } } #endif if (!(mutt_bit_isset(idata->ctx->rights, MUTT_ACL_DELETE) || mutt_bit_isset(idata->ctx->rights, MUTT_ACL_SEEN) || mutt_bit_isset(idata->ctx->rights, MUTT_ACL_WRITE) || mutt_bit_isset(idata->ctx->rights, MUTT_ACL_INSERT))) ctx->readonly = 1; ctx->hdrmax = count; ctx->hdrs = safe_calloc (count, sizeof (HEADER *)); ctx->v2r = safe_calloc (count, sizeof (int)); ctx->msgcount = 0; if (count && (imap_read_headers (idata, 1, count) < 0)) { mutt_error _("Error opening mailbox"); mutt_sleep (1); goto fail; } dprint (2, (debugfile, "imap_open_mailbox: msgcount is %d\n", ctx->msgcount)); FREE (&mx.mbox); return 0; fail: if (idata->state == IMAP_SELECTED) idata->state = IMAP_AUTHENTICATED; fail_noidata: FREE 
(&mx.mbox); return -1; } static int imap_open_mailbox_append (CONTEXT *ctx, int flags) { IMAP_DATA *idata; char buf[LONG_STRING]; char mailbox[LONG_STRING]; IMAP_MBOX mx; int rc; if (imap_parse_path (ctx->path, &mx)) return -1; /* in APPEND mode, we appear to hijack an existing IMAP connection - * ctx is brand new and mostly empty */ if (!(idata = imap_conn_find (&(mx.account), 0))) { FREE (&mx.mbox); return -1; } ctx->data = idata; imap_fix_path (idata, mx.mbox, mailbox, sizeof (mailbox)); if (!*mailbox) strfcpy (mailbox, "INBOX", sizeof (mailbox)); FREE (&mx.mbox); if ((rc = imap_access (ctx->path)) == 0) return 0; if (rc == -1) return -1; snprintf (buf, sizeof (buf), _("Create %s?"), mailbox); if (option (OPTCONFIRMCREATE) && mutt_yesorno (buf, 1) < 1) return -1; if (imap_create_mailbox (idata, mailbox) < 0) return -1; return 0; } /* imap_logout: Gracefully log out of server. */ void imap_logout (IMAP_DATA** idata) { /* we set status here to let imap_handle_untagged know we _expect_ to * receive a bye response (so it doesn't freak out and close the conn) */ (*idata)->status = IMAP_BYE; imap_cmd_start (*idata, "LOGOUT"); if (ImapPollTimeout <= 0 || mutt_socket_poll ((*idata)->conn, ImapPollTimeout) != 0) { while (imap_cmd_step (*idata) == IMAP_CMD_CONTINUE) ; } mutt_socket_close ((*idata)->conn); imap_free_idata (idata); } static int imap_open_new_message (MESSAGE *msg, CONTEXT *dest, HEADER *hdr) { char tmp[_POSIX_PATH_MAX]; mutt_mktemp (tmp, sizeof (tmp)); if ((msg->fp = safe_fopen (tmp, "w")) == NULL) { mutt_perror (tmp); return (-1); } msg->path = safe_strdup(tmp); return 0; } /* imap_set_flag: append str to flags if we currently have permission * according to aclbit */ static void imap_set_flag (IMAP_DATA* idata, int aclbit, int flag, const char *str, char *flags, size_t flsize) { if (mutt_bit_isset (idata->ctx->rights, aclbit)) if (flag && imap_has_flag (idata->flags, str)) safe_strcat (flags, flsize, str); } /* imap_has_flag: do a caseless comparison of the flag against a flag list, * return 1 if found or flag list has '\*', 0 otherwise */ int imap_has_flag (LIST* flag_list, const char* flag) { if (!flag_list) return 0; flag_list = flag_list->next; while (flag_list) { if (!ascii_strncasecmp (flag_list->data, flag, strlen (flag_list->data))) return 1; if (!ascii_strncmp (flag_list->data, "\\*", strlen (flag_list->data))) return 1; flag_list = flag_list->next; } return 0; } /* Note: headers must be in SORT_ORDER. See imap_exec_msgset for args. * Pos is an opaque pointer a la strtok. It should be 0 at first call. 
*/ static int imap_make_msg_set (IMAP_DATA* idata, BUFFER* buf, int flag, int changed, int invert, int* pos) { HEADER** hdrs = idata->ctx->hdrs; int count = 0; /* number of messages in message set */ int match = 0; /* whether current message matches flag condition */ unsigned int setstart = 0; /* start of current message range */ int n; int started = 0; hdrs = idata->ctx->hdrs; for (n = *pos; n < idata->ctx->msgcount && buf->dptr - buf->data < IMAP_MAX_CMDLEN; n++) { match = 0; /* don't include pending expunged messages */ if (hdrs[n]->active) switch (flag) { case MUTT_DELETED: if (hdrs[n]->deleted != HEADER_DATA(hdrs[n])->deleted) match = invert ^ hdrs[n]->deleted; break; case MUTT_FLAG: if (hdrs[n]->flagged != HEADER_DATA(hdrs[n])->flagged) match = invert ^ hdrs[n]->flagged; break; case MUTT_OLD: if (hdrs[n]->old != HEADER_DATA(hdrs[n])->old) match = invert ^ hdrs[n]->old; break; case MUTT_READ: if (hdrs[n]->read != HEADER_DATA(hdrs[n])->read) match = invert ^ hdrs[n]->read; break; case MUTT_REPLIED: if (hdrs[n]->replied != HEADER_DATA(hdrs[n])->replied) match = invert ^ hdrs[n]->replied; break; case MUTT_TAG: if (hdrs[n]->tagged) match = 1; break; case MUTT_TRASH: if (hdrs[n]->deleted && !hdrs[n]->purge) match = 1; break; } if (match && (!changed || hdrs[n]->changed)) { count++; if (setstart == 0) { setstart = HEADER_DATA (hdrs[n])->uid; if (started == 0) { mutt_buffer_printf (buf, "%u", HEADER_DATA (hdrs[n])->uid); started = 1; } else mutt_buffer_printf (buf, ",%u", HEADER_DATA (hdrs[n])->uid); } /* tie up if the last message also matches */ else if (n == idata->ctx->msgcount-1) mutt_buffer_printf (buf, ":%u", HEADER_DATA (hdrs[n])->uid); } /* End current set if message doesn't match or we've reached the end * of the mailbox via inactive messages following the last match. */ else if (setstart && (hdrs[n]->active || n == idata->ctx->msgcount-1)) { if (HEADER_DATA (hdrs[n-1])->uid > setstart) mutt_buffer_printf (buf, ":%u", HEADER_DATA (hdrs[n-1])->uid); setstart = 0; } } *pos = n; return count; } /* Prepares commands for all messages matching conditions (must be flushed * with imap_exec) * Params: * idata: IMAP_DATA containing context containing header set * pre, post: commands are of the form "%s %s %s %s", tag, * pre, message set, post * flag: enum of flag type on which to filter * changed: include only changed messages in message set * invert: invert sense of flag, eg MUTT_READ matches unread messages * Returns: number of matched messages, or -1 on failure */ int imap_exec_msgset (IMAP_DATA* idata, const char* pre, const char* post, int flag, int changed, int invert) { HEADER** hdrs = NULL; short oldsort; BUFFER* cmd; int pos; int rc; int count = 0; if (! (cmd = mutt_buffer_new ())) { dprint (1, (debugfile, "imap_exec_msgset: unable to allocate buffer\n")); return -1; } /* We make a copy of the headers just in case resorting doesn't give exactly the original order (duplicate messages?), because other parts of the ctx are tied to the header order. This may be overkill. 
*/ oldsort = Sort; if (Sort != SORT_ORDER) { hdrs = idata->ctx->hdrs; idata->ctx->hdrs = safe_malloc (idata->ctx->msgcount * sizeof (HEADER*)); memcpy (idata->ctx->hdrs, hdrs, idata->ctx->msgcount * sizeof (HEADER*)); Sort = SORT_ORDER; qsort (idata->ctx->hdrs, idata->ctx->msgcount, sizeof (HEADER*), mutt_get_sort_func (SORT_ORDER)); } pos = 0; do { cmd->dptr = cmd->data; mutt_buffer_printf (cmd, "%s ", pre); rc = imap_make_msg_set (idata, cmd, flag, changed, invert, &pos); if (rc > 0) { mutt_buffer_printf (cmd, " %s", post); if (imap_exec (idata, cmd->data, IMAP_CMD_QUEUE)) { rc = -1; goto out; } count += rc; } } while (rc > 0); rc = count; out: mutt_buffer_free (&cmd); if (oldsort != Sort) { Sort = oldsort; FREE (&idata->ctx->hdrs); idata->ctx->hdrs = hdrs; } return rc; } /* returns 0 if mutt's flags match cached server flags: * EXCLUDING the deleted flag. */ static int compare_flags_for_copy (HEADER* h) { IMAP_HEADER_DATA* hd = (IMAP_HEADER_DATA*)h->data; if (h->read != hd->read) return 1; if (h->old != hd->old) return 1; if (h->flagged != hd->flagged) return 1; if (h->replied != hd->replied) return 1; return 0; } /* Update the IMAP server to reflect the flags for a single message before * performing a "UID COPY". * NOTE: This does not sync the "deleted" flag state, because it is not * desirable to propagate that flag into the copy. */ int imap_sync_message_for_copy (IMAP_DATA *idata, HEADER *hdr, BUFFER *cmd, int *err_continue) { char flags[LONG_STRING]; char uid[11]; if (!compare_flags_for_copy (hdr)) { if (hdr->deleted == HEADER_DATA(hdr)->deleted) hdr->changed = 0; return 0; } snprintf (uid, sizeof (uid), "%u", HEADER_DATA(hdr)->uid); cmd->dptr = cmd->data; mutt_buffer_addstr (cmd, "UID STORE "); mutt_buffer_addstr (cmd, uid); flags[0] = '\0'; imap_set_flag (idata, MUTT_ACL_SEEN, hdr->read, "\\Seen ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, hdr->old, "Old ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, hdr->flagged, "\\Flagged ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, hdr->replied, "\\Answered ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_DELETE, HEADER_DATA(hdr)->deleted, "\\Deleted ", flags, sizeof (flags)); /* now make sure we don't lose custom tags */ if (mutt_bit_isset (idata->ctx->rights, MUTT_ACL_WRITE)) imap_add_keywords (flags, hdr, idata->flags, sizeof (flags)); mutt_remove_trailing_ws (flags); /* UW-IMAP is OK with null flags, Cyrus isn't. 
The only solution is to * explicitly revoke all system flags (if we have permission) */ if (!*flags) { imap_set_flag (idata, MUTT_ACL_SEEN, 1, "\\Seen ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, 1, "Old ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, 1, "\\Flagged ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_WRITE, 1, "\\Answered ", flags, sizeof (flags)); imap_set_flag (idata, MUTT_ACL_DELETE, !HEADER_DATA(hdr)->deleted, "\\Deleted ", flags, sizeof (flags)); mutt_remove_trailing_ws (flags); mutt_buffer_addstr (cmd, " -FLAGS.SILENT ("); } else mutt_buffer_addstr (cmd, " FLAGS.SILENT ("); mutt_buffer_addstr (cmd, flags); mutt_buffer_addstr (cmd, ")"); /* dumb hack for bad UW-IMAP 4.7 servers spurious FLAGS updates */ hdr->active = 0; /* after all this it's still possible to have no flags, if you * have no ACL rights */ if (*flags && (imap_exec (idata, cmd->data, 0) != 0) && err_continue && (*err_continue != MUTT_YES)) { *err_continue = imap_continue ("imap_sync_message: STORE failed", idata->buf); if (*err_continue != MUTT_YES) { hdr->active = 1; return -1; } } hdr->active = 1; if (hdr->deleted == HEADER_DATA(hdr)->deleted) hdr->changed = 0; return 0; } static int sync_helper (IMAP_DATA* idata, int right, int flag, const char* name) { int count = 0; int rc; char buf[LONG_STRING]; if (!idata->ctx) return -1; if (!mutt_bit_isset (idata->ctx->rights, right)) return 0; if (right == MUTT_ACL_WRITE && !imap_has_flag (idata->flags, name)) return 0; snprintf (buf, sizeof(buf), "+FLAGS.SILENT (%s)", name); if ((rc = imap_exec_msgset (idata, "UID STORE", buf, flag, 1, 0)) < 0) return rc; count += rc; buf[0] = '-'; if ((rc = imap_exec_msgset (idata, "UID STORE", buf, flag, 1, 1)) < 0) return rc; count += rc; return count; } /* update the IMAP server to reflect message changes done within mutt. * Arguments * ctx: the current context * expunge: 0 or 1 - do expunge? */ int imap_sync_mailbox (CONTEXT* ctx, int expunge, int* index_hint) { IMAP_DATA* idata; CONTEXT* appendctx = NULL; HEADER* h; HEADER** hdrs = NULL; int oldsort; int n; int rc; idata = (IMAP_DATA*) ctx->data; if (idata->state < IMAP_SELECTED) { dprint (2, (debugfile, "imap_sync_mailbox: no mailbox selected\n")); return -1; } /* This function is only called when the calling code expects the context * to be changed. */ imap_allow_reopen (ctx); if ((rc = imap_check_mailbox (ctx, index_hint, 0)) != 0) return rc; /* if we are expunging anyway, we can do deleted messages very quickly... */ if (expunge && mutt_bit_isset (ctx->rights, MUTT_ACL_DELETE)) { if ((rc = imap_exec_msgset (idata, "UID STORE", "+FLAGS.SILENT (\\Deleted)", MUTT_DELETED, 1, 0)) < 0) { mutt_error (_("Expunge failed")); mutt_sleep (1); goto out; } if (rc > 0) { /* mark these messages as unchanged so second pass ignores them. Done * here so BOGUS UW-IMAP 4.7 SILENT FLAGS updates are ignored. 
*/ for (n = 0; n < ctx->msgcount; n++) if (ctx->hdrs[n]->deleted && ctx->hdrs[n]->changed) ctx->hdrs[n]->active = 0; mutt_message (_("Marking %d messages deleted..."), rc); } } #if USE_HCACHE idata->hcache = imap_hcache_open (idata, NULL); #endif /* save messages with real (non-flag) changes */ for (n = 0; n < ctx->msgcount; n++) { h = ctx->hdrs[n]; if (h->deleted) { imap_cache_del (idata, h); #if USE_HCACHE imap_hcache_del (idata, HEADER_DATA(h)->uid); #endif } if (h->active && h->changed) { #if USE_HCACHE imap_hcache_put (idata, h); #endif /* if the message has been rethreaded or attachments have been deleted * we delete the message and reupload it. * This works better if we're expunging, of course. */ if ((h->env && (h->env->refs_changed || h->env->irt_changed)) || h->attach_del || h->xlabel_changed) { mutt_message (_("Saving changed messages... [%d/%d]"), n+1, ctx->msgcount); if (!appendctx) appendctx = mx_open_mailbox (ctx->path, MUTT_APPEND | MUTT_QUIET, NULL); if (!appendctx) dprint (1, (debugfile, "imap_sync_mailbox: Error opening mailbox in append mode\n")); else _mutt_save_message (h, appendctx, 1, 0, 0); h->xlabel_changed = 0; } } } #if USE_HCACHE imap_hcache_close (idata); #endif /* presort here to avoid doing 10 resorts in imap_exec_msgset */ oldsort = Sort; if (Sort != SORT_ORDER) { hdrs = ctx->hdrs; ctx->hdrs = safe_malloc (ctx->msgcount * sizeof (HEADER*)); memcpy (ctx->hdrs, hdrs, ctx->msgcount * sizeof (HEADER*)); Sort = SORT_ORDER; qsort (ctx->hdrs, ctx->msgcount, sizeof (HEADER*), mutt_get_sort_func (SORT_ORDER)); } rc = sync_helper (idata, MUTT_ACL_DELETE, MUTT_DELETED, "\\Deleted"); if (rc >= 0) rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_FLAG, "\\Flagged"); if (rc >= 0) rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_OLD, "Old"); if (rc >= 0) rc |= sync_helper (idata, MUTT_ACL_SEEN, MUTT_READ, "\\Seen"); if (rc >= 0) rc |= sync_helper (idata, MUTT_ACL_WRITE, MUTT_REPLIED, "\\Answered"); if (oldsort != Sort) { Sort = oldsort; FREE (&ctx->hdrs); ctx->hdrs = hdrs; } /* Flush the queued flags if any were changed in sync_helper. */ if (rc > 0) if (imap_exec (idata, NULL, 0) != IMAP_CMD_OK) rc = -1; if (rc < 0) { if (ctx->closing) { if (mutt_yesorno (_("Error saving flags. Close anyway?"), 0) == MUTT_YES) { rc = 0; idata->state = IMAP_AUTHENTICATED; goto out; } } else mutt_error _("Error saving flags"); rc = -1; goto out; } /* Update local record of server state to reflect the synchronization just * completed. imap_read_headers always overwrites hcache-origin flags, so * there is no need to mutate the hcache after flag-only changes. */ for (n = 0; n < ctx->msgcount; n++) { HEADER_DATA(ctx->hdrs[n])->deleted = ctx->hdrs[n]->deleted; HEADER_DATA(ctx->hdrs[n])->flagged = ctx->hdrs[n]->flagged; HEADER_DATA(ctx->hdrs[n])->old = ctx->hdrs[n]->old; HEADER_DATA(ctx->hdrs[n])->read = ctx->hdrs[n]->read; HEADER_DATA(ctx->hdrs[n])->replied = ctx->hdrs[n]->replied; ctx->hdrs[n]->changed = 0; } ctx->changed = 0; /* We must send an EXPUNGE command if we're not closing. 
*/ if (expunge && !(ctx->closing) && mutt_bit_isset(ctx->rights, MUTT_ACL_DELETE)) { mutt_message _("Expunging messages from server..."); /* Set expunge bit so we don't get spurious reopened messages */ idata->reopen |= IMAP_EXPUNGE_EXPECTED; if (imap_exec (idata, "EXPUNGE", 0) != 0) { idata->reopen &= ~IMAP_EXPUNGE_EXPECTED; imap_error (_("imap_sync_mailbox: EXPUNGE failed"), idata->buf); rc = -1; goto out; } idata->reopen &= ~IMAP_EXPUNGE_EXPECTED; } if (expunge && ctx->closing) { imap_exec (idata, "CLOSE", IMAP_CMD_QUEUE); idata->state = IMAP_AUTHENTICATED; } if (option (OPTMESSAGECACHECLEAN)) imap_cache_clean (idata); rc = 0; out: if (appendctx) { mx_fastclose_mailbox (appendctx); FREE (&appendctx); } return rc; } /* imap_close_mailbox: clean up IMAP data in CONTEXT */ int imap_close_mailbox (CONTEXT* ctx) { IMAP_DATA* idata; int i; idata = (IMAP_DATA*) ctx->data; /* Check to see if the mailbox is actually open */ if (!idata) return 0; /* imap_open_mailbox_append() borrows the IMAP_DATA temporarily, * just for the connection, but does not set idata->ctx to the * open-append ctx. * * So when these are equal, it means we are actually closing the * mailbox and should clean up idata. Otherwise, we don't want to * touch idata - it's still being used. */ if (ctx == idata->ctx) { if (idata->status != IMAP_FATAL && idata->state >= IMAP_SELECTED) { /* mx_close_mailbox won't sync if there are no deleted messages * and the mailbox is unchanged, so we may have to close here */ if (!ctx->deleted) imap_exec (idata, "CLOSE", IMAP_CMD_QUEUE); idata->state = IMAP_AUTHENTICATED; } idata->reopen &= IMAP_REOPEN_ALLOW; FREE (&(idata->mailbox)); mutt_free_list (&idata->flags); idata->ctx = NULL; hash_destroy (&idata->uid_hash, NULL); FREE (&idata->msn_index); idata->msn_index_size = 0; idata->max_msn = 0; for (i = 0; i < IMAP_CACHE_LEN; i++) { if (idata->cache[i].path) { unlink (idata->cache[i].path); FREE (&idata->cache[i].path); } } mutt_bcache_close (&idata->bcache); } /* free IMAP part of headers */ for (i = 0; i < ctx->msgcount; i++) /* mailbox may not have fully loaded */ if (ctx->hdrs[i] && ctx->hdrs[i]->data) imap_free_header_data ((IMAP_HEADER_DATA**)&(ctx->hdrs[i]->data)); return 0; } /* use the NOOP or IDLE command to poll for new mail * * return values: * MUTT_REOPENED mailbox has been externally modified * MUTT_NEW_MAIL new mail has arrived! * 0 no change * -1 error */ int imap_check_mailbox (CONTEXT *ctx, int *index_hint, int force) { /* overload keyboard timeout to avoid many mailbox checks in a row. * Most users don't like having to wait exactly when they press a key. */ IMAP_DATA* idata; int result = 0; idata = (IMAP_DATA*) ctx->data; /* try IDLE first, unless force is set */ if (!force && option (OPTIMAPIDLE) && mutt_bit_isset (idata->capabilities, IDLE) && (idata->state != IMAP_IDLE || time(NULL) >= idata->lastread + ImapKeepalive)) { if (imap_cmd_idle (idata) < 0) return -1; } if (idata->state == IMAP_IDLE) { while ((result = mutt_socket_poll (idata->conn, 0)) > 0) { if (imap_cmd_step (idata) != IMAP_CMD_CONTINUE) { dprint (1, (debugfile, "Error reading IDLE response\n")); return -1; } } if (result < 0) { dprint (1, (debugfile, "Poll failed, disabling IDLE\n")); mutt_bit_unset (idata->capabilities, IDLE); } } if ((force || (idata->state != IMAP_IDLE && time(NULL) >= idata->lastread + Timeout)) && imap_exec (idata, "NOOP", IMAP_CMD_POLL) != 0) return -1; /* We call this even when we haven't run NOOP in case we have pending * changes to process, since we can reopen here. 
*/ imap_cmd_finish (idata); if (idata->check_status & IMAP_EXPUNGE_PENDING) result = MUTT_REOPENED; else if (idata->check_status & IMAP_NEWMAIL_PENDING) result = MUTT_NEW_MAIL; else if (idata->check_status & IMAP_FLAGS_PENDING) result = MUTT_FLAGS; idata->check_status = 0; return result; } static int imap_check_mailbox_reopen (CONTEXT *ctx, int *index_hint) { int rc; imap_allow_reopen (ctx); rc = imap_check_mailbox (ctx, index_hint, 0); imap_disallow_reopen (ctx); return rc; } /* split path into (idata,mailbox name) */ static int imap_get_mailbox (const char* path, IMAP_DATA** hidata, char* buf, size_t blen) { IMAP_MBOX mx; if (imap_parse_path (path, &mx)) { dprint (1, (debugfile, "imap_get_mailbox: Error parsing %s\n", path)); return -1; } if (!(*hidata = imap_conn_find (&(mx.account), option (OPTIMAPPASSIVE) ? MUTT_IMAP_CONN_NONEW : 0)) || (*hidata)->state < IMAP_AUTHENTICATED) { FREE (&mx.mbox); return -1; } imap_fix_path (*hidata, mx.mbox, buf, blen); if (!*buf) strfcpy (buf, "INBOX", blen); FREE (&mx.mbox); return 0; } /* check for new mail in any subscribed mailboxes. Given a list of mailboxes * rather than called once for each so that it can batch the commands and * save on round trips. Returns number of mailboxes with new mail. */ int imap_buffy_check (int force, int check_stats) { IMAP_DATA* idata; IMAP_DATA* lastdata = NULL; BUFFY* mailbox; char name[LONG_STRING]; char command[LONG_STRING]; char munged[LONG_STRING]; int buffies = 0; for (mailbox = Incoming; mailbox; mailbox = mailbox->next) { /* Init newly-added mailboxes */ if (! mailbox->magic) { if (mx_is_imap (mailbox->path)) mailbox->magic = MUTT_IMAP; } if (mailbox->magic != MUTT_IMAP) continue; if (imap_get_mailbox (mailbox->path, &idata, name, sizeof (name)) < 0) { mailbox->new = 0; continue; } /* Don't issue STATUS on the selected mailbox, it will be NOOPed or * IDLEd elsewhere. * idata->mailbox may be NULL for connections other than the current * mailbox's, and shouldn't expand to INBOX in that case. #3216. */ if (idata->mailbox && !imap_mxcmp (name, idata->mailbox)) { mailbox->new = 0; continue; } if (!mutt_bit_isset (idata->capabilities, IMAP4REV1) && !mutt_bit_isset (idata->capabilities, STATUS)) { dprint (2, (debugfile, "Server doesn't support STATUS\n")); continue; } if (lastdata && idata != lastdata) { /* Send commands to previous server. Sorting the buffy list * may prevent some infelicitous interleavings */ if (imap_exec (lastdata, NULL, IMAP_CMD_FAIL_OK | IMAP_CMD_POLL) == -1) dprint (1, (debugfile, "Error polling mailboxes\n")); lastdata = NULL; } if (!lastdata) lastdata = idata; imap_munge_mbox_name (idata, munged, sizeof (munged), name); if (check_stats) snprintf (command, sizeof (command), "STATUS %s (UIDNEXT UIDVALIDITY UNSEEN RECENT MESSAGES)", munged); else snprintf (command, sizeof (command), "STATUS %s (UIDNEXT UIDVALIDITY UNSEEN RECENT)", munged); if (imap_exec (idata, command, IMAP_CMD_QUEUE | IMAP_CMD_POLL) < 0) { dprint (1, (debugfile, "Error queueing command\n")); return 0; } } if (lastdata && (imap_exec (lastdata, NULL, IMAP_CMD_FAIL_OK | IMAP_CMD_POLL) == -1)) { dprint (1, (debugfile, "Error polling mailboxes\n")); return 0; } /* collect results */ for (mailbox = Incoming; mailbox; mailbox = mailbox->next) { if (mailbox->magic == MUTT_IMAP && mailbox->new) buffies++; } return buffies; } /* imap_status: returns count of messages in mailbox, or -1 on error. 
* if queue != 0, queue the command and expect it to have been run * on the next call (for pipelining the postponed count) */ int imap_status (char* path, int queue) { static int queued = 0; IMAP_DATA *idata; char buf[LONG_STRING]; char mbox[LONG_STRING]; IMAP_STATUS* status; if (imap_get_mailbox (path, &idata, buf, sizeof (buf)) < 0) return -1; /* We are in the folder we're polling - just return the mailbox count. * * Note that imap_mxcmp() converts NULL to "INBOX", so we need to * make sure the idata really is open to a folder. */ if (idata->ctx && !imap_mxcmp (buf, idata->mailbox)) return idata->ctx->msgcount; else if (mutt_bit_isset(idata->capabilities,IMAP4REV1) || mutt_bit_isset(idata->capabilities,STATUS)) { imap_munge_mbox_name (idata, mbox, sizeof(mbox), buf); snprintf (buf, sizeof (buf), "STATUS %s (%s)", mbox, "MESSAGES"); imap_unmunge_mbox_name (idata, mbox); } else /* Server does not support STATUS, and this is not the current mailbox. * There is no lightweight way to check recent arrivals */ return -1; if (queue) { imap_exec (idata, buf, IMAP_CMD_QUEUE); queued = 1; return 0; } else if (!queued) imap_exec (idata, buf, 0); queued = 0; if ((status = imap_mboxcache_get (idata, mbox, 0))) return status->messages; return 0; } /* return cached mailbox stats or NULL if create is 0 */ IMAP_STATUS* imap_mboxcache_get (IMAP_DATA* idata, const char* mbox, int create) { LIST* cur; IMAP_STATUS* status; IMAP_STATUS scache; #ifdef USE_HCACHE header_cache_t *hc = NULL; unsigned int *uidvalidity = NULL; unsigned int *uidnext = NULL; #endif for (cur = idata->mboxcache; cur; cur = cur->next) { status = (IMAP_STATUS*)cur->data; if (!imap_mxcmp (mbox, status->name)) return status; } status = NULL; /* lame */ if (create) { memset (&scache, 0, sizeof (scache)); scache.name = (char*)mbox; idata->mboxcache = mutt_add_list_n (idata->mboxcache, &scache, sizeof (scache)); status = imap_mboxcache_get (idata, mbox, 0); status->name = safe_strdup (mbox); } #ifdef USE_HCACHE hc = imap_hcache_open (idata, mbox); if (hc) { uidvalidity = mutt_hcache_fetch_raw (hc, "/UIDVALIDITY", imap_hcache_keylen); uidnext = mutt_hcache_fetch_raw (hc, "/UIDNEXT", imap_hcache_keylen); if (uidvalidity) { if (!status) { mutt_hcache_free ((void **)&uidvalidity); mutt_hcache_free ((void **)&uidnext); mutt_hcache_close (hc); return imap_mboxcache_get (idata, mbox, 1); } status->uidvalidity = *uidvalidity; status->uidnext = uidnext ? 
*uidnext: 0; dprint (3, (debugfile, "mboxcache: hcache uidvalidity %u, uidnext %u\n", status->uidvalidity, status->uidnext)); } mutt_hcache_free ((void **)&uidvalidity); mutt_hcache_free ((void **)&uidnext); mutt_hcache_close (hc); } #endif return status; } void imap_mboxcache_free (IMAP_DATA* idata) { LIST* cur; IMAP_STATUS* status; for (cur = idata->mboxcache; cur; cur = cur->next) { status = (IMAP_STATUS*)cur->data; FREE (&status->name); } mutt_free_list (&idata->mboxcache); } /* returns number of patterns in the search that should be done server-side * (eg are full-text) */ static int do_search (const pattern_t* search, int allpats) { int rc = 0; const pattern_t* pat; for (pat = search; pat; pat = pat->next) { switch (pat->op) { case MUTT_BODY: case MUTT_HEADER: case MUTT_WHOLE_MSG: if (pat->stringmatch) rc++; break; default: if (pat->child && do_search (pat->child, 1)) rc++; } if (!allpats) break; } return rc; } /* convert mutt pattern_t to IMAP SEARCH command containing only elements * that require full-text search (mutt already has what it needs for most * match types, and does a better job (eg server doesn't support regexps). */ static int imap_compile_search (const pattern_t* pat, BUFFER* buf) { if (! do_search (pat, 0)) return 0; if (pat->not) mutt_buffer_addstr (buf, "NOT "); if (pat->child) { int clauses; if ((clauses = do_search (pat->child, 1)) > 0) { const pattern_t* clause = pat->child; mutt_buffer_addch (buf, '('); while (clauses) { if (do_search (clause, 0)) { if (pat->op == MUTT_OR && clauses > 1) mutt_buffer_addstr (buf, "OR "); clauses--; if (imap_compile_search (clause, buf) < 0) return -1; if (clauses) mutt_buffer_addch (buf, ' '); } clause = clause->next; } mutt_buffer_addch (buf, ')'); } } else { char term[STRING]; char *delim; switch (pat->op) { case MUTT_HEADER: mutt_buffer_addstr (buf, "HEADER "); /* extract header name */ if (! 
(delim = strchr (pat->p.str, ':'))) { mutt_error (_("Header search without header name: %s"), pat->p.str); return -1; } *delim = '\0'; imap_quote_string (term, sizeof (term), pat->p.str); mutt_buffer_addstr (buf, term); mutt_buffer_addch (buf, ' '); /* and field */ *delim = ':'; delim++; SKIPWS(delim); imap_quote_string (term, sizeof (term), delim); mutt_buffer_addstr (buf, term); break; case MUTT_BODY: mutt_buffer_addstr (buf, "BODY "); imap_quote_string (term, sizeof (term), pat->p.str); mutt_buffer_addstr (buf, term); break; case MUTT_WHOLE_MSG: mutt_buffer_addstr (buf, "TEXT "); imap_quote_string (term, sizeof (term), pat->p.str); mutt_buffer_addstr (buf, term); break; } } return 0; } int imap_search (CONTEXT* ctx, const pattern_t* pat) { BUFFER buf; IMAP_DATA* idata = (IMAP_DATA*)ctx->data; int i; for (i = 0; i < ctx->msgcount; i++) ctx->hdrs[i]->matched = 0; if (!do_search (pat, 1)) return 0; mutt_buffer_init (&buf); mutt_buffer_addstr (&buf, "UID SEARCH "); if (imap_compile_search (pat, &buf) < 0) { FREE (&buf.data); return -1; } if (imap_exec (idata, buf.data, 0) < 0) { FREE (&buf.data); return -1; } FREE (&buf.data); return 0; } int imap_subscribe (char *path, int subscribe) { IMAP_DATA *idata; char buf[LONG_STRING]; char mbox[LONG_STRING]; char errstr[STRING]; int mblen; BUFFER err, token; IMAP_MBOX mx; if (!mx_is_imap (path) || imap_parse_path (path, &mx) || !mx.mbox) { mutt_error (_("Bad mailbox name")); return -1; } if (!(idata = imap_conn_find (&(mx.account), 0))) goto fail; imap_fix_path (idata, mx.mbox, buf, sizeof (buf)); if (!*buf) strfcpy (buf, "INBOX", sizeof (buf)); if (option (OPTIMAPCHECKSUBSCRIBED)) { mutt_buffer_init (&token); mutt_buffer_init (&err); err.data = errstr; err.dsize = sizeof (errstr); mblen = snprintf (mbox, sizeof (mbox), "%smailboxes ", subscribe ? "" : "un"); imap_quote_string_and_backquotes (mbox + mblen, sizeof(mbox) - mblen, path); if (mutt_parse_rc_line (mbox, &token, &err)) dprint (1, (debugfile, "Error adding subscribed mailbox: %s\n", errstr)); FREE (&token.data); } if (subscribe) mutt_message (_("Subscribing to %s..."), buf); else mutt_message (_("Unsubscribing from %s..."), buf); imap_munge_mbox_name (idata, mbox, sizeof(mbox), buf); snprintf (buf, sizeof (buf), "%sSUBSCRIBE %s", subscribe ? "" : "UN", mbox); if (imap_exec (idata, buf, 0) < 0) goto fail; imap_unmunge_mbox_name(idata, mx.mbox); if (subscribe) mutt_message (_("Subscribed to %s"), mx.mbox); else mutt_message (_("Unsubscribed from %s"), mx.mbox); FREE (&mx.mbox); return 0; fail: FREE (&mx.mbox); return -1; } /* trim dest to the length of the longest prefix it shares with src, * returning the length of the trimmed string */ static size_t longest_common_prefix (char *dest, const char* src, size_t start, size_t dlen) { size_t pos = start; while (pos < dlen && dest[pos] && dest[pos] == src[pos]) pos++; dest[pos] = '\0'; return pos; } /* look for IMAP URLs to complete from defined mailboxes. Could be extended * to complete over open connections and account/folder hooks too. 
*/ static int imap_complete_hosts (char *dest, size_t len) { BUFFY* mailbox; CONNECTION* conn; int rc = -1; size_t matchlen; matchlen = mutt_strlen (dest); for (mailbox = Incoming; mailbox; mailbox = mailbox->next) { if (!mutt_strncmp (dest, mailbox->path, matchlen)) { if (rc) { strfcpy (dest, mailbox->path, len); rc = 0; } else longest_common_prefix (dest, mailbox->path, matchlen, len); } } for (conn = mutt_socket_head (); conn; conn = conn->next) { ciss_url_t url; char urlstr[LONG_STRING]; if (conn->account.type != MUTT_ACCT_TYPE_IMAP) continue; mutt_account_tourl (&conn->account, &url); /* FIXME: how to handle multiple users on the same host? */ url.user = NULL; url.path = NULL; url_ciss_tostring (&url, urlstr, sizeof (urlstr), 0); if (!mutt_strncmp (dest, urlstr, matchlen)) { if (rc) { strfcpy (dest, urlstr, len); rc = 0; } else longest_common_prefix (dest, urlstr, matchlen, len); } } return rc; } /* imap_complete: given a partial IMAP folder path, return a string which * adds as much to the path as is unique */ int imap_complete(char* dest, size_t dlen, char* path) { IMAP_DATA* idata; char list[LONG_STRING]; char buf[LONG_STRING]; IMAP_LIST listresp; char completion[LONG_STRING]; int clen; size_t matchlen = 0; int completions = 0; IMAP_MBOX mx; int rc; if (imap_parse_path (path, &mx)) { strfcpy (dest, path, dlen); return imap_complete_hosts (dest, dlen); } /* don't open a new socket just for completion. Instead complete over * known mailboxes/hooks/etc */ if (!(idata = imap_conn_find (&(mx.account), MUTT_IMAP_CONN_NONEW))) { FREE (&mx.mbox); strfcpy (dest, path, dlen); return imap_complete_hosts (dest, dlen); } /* reformat path for IMAP list, and append wildcard */ /* don't use INBOX in place of "" */ if (mx.mbox && mx.mbox[0]) imap_fix_path (idata, mx.mbox, list, sizeof(list)); else list[0] = '\0'; /* fire off command */ snprintf (buf, sizeof(buf), "%s \"\" \"%s%%\"", option (OPTIMAPLSUB) ? "LSUB" : "LIST", list); imap_cmd_start (idata, buf); /* and see what the results are */ strfcpy (completion, NONULL(mx.mbox), sizeof(completion)); idata->cmdtype = IMAP_CT_LIST; idata->cmddata = &listresp; do { listresp.name = NULL; rc = imap_cmd_step (idata); if (rc == IMAP_CMD_CONTINUE && listresp.name) { /* if the folder isn't selectable, append delimiter to force browse * to enter it on second tab. */ if (listresp.noselect) { clen = strlen(listresp.name); listresp.name[clen++] = listresp.delim; listresp.name[clen] = '\0'; } /* copy in first word */ if (!completions) { strfcpy (completion, listresp.name, sizeof(completion)); matchlen = strlen (completion); completions++; continue; } matchlen = longest_common_prefix (completion, listresp.name, 0, matchlen); completions++; } } while (rc == IMAP_CMD_CONTINUE); idata->cmddata = NULL; if (completions) { /* reformat output */ imap_qualify_path (dest, dlen, &mx, completion); mutt_pretty_mailbox (dest, dlen); FREE (&mx.mbox); return 0; } return -1; } /* imap_fast_trash: use server COPY command to copy deleted * messages to the trash folder. 
* Return codes: * -1: error * 0: success * 1: non-fatal error - try fetch/append */ int imap_fast_trash (CONTEXT* ctx, char* dest) { IMAP_DATA* idata; char mbox[LONG_STRING]; char mmbox[LONG_STRING]; char prompt[LONG_STRING]; int n, rc; IMAP_MBOX mx; int triedcreate = 0; BUFFER *sync_cmd = NULL; int err_continue = MUTT_NO; idata = (IMAP_DATA*) ctx->data; if (imap_parse_path (dest, &mx)) { dprint (1, (debugfile, "imap_fast_trash: bad destination %s\n", dest)); return -1; } /* check that the save-to folder is in the same account */ if (!mutt_account_match (&(CTX_DATA->conn->account), &(mx.account))) { dprint (3, (debugfile, "imap_fast_trash: %s not same server as %s\n", dest, ctx->path)); return 1; } imap_fix_path (idata, mx.mbox, mbox, sizeof (mbox)); if (!*mbox) strfcpy (mbox, "INBOX", sizeof (mbox)); imap_munge_mbox_name (idata, mmbox, sizeof (mmbox), mbox); sync_cmd = mutt_buffer_new (); for (n = 0; n < ctx->msgcount; n++) { if (ctx->hdrs[n]->active && ctx->hdrs[n]->changed && ctx->hdrs[n]->deleted && !ctx->hdrs[n]->purge) { rc = imap_sync_message_for_copy (idata, ctx->hdrs[n], sync_cmd, &err_continue); if (rc < 0) { dprint (1, (debugfile, "imap_fast_trash: could not sync\n")); goto out; } } } /* loop in case of TRYCREATE */ do { rc = imap_exec_msgset (idata, "UID COPY", mmbox, MUTT_TRASH, 0, 0); if (!rc) { dprint (1, (debugfile, "imap_fast_trash: No messages to trash\n")); rc = -1; goto out; } else if (rc < 0) { dprint (1, (debugfile, "could not queue copy\n")); goto out; } else mutt_message (_("Copying %d messages to %s..."), rc, mbox); /* let's get it on */ rc = imap_exec (idata, NULL, IMAP_CMD_FAIL_OK); if (rc == -2) { if (triedcreate) { dprint (1, (debugfile, "Already tried to create mailbox %s\n", mbox)); break; } /* bail out if command failed for reasons other than nonexistent target */ if (ascii_strncasecmp (imap_get_qualifier (idata->buf), "[TRYCREATE]", 11)) break; dprint (3, (debugfile, "imap_fast_trash: server suggests TRYCREATE\n")); snprintf (prompt, sizeof (prompt), _("Create %s?"), mbox); if (option (OPTCONFIRMCREATE) && mutt_yesorno (prompt, 1) < 1) { mutt_clear_error (); goto out; } if (imap_create_mailbox (idata, mbox) < 0) break; triedcreate = 1; } } while (rc == -2); if (rc != 0) { imap_error ("imap_fast_trash", idata->buf); goto out; } rc = 0; out: mutt_buffer_free (&sync_cmd); FREE (&mx.mbox); return rc < 0 ? -1 : rc; } struct mx_ops mx_imap_ops = { .open = imap_open_mailbox, .open_append = imap_open_mailbox_append, .close = imap_close_mailbox, .open_msg = imap_fetch_message, .close_msg = imap_close_message, .commit_msg = imap_commit_message, .open_new_msg = imap_open_new_message, .check = imap_check_mailbox_reopen, .sync = NULL, /* imap syncing is handled by imap_sync_mailbox */ };
int imap_subscribe (char *path, int subscribe)
{
  IMAP_DATA *idata;
  char buf[LONG_STRING];
  char mbox[LONG_STRING];
  char errstr[STRING];
  BUFFER err, token;
  IMAP_MBOX mx;

  if (!mx_is_imap (path) || imap_parse_path (path, &mx) || !mx.mbox)
  {
    mutt_error (_("Bad mailbox name"));
    return -1;
  }

  if (!(idata = imap_conn_find (&(mx.account), 0)))
    goto fail;

  imap_fix_path (idata, mx.mbox, buf, sizeof (buf));
  if (!*buf)
    strfcpy (buf, "INBOX", sizeof (buf));

  if (option (OPTIMAPCHECKSUBSCRIBED))
  {
    mutt_buffer_init (&token);
    mutt_buffer_init (&err);
    err.data = errstr;
    err.dsize = sizeof (errstr);
    snprintf (mbox, sizeof (mbox), "%smailboxes \"%s\"",
              subscribe ? "" : "un", path);
    if (mutt_parse_rc_line (mbox, &token, &err))
      dprint (1, (debugfile, "Error adding subscribed mailbox: %s\n", errstr));
    FREE (&token.data);
  }

  if (subscribe)
    mutt_message (_("Subscribing to %s..."), buf);
  else
    mutt_message (_("Unsubscribing from %s..."), buf);
  imap_munge_mbox_name (idata, mbox, sizeof(mbox), buf);

  snprintf (buf, sizeof (buf), "%sSUBSCRIBE %s", subscribe ? "" : "UN", mbox);

  if (imap_exec (idata, buf, 0) < 0)
    goto fail;

  imap_unmunge_mbox_name (idata, mx.mbox);
  if (subscribe)
    mutt_message (_("Subscribed to %s"), mx.mbox);
  else
    mutt_message (_("Unsubscribed from %s"), mx.mbox);
  FREE (&mx.mbox);
  return 0;

 fail:
  FREE (&mx.mbox);
  return -1;
}
int imap_subscribe (char *path, int subscribe)
{
  IMAP_DATA *idata;
  char buf[LONG_STRING];
  char mbox[LONG_STRING];
  char errstr[STRING];
  int mblen;
  BUFFER err, token;
  IMAP_MBOX mx;

  if (!mx_is_imap (path) || imap_parse_path (path, &mx) || !mx.mbox)
  {
    mutt_error (_("Bad mailbox name"));
    return -1;
  }

  if (!(idata = imap_conn_find (&(mx.account), 0)))
    goto fail;

  imap_fix_path (idata, mx.mbox, buf, sizeof (buf));
  if (!*buf)
    strfcpy (buf, "INBOX", sizeof (buf));

  if (option (OPTIMAPCHECKSUBSCRIBED))
  {
    mutt_buffer_init (&token);
    mutt_buffer_init (&err);
    err.data = errstr;
    err.dsize = sizeof (errstr);
    mblen = snprintf (mbox, sizeof (mbox), "%smailboxes ",
                      subscribe ? "" : "un");
    imap_quote_string_and_backquotes (mbox + mblen, sizeof(mbox) - mblen,
                                      path);
    if (mutt_parse_rc_line (mbox, &token, &err))
      dprint (1, (debugfile, "Error adding subscribed mailbox: %s\n", errstr));
    FREE (&token.data);
  }

  if (subscribe)
    mutt_message (_("Subscribing to %s..."), buf);
  else
    mutt_message (_("Unsubscribing from %s..."), buf);
  imap_munge_mbox_name (idata, mbox, sizeof(mbox), buf);

  snprintf (buf, sizeof (buf), "%sSUBSCRIBE %s", subscribe ? "" : "UN", mbox);

  if (imap_exec (idata, buf, 0) < 0)
    goto fail;

  imap_unmunge_mbox_name (idata, mx.mbox);
  if (subscribe)
    mutt_message (_("Subscribed to %s"), mx.mbox);
  else
    mutt_message (_("Unsubscribed from %s"), mx.mbox);
  FREE (&mx.mbox);
  return 0;

 fail:
  FREE (&mx.mbox);
  return -1;
}
{'added': [(1933, ' int mblen;'), (1955, ' mblen = snprintf (mbox, sizeof (mbox), "%smailboxes ",'), (1956, ' subscribe ? "" : "un");'), (1957, ' imap_quote_string_and_backquotes (mbox + mblen, sizeof(mbox) - mblen,'), (1958, ' path);')], 'deleted': [(1954, ' snprintf (mbox, sizeof (mbox), "%smailboxes \\"%s\\"",'), (1955, ' subscribe ? "" : "un", path);')]}
5
2
1,684
10,639
https://gitlab.com/muttmua/mutt
CVE-2018-14354
['CWE-78']
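The diff in this record fixes CVE-2018-14354 (CWE-78): the old imap_subscribe built a "mailboxes" rc line with a plain snprintf, so a server-supplied path containing rc-parser metacharacters (notably backquotes) flowed unescaped into mutt_parse_rc_line; the patched version records the prefix length and routes the path through imap_quote_string_and_backquotes instead. The standalone sketch below illustrates the general escaping idea only; the helper name quote_path_for_rc and its exact rules are hypothetical and are not mutt's actual imap_quote_string_and_backquotes implementation.

/* Minimal, self-contained sketch (assumed helper, not mutt code).
 * It quotes a path for a config-line parser and backslash-escapes
 * characters that could otherwise break out of the quoted token. */
#include <stdio.h>

static void quote_path_for_rc (char *dest, size_t dlen, const char *src)
{
  size_t j = 0;

  if (dlen < 3)                 /* need room for "" and NUL */
  {
    if (dlen)
      dest[0] = '\0';
    return;
  }

  dest[j++] = '"';
  for (; *src && j + 3 < dlen; src++)
  {
    /* escape quote, backslash and backquote so the parser sees them
     * as literal characters, not syntax */
    if (*src == '"' || *src == '\\' || *src == '`')
      dest[j++] = '\\';
    dest[j++] = *src;
  }
  dest[j++] = '"';
  dest[j] = '\0';
}

int main (void)
{
  char mbox[256];
  /* hostile example: a mailbox name carrying a backquoted command */
  const char *path = "imap://host/INBOX`touch pwned`";
  int mblen = snprintf (mbox, sizeof (mbox), "%smailboxes ", "");

  quote_path_for_rc (mbox + mblen, sizeof (mbox) - mblen, path);
  printf ("%s\n", mbox);        /* backquotes arrive escaped */
  return 0;
}

Run against the hostile example, the old pattern would hand the parser an unescaped `touch pwned` substitution, whereas the escaping approach keeps the whole path inside one inert quoted argument, which is the property the patch restores.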